Merge branch 'master' into PGPRO-421

commit 07c7c14df6
.gitignore (vendored, 1 line changed)
@@ -33,6 +33,7 @@
 /tests/helpers/*pyc
 
 # Extra files
+/src/pg_crc.c
 /src/datapagemap.c
 /src/datapagemap.h
 /src/logging.h
COPYRIGHT (deleted, 29 lines)
@@ -1,29 +0,0 @@
-Copyright (c) 2015-2017, Postgres Professional
-Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
-
-Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
-Portions Copyright (c) 1994, The Regents of the University of California
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-* Neither the name of the NIPPON TELEGRAPH AND TELEPHONE CORPORATION
-  (NTT) nor the names of its contributors may be used to endorse or
-  promote products derived from this software without specific prior
-  written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LICENSE (new file, 22 lines)
@@ -0,0 +1,22 @@
+Copyright (c) 2015-2018, Postgres Professional
+Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
+
+Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
+Portions Copyright (c) 1994, The Regents of the University of California
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this
+paragraph and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL POSTGRES PROFESSIONAL BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+DOCUMENTATION, EVEN IF POSTGRES PROFESSIONAL HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+POSTGRES PROFESSIONAL SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND POSTGRES PROFESSIONAL HAS NO OBLIGATIONS TO
+PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Makefile (35 lines changed)
@@ -1,16 +1,23 @@
 PROGRAM = pg_probackup
-OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
-	src/delete.o src/dir.o src/fetch.o src/help.o src/init.o \
-	src/pg_probackup.o src/restore.o src/show.o \
-	src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
-	src/xlogreader.o src/streamutil.o src/receivelog.o \
-	src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
-	src/utils/json.o src/utils/thread.o src/merge.o
-
-EXTRA_CLEAN = src/datapagemap.c src/datapagemap.h src/xlogreader.c \
-	src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h src/logging.h
-
-INCLUDES = src/datapagemap.h src/logging.h src/receivelog.h src/streamutil.h
+
+# utils
+OBJS = src/utils/json.o src/utils/logger.o src/utils/parray.o \
+	src/utils/pgut.o src/utils/thread.o
+
+OBJS += src/archive.o src/backup.o src/catalog.o src/configure.o src/data.o \
+	src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \
+	src/parsexlog.o src/pg_probackup.o src/restore.o src/show.o src/util.o \
+	src/validate.o
+
+# borrowed files
+OBJS += src/pg_crc.o src/datapagemap.o src/receivelog.o src/streamutil.o \
+	src/xlogreader.o
+
+EXTRA_CLEAN = src/pg_crc.c src/datapagemap.c src/datapagemap.h src/logging.h \
+	src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h \
+	src/xlogreader.c
+
+INCLUDES = src/datapagemap.h src/logging.h src/streamutil.h src/receivelog.h
 
 ifdef USE_PGXS
 PG_CONFIG = pg_config
@@ -38,7 +45,7 @@ EXTRA_CLEAN += src/walmethods.c src/walmethods.h
 INCLUDES += src/walmethods.h
 endif
 
-PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc
+PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_srcdir)/$(subdir)/src
 override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS)
 PG_LIBS = $(libpq_pgport) ${PTHREAD_CFLAGS}
 
@@ -46,14 +53,14 @@ all: checksrcdir $(INCLUDES);
 
 $(PROGRAM): $(OBJS)
 
-src/xlogreader.c: $(top_srcdir)/src/backend/access/transam/xlogreader.c
-	rm -f $@ && $(LN_S) $(srchome)/src/backend/access/transam/xlogreader.c $@
 src/datapagemap.c: $(top_srcdir)/src/bin/pg_rewind/datapagemap.c
	rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_rewind/datapagemap.c $@
 src/datapagemap.h: $(top_srcdir)/src/bin/pg_rewind/datapagemap.h
	rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_rewind/datapagemap.h $@
 src/logging.h: $(top_srcdir)/src/bin/pg_rewind/logging.h
	rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_rewind/logging.h $@
+src/pg_crc.c: $(top_srcdir)/src/backend/utils/hash/pg_crc.c
+	rm -f $@ && $(LN_S) $(srchome)/src/backend/utils/hash/pg_crc.c $@
 src/receivelog.c: $(top_srcdir)/src/bin/pg_basebackup/receivelog.c
	rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/receivelog.c $@
 src/receivelog.h: $(top_srcdir)/src/bin/pg_basebackup/receivelog.h
@@ -62,6 +69,8 @@ src/streamutil.c: $(top_srcdir)/src/bin/pg_basebackup/streamutil.c
	rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.c $@
 src/streamutil.h: $(top_srcdir)/src/bin/pg_basebackup/streamutil.h
	rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@
+src/xlogreader.c: $(top_srcdir)/src/backend/access/transam/xlogreader.c
+	rm -f $@ && $(LN_S) $(srchome)/src/backend/access/transam/xlogreader.c $@
 
 
 ifeq (,$(filter 9.5 9.6,$(MAJORVERSION)))
README.md

@@ -7,7 +7,7 @@ The utility is compatible with:
 
 `PTRACK` backup support provided via following options:
 * vanilla PostgreSQL compiled with ptrack patch. Currently there are patches for [PostgreSQL 9.6](https://gist.githubusercontent.com/gsmol/5b615c971dfd461c76ef41a118ff4d97/raw/e471251983f14e980041f43bea7709b8246f4178/ptrack_9.6.6_v1.5.patch) and [PostgreSQL 10](https://gist.githubusercontent.com/gsmol/be8ee2a132b88463821021fd910d960e/raw/de24f9499f4f314a4a3e5fae5ed4edb945964df8/ptrack_10.1_v1.5.patch)
-* Postgres Pro Standard 9.5, 9.6, 10
+* Postgres Pro Standard 9.5, 9.6, 10, 11
 * Postgres Pro Enterprise 9.5, 9.6, 10
 
 As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data:

@@ -38,7 +38,7 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp
 
 `pg_probackup` currently has the following limitations:
 * Creating backups from a remote server is currently not supported.
-* The server from which the backup was taken and the restored server must be compatible by the [block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#guc-block-size) and [wal_block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#guc-wal-block-size) parameters and have the same major release number.
+* The server from which the backup was taken and the restored server must be compatible by the [block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#GUC-BLOCK-SIZE) and [wal_block_size](https://postgrespro.com/docs/postgresql/current/runtime-config-preset#GUC-WAL-BLOCK-SIZE) parameters and have the same major release number.
 * Microsoft Windows operating system is not supported.
 * Configuration files outside of PostgreSQL data directory are not included into the backup and should be backed up separately.
 

@@ -85,7 +85,7 @@ Currently the latest documentation can be found at [Postgres Pro Enterprise docu
 
 ## Licence
 
-This module available under the same license as [PostgreSQL](https://www.postgresql.org/about/licence/).
+This module available under the [license](LICENSE) similar to [PostgreSQL](https://www.postgresql.org/about/licence/).
 
 ## Feedback
 
gen_probackup_project.pl

@@ -7,7 +7,7 @@ my $pgsrc="";
 if (@ARGV==1)
 {
 	$pgsrc = shift @ARGV;
-	if($pgsrc == "--help"){
+	if($pgsrc eq "--help"){
 	print STDERR "Usage $0 pg-source-dir \n";
 	print STDERR "Like this: \n";
 	print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro \n";

@@ -157,6 +157,7 @@ sub build_pgprobackup
 		'thread.c'
 	);
 	$probackup->AddFile('src/backend/access/transam/xlogreader.c');
+	$probackup->AddFile('src/backend/utils/hash/pg_crc.c');
 	$probackup->AddFiles(
 		'src/bin/pg_basebackup',
 		'receivelog.c',
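Note on the `==` to `eq` fix above: Perl's `==` compares numerically, so `$pgsrc == "--help"` coerces both sides to 0 and matches almost any input. C has the same shape of bug when `==` is applied to strings, which compares addresses rather than contents. A minimal standalone sketch (illustration only, not project code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char arg[] = "--help";      /* e.g. a copy taken from argv[1] */

        /* Wrong: compares the address of 'arg' with the address of a
         * string literal, so this is false even for identical text. */
        if (arg == (char *) "--help")
            puts("pointer compare matched (it will not)");

        /* Right: compare contents, the C analogue of Perl's 'eq'. */
        if (strcmp(arg, "--help") == 0)
            puts("strcmp matched");

        return 0;
    }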
Visual Studio project templates (three files, names not shown in this view) gain the same pg_crc.c entry:

@@ -165,6 +165,7 @@
 	</ItemDefinitionGroup>
 	<ItemGroup>
 		<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
+		<ClCompile Include="@PGSRC@\backend\utils\hash\pg_crc.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />

@@ -165,6 +165,7 @@
 	</ItemDefinitionGroup>
 	<ItemGroup>
 		<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
+		<ClCompile Include="@PGSRC@\backend\utils\hash\pg_crc.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_rewind\datapagemap.c" />

@@ -160,6 +160,7 @@
 	<!-- @PGROOT@\lib;@ADDLIBS@ @PGSRC@ @ADDINCLUDE@ -->
 	<ItemGroup>
 		<ClCompile Include="@PGSRC@\backend\access\transam\xlogreader.c" />
+		<ClCompile Include="@PGSRC@\backend\utils\hash\pg_crc.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\receivelog.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\streamutil.c" />
 		<ClCompile Include="@PGSRC@\bin\pg_basebackup\walmethods.c" />
src/archive.c

@@ -3,7 +3,7 @@
  * archive.c: - pg_probackup specific archive commands for archive backups.
  *
  *
- * Portions Copyright (c) 2017, Postgres Professional
+ * Portions Copyright (c) 2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
  */
src/backup.c (162 lines changed)
@@ -3,7 +3,7 @@
  * backup.c: backup DB cluster, archived WAL
  *
  * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
  */
@@ -306,7 +306,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
 				 to_path, strerror(errno_tmp));
 	}
 
-	INIT_CRC32C(file->crc);
+	INIT_TRADITIONAL_CRC32(file->crc);
 
 	/* read from stream and write to backup file */
 	while (1)

@@ -332,14 +332,14 @@ remote_copy_file(PGconn *conn, pgFile* file)
 		{
 			write_buffer_size = Min(row_length, sizeof(buf));
 			memcpy(buf, copybuf, write_buffer_size);
-			COMP_CRC32C(file->crc, buf, write_buffer_size);
+			COMP_TRADITIONAL_CRC32(file->crc, buf, write_buffer_size);
 
 			/* TODO calc checksum*/
 			if (fwrite(buf, 1, write_buffer_size, out) != write_buffer_size)
 			{
 				errno_tmp = errno;
 				/* oops */
-				FIN_CRC32C(file->crc);
+				FIN_TRADITIONAL_CRC32(file->crc);
 				fclose(out);
 				PQfinish(conn);
 				elog(ERROR, "cannot write to \"%s\": %s", to_path,

@@ -363,7 +363,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
 	}
 
 	file->write_size = (int64) file->read_size;
-	FIN_CRC32C(file->crc);
+	FIN_TRADITIONAL_CRC32(file->crc);
 
 	fclose(out);
 }
@@ -477,6 +477,9 @@ do_backup_instance(void)
 
 	pgBackup   *prev_backup = NULL;
 	parray	   *prev_backup_filelist = NULL;
+	parray	   *backup_list = NULL;
 
+	pgFile	   *pg_control = NULL;
+
 	elog(LOG, "Database backup start");
 	i = 0;
@@ -531,7 +534,6 @@ do_backup_instance(void)
 		current.backup_mode == BACKUP_MODE_DIFF_PTRACK ||
 		current.backup_mode == BACKUP_MODE_DIFF_DELTA)
 	{
-		parray	   *backup_list;
 		char		prev_backup_filelist_path[MAXPGPATH];
 
 		/* get list of backups already taken */

@@ -541,7 +543,6 @@ do_backup_instance(void)
 		if (prev_backup == NULL)
 			elog(ERROR, "Valid backup on current timeline is not found. "
 						"Create new FULL backup before an incremental one.");
-		parray_free(backup_list);
 
 		pgBackupGetPath(prev_backup, prev_backup_filelist_path,
 						lengthof(prev_backup_filelist_path), DATABASE_FILE_LIST);

@@ -556,8 +557,8 @@ do_backup_instance(void)
 		}
 
 		/*
-		 * It`s illegal to take PTRACK backup if LSN from ptrack_control() is not equal to
-		 * stort_backup LSN of previous backup
+		 * It`s illegal to take PTRACK backup if LSN from ptrack_control() is not
+		 * equal to stop_lsn of previous backup.
 		 */
 		if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
 		{
@@ -783,9 +784,34 @@ do_backup_instance(void)
 		parray_free(prev_backup_filelist);
 	}
 
+	/* In case of backup from replica >= 9.6 we must fix minRecPoint,
+	 * First we must find pg_control in backup_files_list.
+	 */
+	if (current.from_replica && !exclusive_backup)
+	{
+		char		pg_control_path[MAXPGPATH];
+
+		snprintf(pg_control_path, sizeof(pg_control_path), "%s/%s", pgdata, "global/pg_control");
+
+		for (i = 0; i < parray_num(backup_files_list); i++)
+		{
+			pgFile	   *tmp_file = (pgFile *) parray_get(backup_files_list, i);
+
+			if (strcmp(tmp_file->path, pg_control_path) == 0)
+			{
+				pg_control = tmp_file;
+				break;
+			}
+		}
+	}
+
+
 	/* Notify end of backup */
 	pg_stop_backup(&current);
 
+	if (current.from_replica && !exclusive_backup)
+		set_min_recovery_point(pg_control, database_path, current.stop_lsn);
+
 	/* Add archived xlog files into the list of files of this backup */
 	if (stream_wal)
 	{
@@ -819,7 +845,7 @@ do_backup_instance(void)
 	}
 
 	/* Print the list of files to backup catalog */
-	pgBackupWriteFileList(&current, backup_files_list, pgdata);
+	write_backup_filelist(&current, backup_files_list, pgdata);
 
 	/* Compute summary of size of regular files in the backup */
 	for (i = 0; i < parray_num(backup_files_list); i++)
@@ -834,6 +860,13 @@ do_backup_instance(void)
 			current.data_bytes += file->write_size;
 	}
 
+	/* Cleanup */
+	if (backup_list)
+	{
+		parray_walk(backup_list, pgBackupFree);
+		parray_free(backup_list);
+	}
+
 	parray_walk(backup_files_list, pgFileFree);
 	parray_free(backup_files_list);
 	backup_files_list = NULL;
@@ -912,7 +945,7 @@ do_backup(time_t start_time)
 		}
 	}
 
-	if (current.from_replica)
+	if (current.from_replica && exclusive_backup)
 	{
 		/* Check master connection options */
 		if (master_host == NULL)
@@ -1118,8 +1151,11 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
 
 	params[0] = label;
 
-	/* For replica we call pg_start_backup() on master */
-	conn = (backup->from_replica) ? master_conn : backup_conn;
+	/* For 9.5 replica we call pg_start_backup() on master */
+	if (backup->from_replica && exclusive_backup)
+		conn = master_conn;
+	else
+		conn = backup_conn;
 
 	/* 2nd argument is 'fast'*/
 	params[1] = smooth ? "false" : "true";
@@ -1147,16 +1183,18 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
 
 	PQclear(res);
 
-	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
+	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE &&
+		(!(backup->from_replica && !exclusive_backup)))
 		/*
 		 * Switch to a new WAL segment. It is necessary to get archived WAL
 		 * segment, which includes start LSN of current backup.
+		 * Don`t do this for replica backups unless it`s PG 9.5
 		 */
 		pg_switch_wal(conn);
 
 	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
 		/* In PAGE mode wait for current segment... */
 		wait_wal_lsn(backup->start_lsn, true, false);
 	/*
	 * Do not wait start_lsn for stream backup.
	 * Because WAL streaming will start after pg_start_backup() in stream
@@ -1166,8 +1204,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
 		/* ...for others wait for previous segment */
 		wait_wal_lsn(backup->start_lsn, true, true);
 
-	/* Wait for start_lsn to be replayed by replica */
-	if (backup->from_replica)
+	/* In case of backup from replica for PostgreSQL 9.5
+	 * wait for start_lsn to be replayed by replica
+	 */
+	if (backup->from_replica && exclusive_backup)
 		wait_replica_wal_lsn(backup->start_lsn, true);
 }
@@ -1222,7 +1262,8 @@ pg_ptrack_support(void)
 
 	/* Now we support only ptrack versions upper than 1.5 */
 	if (strcmp(PQgetvalue(res_db, 0, 0), "1.5") != 0 &&
-		strcmp(PQgetvalue(res_db, 0, 0), "1.6") != 0)
+		strcmp(PQgetvalue(res_db, 0, 0), "1.6") != 0 &&
+		strcmp(PQgetvalue(res_db, 0, 0), "1.7") != 0)
 	{
 		elog(WARNING, "Update your ptrack to the version 1.5 or upper. Current version is %s", PQgetvalue(res_db, 0, 0));
 		PQclear(res_db);
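Each newly supported ptrack release extends the strcmp chain above by one more branch. A table-driven check keeps the whitelist in one place; a standalone sketch (hypothetical helper, not the project's code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* The versions the chained strcmp accepts, kept in one array. */
    static const char *supported_ptrack_versions[] = {"1.5", "1.6", "1.7"};

    static bool
    ptrack_version_supported(const char *version)
    {
        size_t  i;

        for (i = 0; i < sizeof(supported_ptrack_versions) /
                        sizeof(supported_ptrack_versions[0]); i++)
            if (strcmp(version, supported_ptrack_versions[i]) == 0)
                return true;
        return false;
    }

    int main(void)
    {
        printf("1.6 supported: %d\n", ptrack_version_supported("1.6"));
        printf("1.4 supported: %d\n", ptrack_version_supported("1.4"));
        return 0;
    }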
@@ -1312,7 +1353,9 @@ pg_ptrack_clear(void)
 		tblspcOid = atoi(PQgetvalue(res_db, i, 2));
 
 		tmp_conn = pgut_connect(dbname);
-		res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()", 0, NULL);
+		res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()",
+						   0, NULL);
 		PQclear(res);
 
 		sprintf(params[0], "%i", dbOid);
 		sprintf(params[1], "%i", tblspcOid);
@@ -1514,7 +1557,7 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
 	GetXLogFileName(wal_segment, tli, targetSegNo, xlog_seg_size);
 
 	/*
-	 * In pg_start_backup we wait for 'lsn' in 'pg_wal' directory iff it is
+	 * In pg_start_backup we wait for 'lsn' in 'pg_wal' directory if it is
 	 * stream and non-page backup. Page backup needs archived WAL files, so we
 	 * wait for 'lsn' in archive 'wal' directory for page backups.
 	 *

@@ -1535,13 +1578,19 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
 	{
 		join_path_components(wal_segment_path, arclog_path, wal_segment);
 		wal_segment_dir = arclog_path;
-		timeout = archive_timeout;
+
+		if (archive_timeout > 0)
+			timeout = archive_timeout;
+		else
+			timeout = ARCHIVE_TIMEOUT_DEFAULT;
 	}
 
 	if (wait_prev_segment)
 		elog(LOG, "Looking for segment: %s", wal_segment);
 	else
-		elog(LOG, "Looking for LSN: %X/%X in segment: %s", (uint32) (lsn >> 32), (uint32) lsn, wal_segment);
+		elog(LOG, "Looking for LSN: %X/%X in segment: %s",
+			 (uint32) (lsn >> 32), (uint32) lsn, wal_segment);
 
 #ifdef HAVE_LIBZ
 	snprintf(gz_wal_segment_path, sizeof(gz_wal_segment_path), "%s.gz",
@@ -1694,7 +1743,7 @@ pg_stop_backup(pgBackup *backup)
 	PGresult   *tablespace_map_content = NULL;
 	uint32		lsn_hi;
 	uint32		lsn_lo;
-	XLogRecPtr	restore_lsn = InvalidXLogRecPtr;
+	//XLogRecPtr	restore_lsn = InvalidXLogRecPtr;
 	int			pg_stop_backup_timeout = 0;
 	char		path[MAXPGPATH];
 	char		backup_label[MAXPGPATH];

@@ -1714,16 +1763,21 @@ pg_stop_backup(pgBackup *backup)
 	if (!backup_in_progress)
 		elog(ERROR, "backup is not in progress");
 
-	/* For replica we call pg_stop_backup() on master */
-	conn = (current.from_replica) ? master_conn : backup_conn;
+	/* For 9.5 replica we call pg_stop_backup() on master */
+	if (current.from_replica && exclusive_backup)
+		conn = master_conn;
+	else
+		conn = backup_conn;
 
 	/* Remove annoying NOTICE messages generated by backend */
 	res = pgut_execute(conn, "SET client_min_messages = warning;",
 					   0, NULL);
 	PQclear(res);
 
-	/* Create restore point */
-	if (backup != NULL)
+	/* Create restore point
+	 * only if it`s backup from master, or exclusive replica(wich connects to master)
+	 */
+	if (backup != NULL && (!current.from_replica || (current.from_replica && exclusive_backup)))
 	{
 		const char *params[1];
 		char		name[1024];
@@ -1741,7 +1795,7 @@ pg_stop_backup(pgBackup *backup)
 		/* Extract timeline and LSN from the result */
 		XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
 		/* Calculate LSN */
-		restore_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
+		//restore_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
 		PQclear(res);
 	}
 
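The line commented out above is the usual frontend idiom for LSNs: parse the textual %X/%X form into two 32-bit halves, combine them into one 64-bit position, and split it again for printing. A self-contained round-trip sketch (sample value made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const char   *text = "16/B374D848";   /* hypothetical LSN text */
        unsigned int  lsn_hi, lsn_lo;
        uint64_t      lsn;

        if (sscanf(text, "%X/%X", &lsn_hi, &lsn_lo) != 2)
            return 1;

        /* Combine the two halves exactly as the hunk above does. */
        lsn = ((uint64_t) lsn_hi) << 32 | lsn_lo;

        /* ...and split it back, the idiom used in the elog() calls. */
        printf("%X/%X\n", (unsigned int) (lsn >> 32), (unsigned int) lsn);
        return 0;
    }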
@@ -1762,14 +1816,29 @@ pg_stop_backup(pgBackup *backup)
 		 * Stop the non-exclusive backup. Besides stop_lsn it returns from
 		 * pg_stop_backup(false) copy of the backup label and tablespace map
 		 * so they can be written to disk by the caller.
+		 * In case of backup from replica >= 9.6 we do not trust minRecPoint
+		 * and stop_backup LSN, so we use latest replayed LSN as STOP LSN.
 		 */
-		stop_backup_query = "SELECT"
-			" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
-			" current_timestamp(0)::timestamptz,"
-			" lsn,"
-			" labelfile,"
-			" spcmapfile"
-			" FROM pg_catalog.pg_stop_backup(false)";
+		if (current.from_replica)
+			stop_backup_query = "SELECT"
+				" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+				" current_timestamp(0)::timestamptz,"
+#if PG_VERSION_NUM >= 100000
+				" pg_catalog.pg_last_wal_replay_lsn(),"
+#else
+				" pg_catalog.pg_last_xlog_replay_location(),"
+#endif
+				" labelfile,"
+				" spcmapfile"
+				" FROM pg_catalog.pg_stop_backup(false)";
+		else
+			stop_backup_query = "SELECT"
+				" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
+				" current_timestamp(0)::timestamptz,"
+				" lsn,"
+				" labelfile,"
+				" spcmapfile"
+				" FROM pg_catalog.pg_stop_backup(false)";
 
 	}
 	else
@@ -1857,13 +1926,13 @@ pg_stop_backup(pgBackup *backup)
 
 	if (!XRecOffIsValid(stop_backup_lsn))
 	{
-		stop_backup_lsn = restore_lsn;
+		if (XRecOffIsNull(stop_backup_lsn))
+			stop_backup_lsn = stop_backup_lsn + SizeOfXLogLongPHD;
+		else
+			elog(ERROR, "Invalid stop_backup_lsn value %X/%X",
+				 (uint32) (stop_backup_lsn >> 32), (uint32) (stop_backup_lsn));
 	}
 
-	if (!XRecOffIsValid(stop_backup_lsn))
-		elog(ERROR, "Invalid stop_backup_lsn value %X/%X",
-			 (uint32) (stop_backup_lsn >> 32), (uint32) (stop_backup_lsn));
-
 	/* Write backup_label and tablespace_map */
 	if (!exclusive_backup)
 	{
@@ -1964,7 +2033,7 @@ pg_stop_backup(pgBackup *backup)
 				stream_xlog_path[MAXPGPATH];
 
 		/* Wait for stop_lsn to be received by replica */
-		if (backup->from_replica)
+		if (current.from_replica)
 			wait_replica_wal_lsn(stop_backup_lsn, false);
 		/*
 		 * Wait for stop_lsn to be archived or streamed.
@@ -1987,10 +2056,12 @@ pg_stop_backup(pgBackup *backup)
 
 		elog(LOG, "Getting the Recovery Time from WAL");
 
+		/* iterate over WAL from stop_backup lsn to start_backup lsn */
 		if (!read_recovery_info(xlog_path, backup->tli, xlog_seg_size,
 								backup->start_lsn, backup->stop_lsn,
 								&backup->recovery_time, &backup->recovery_xid))
 		{
 			elog(LOG, "Failed to find Recovery Time in WAL. Forced to trust current_timestamp");
 			backup->recovery_time = recovery_time;
 			backup->recovery_xid = recovery_xid;
 		}
@@ -2099,7 +2170,7 @@ backup_files(void *arg)
 			elog(ERROR, "interrupted during backup");
 
 		if (progress)
-			elog(LOG, "Progress: (%d/%d). Process file \"%s\"",
+			elog(INFO, "Progress: (%d/%d). Process file \"%s\"",
 				 i + 1, n_backup_files_list, file->path);
 
 		/* stat file to check its current state */

@@ -2187,7 +2258,7 @@ backup_files(void *arg)
 				{
 					calc_file_checksum(file);
 					/* ...and checksum is the same... */
-					if (EQ_CRC32C(file->crc, (*prev_file)->crc))
+					if (EQ_TRADITIONAL_CRC32(file->crc, (*prev_file)->crc))
 						skip = true; /* ...skip copying file. */
 				}
 				if (skip ||

@@ -2204,7 +2275,7 @@ backup_files(void *arg)
 					 file->path, file->write_size);
 		}
 		else
-			elog(LOG, "unexpected file type %d", buf.st_mode);
+			elog(WARNING, "unexpected file type %d", buf.st_mode);
 	}
 
 	/* Close connection */

@@ -2688,7 +2759,8 @@ get_last_ptrack_lsn(void)
 	uint32		lsn_lo;
 	XLogRecPtr	lsn;
 
-	res = pgut_execute(backup_conn, "select pg_catalog.pg_ptrack_control_lsn()", 0, NULL);
+	res = pgut_execute(backup_conn, "select pg_catalog.pg_ptrack_control_lsn()",
+					   0, NULL);
 
 	/* Extract timeline and LSN from results of pg_start_backup() */
 	XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
src/catalog.c

@@ -3,7 +3,7 @@
  * catalog.c: backup catalog operation
  *
  * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
  */
@@ -509,18 +509,22 @@ write_backup(pgBackup *backup)
 	fp = fopen(conf_path, "wt");
 	if (fp == NULL)
 		elog(ERROR, "Cannot open configuration file \"%s\": %s", conf_path,
-			strerror(errno));
+			 strerror(errno));
 
 	pgBackupWriteControl(fp, backup);
 
-	fclose(fp);
+	if (fflush(fp) != 0 ||
+		fsync(fileno(fp)) != 0 ||
+		fclose(fp))
+		elog(ERROR, "Cannot write configuration file \"%s\": %s",
+			 conf_path, strerror(errno));
 }
 
 /*
  * Output the list of files to backup catalog DATABASE_FILE_LIST
  */
 void
-pgBackupWriteFileList(pgBackup *backup, parray *files, const char *root)
+write_backup_filelist(pgBackup *backup, parray *files, const char *root)
 {
 	FILE	   *fp;
 	char		path[MAXPGPATH];

@@ -529,7 +533,7 @@ pgBackupWriteFileList(pgBackup *backup, parray *files, const char *root)
 
 	fp = fopen(path, "wt");
 	if (fp == NULL)
-		elog(ERROR, "cannot open file list \"%s\": %s", path,
+		elog(ERROR, "Cannot open file list \"%s\": %s", path,
 			 strerror(errno));
 
 	print_file_list(fp, files, root);

@@ -537,7 +541,7 @@ pgBackupWriteFileList(pgBackup *backup, parray *files, const char *root)
 	if (fflush(fp) != 0 ||
 		fsync(fileno(fp)) != 0 ||
 		fclose(fp))
-		elog(ERROR, "cannot write file list \"%s\": %s", path, strerror(errno));
+		elog(ERROR, "Cannot write file list \"%s\": %s", path, strerror(errno));
 }
 
 /*
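The plain fclose() replaced above becomes the durable-write pattern: flush stdio buffers, fsync the descriptor, then close, treating a failure at any step as fatal. The same pattern in isolation (hypothetical path, POSIX fsync/fileno assumed):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/tmp/backup.control";   /* hypothetical */
        FILE       *fp = fopen(path, "wt");

        if (fp == NULL)
        {
            fprintf(stderr, "cannot open \"%s\": %s\n", path, strerror(errno));
            return 1;
        }

        fprintf(fp, "status = OK\n");

        /* Flush user-space buffers, then the kernel's, then close;
         * if any step fails the file on disk cannot be trusted. */
        if (fflush(fp) != 0 ||
            fsync(fileno(fp)) != 0 ||
            fclose(fp) != 0)
        {
            fprintf(stderr, "cannot write \"%s\": %s\n", path, strerror(errno));
            return 1;
        }
        return 0;
    }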
src/configure.c

@@ -257,14 +257,14 @@ readBackupCatalogConfigFile(void)
 	{ 's', 0, "master-port", &(config->master_port), SOURCE_FILE_STRICT },
 	{ 's', 0, "master-db", &(config->master_db), SOURCE_FILE_STRICT },
 	{ 's', 0, "master-user", &(config->master_user), SOURCE_FILE_STRICT },
-	{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
+	{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
 	/* other options */
 	{ 'U', 0, "system-identifier", &(config->system_identifier), SOURCE_FILE_STRICT },
 #if PG_VERSION_NUM >= 110000
 	{'u', 0, "xlog-seg-size", &config->xlog_seg_size, SOURCE_FILE_STRICT},
 #endif
 	/* archive options */
-	{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
+	{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
 	{0}
 };
src/data.c (208 lines changed)
@@ -3,7 +3,7 @@
  * data.c: utils to parse and backup data pages
  *
  * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
  */
@@ -57,7 +57,7 @@ zlib_decompress(void *dst, size_t dst_size, void const *src, size_t src_size)
  */
 static int32
 do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
-			CompressAlg alg, int level)
+			CompressAlg alg, int level, const char **errormsg)
 {
 	switch (alg)
 	{

@@ -66,7 +66,13 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
 			return -1;
 #ifdef HAVE_LIBZ
 		case ZLIB_COMPRESS:
-			return zlib_compress(dst, dst_size, src, src_size, level);
+		{
+			int32		ret;
+			ret = zlib_compress(dst, dst_size, src, src_size, level);
+			if (ret < Z_OK && errormsg)
+				*errormsg = zError(ret);
+			return ret;
+		}
 #endif
 		case PGLZ_COMPRESS:
 			return pglz_compress(src, src_size, dst, PGLZ_strategy_always);

@@ -81,7 +87,7 @@ do_compress(void* dst, size_t dst_size, void const* src, size_t src_size,
  */
 static int32
 do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size,
-			  CompressAlg alg)
+			  CompressAlg alg, const char **errormsg)
 {
 	switch (alg)
 	{

@@ -90,7 +96,13 @@ do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size,
 			return -1;
 #ifdef HAVE_LIBZ
 		case ZLIB_COMPRESS:
-			return zlib_decompress(dst, dst_size, src, src_size);
+		{
+			int32		ret;
+			ret = zlib_decompress(dst, dst_size, src, src_size);
+			if (ret < Z_OK && errormsg)
+				*errormsg = zError(ret);
+			return ret;
+		}
 #endif
 		case PGLZ_COMPRESS:
 			return pglz_decompress(src, src_size, dst, dst_size);
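Both wrappers above report zlib failures through zError(), which maps a negative return code to a static, human-readable string that is safe to keep a pointer to. A minimal sketch of that reporting pattern (the output buffer is deliberately undersized to force an error):

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        unsigned char src[] = "some page bytes";
        unsigned char dst[4];                /* deliberately too small */
        uLongf        dst_len = sizeof(dst);
        const char   *errormsg = NULL;

        int ret = compress2(dst, &dst_len, src, sizeof(src), 1);
        if (ret < Z_OK)
            errormsg = zError(ret);          /* e.g. "buffer error" */

        if (errormsg)
            fprintf(stderr, "compression failed: %s\n", errormsg);
        return ret == Z_OK ? 0 : 1;
    }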
@@ -99,6 +111,54 @@ do_decompress(void* dst, size_t dst_size, void const* src, size_t src_size,
 			return -1;
 }
 
+
+#define ZLIB_MAGIC 0x78
+
+/*
+ * Before version 2.0.23 there was a bug in pro_backup that pages which compressed
+ * size is exactly the same as original size are not treated as compressed.
+ * This check tries to detect and decompress such pages.
+ * There is no 100% criteria to determine whether page is compressed or not.
+ * But at least we will do this check only for pages which will no pass validation step.
+ */
+static bool
+page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version)
+{
+	PageHeader	phdr;
+
+	phdr = (PageHeader) page;
+
+	/* First check if page header is valid (it seems to be fast enough check) */
+	if (!(PageGetPageSize(phdr) == BLCKSZ &&
+		  PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION &&
+		  (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
+		  phdr->pd_lower >= SizeOfPageHeaderData &&
+		  phdr->pd_lower <= phdr->pd_upper &&
+		  phdr->pd_upper <= phdr->pd_special &&
+		  phdr->pd_special <= BLCKSZ &&
+		  phdr->pd_special == MAXALIGN(phdr->pd_special)))
+	{
+		/* ... end only if it is invalid, then do more checks */
+		if (backup_version >= 20023)
+		{
+			/* Versions 2.0.23 and higher don't have such bug */
+			return false;
+		}
+#ifdef HAVE_LIBZ
+		/* For zlib we can check page magic:
+		 * https://stackoverflow.com/questions/9050260/what-does-a-zlib-header-look-like
+		 */
+		if (alg == ZLIB_COMPRESS && *(char*)page != ZLIB_MAGIC)
+		{
+			return false;
+		}
+#endif
+		/* otherwize let's try to decompress the page */
+		return true;
+	}
+	return false;
+}
+
 /*
  * When copying datafiles to backup we validate and compress them block
  * by block. Thus special header is required for each data block.
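The ZLIB_MAGIC test in page_may_be_compressed() relies on the fact that a zlib stream produced with default settings starts with the byte 0x78, so a page whose first byte differs cannot be such a stream; it is a negative filter, not proof of compression. The heuristic in isolation (assumes default deflate settings):

    #include <stdbool.h>
    #include <stdio.h>
    #include <zlib.h>

    #define ZLIB_MAGIC 0x78

    static bool
    may_be_zlib(const unsigned char *buf, size_t len)
    {
        return len > 0 && buf[0] == ZLIB_MAGIC;
    }

    int main(void)
    {
        unsigned char raw[] = "hello hello hello hello";
        unsigned char packed[64];
        uLongf        packed_len = sizeof(packed);

        /* compress() emits a zlib-wrapped stream whose header byte is 0x78 */
        if (compress(packed, &packed_len, raw, sizeof(raw)) != Z_OK)
            return 1;

        printf("compressed: %s\n", may_be_zlib(packed, packed_len) ? "maybe" : "no");
        printf("plain text: %s\n", may_be_zlib(raw, sizeof(raw)) ? "maybe" : "no");
        return 0;
    }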
@@ -368,7 +428,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
 	BackupPageHeader header;
 	size_t		write_buffer_size = sizeof(header);
 	char		write_buffer[BLCKSZ+sizeof(header)];
-	char		compressed_page[BLCKSZ];
+	char		compressed_page[BLCKSZ*2]; /* compressed page may require more space than uncompressed */
 
 	if(page_state == SkipCurrentPage)
 		return;

@@ -386,16 +446,22 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
 	}
 	else
 	{
+		const char *errormsg = NULL;
+
 		/* The page was not truncated, so we need to compress it */
-		header.compressed_size = do_compress(compressed_page, BLCKSZ,
-											 page, BLCKSZ, calg, clevel);
+		header.compressed_size = do_compress(compressed_page, sizeof(compressed_page),
+											 page, BLCKSZ, calg, clevel,
+											 &errormsg);
+		/* Something went wrong and errormsg was assigned, throw a warning */
+		if (header.compressed_size < 0 && errormsg != NULL)
+			elog(WARNING, "An error occured during compressing block %u of file \"%s\": %s",
+				 blknum, file->path, errormsg);
 
 		file->compress_alg = calg;
 		file->read_size += BLCKSZ;
 		Assert (header.compressed_size <= BLCKSZ);
 
 		/* The page was successfully compressed. */
-		if (header.compressed_size > 0)
+		if (header.compressed_size > 0 && header.compressed_size < BLCKSZ)
 		{
 			memcpy(write_buffer, &header, sizeof(header));
 			memcpy(write_buffer + sizeof(header),
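The BLCKSZ*2 buffer above exists because deflate can expand incompressible input, so the output buffer must exceed the source size. zlib's compressBound() gives the exact worst case; a sketch where 8192 stands in for BLCKSZ:

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        uLong blcksz = 8192;    /* stand-in for BLCKSZ */

        /* Worst-case size of a zlib stream for blcksz input bytes:
         * slightly larger than the input, never smaller. */
        printf("compressBound(%lu) = %lu\n", blcksz, compressBound(blcksz));
        return 0;
    }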
@@ -416,7 +482,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
 			   blknum, header.compressed_size, write_buffer_size); */
 
 	/* Update CRC */
-	COMP_CRC32C(*crc, write_buffer, write_buffer_size);
+	COMP_TRADITIONAL_CRC32(*crc, write_buffer, write_buffer_size);
 
 	/* write data page */
 	if(fwrite(write_buffer, 1, write_buffer_size, out) != write_buffer_size)

@@ -425,7 +491,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
 
 		fclose(in);
 		fclose(out);
-		elog(ERROR, "File: %s, cannot write backup at block %u : %s",
+		elog(ERROR, "File: %s, cannot write backup at block %u: %s",
 			 file->path, blknum, strerror(errno_tmp));
 	}
 
@@ -476,13 +542,13 @@ backup_data_file(backup_files_arg* arguments,
 	/* reset size summary */
 	file->read_size = 0;
 	file->write_size = 0;
-	INIT_CRC32C(file->crc);
+	INIT_TRADITIONAL_CRC32(file->crc);
 
 	/* open backup mode file for read */
 	in = fopen(file->path, PG_BINARY_R);
 	if (in == NULL)
 	{
-		FIN_CRC32C(file->crc);
+		FIN_TRADITIONAL_CRC32(file->crc);
 
 		/*
 		 * If file is not found, this is not en error.
@@ -587,7 +653,7 @@ backup_data_file(backup_files_arg* arguments,
 			 to_path, strerror(errno));
 	fclose(in);
 
-	FIN_CRC32C(file->crc);
+	FIN_TRADITIONAL_CRC32(file->crc);
 
 	/*
 	 * If we have pagemap then file in the backup can't be a zero size.

@@ -613,7 +679,7 @@ backup_data_file(backup_files_arg* arguments,
  */
 void
 restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
-				  bool write_header)
+				  bool write_header, uint32 backup_version)
 {
 	FILE	   *in = NULL;
 	FILE	   *out = NULL;
@@ -656,6 +722,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
 		size_t		read_len;
 		DataPage	compressed_page; /* used as read buffer */
 		DataPage	page;
+		int32		uncompressed_size = 0;
 
 		/* File didn`t changed. Nothig to copy */
 		if (file->write_size == BYTES_INVALID)
@@ -711,20 +778,32 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
 
 		Assert(header.compressed_size <= BLCKSZ);
 
 		/* read a page from file */
 		read_len = fread(compressed_page.data, 1,
						 MAXALIGN(header.compressed_size), in);
 		if (read_len != MAXALIGN(header.compressed_size))
			elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
				 blknum, file->path, read_len, header.compressed_size);
 
-		if (header.compressed_size != BLCKSZ)
+		/*
+		 * if page size is smaller than BLCKSZ, decompress the page.
+		 * BUGFIX for versions < 2.0.23: if page size is equal to BLCKSZ.
+		 * we have to check, whether it is compressed or not using
+		 * page_may_be_compressed() function.
+		 */
+		if (header.compressed_size != BLCKSZ
+			|| page_may_be_compressed(compressed_page.data, file->compress_alg,
+									  backup_version))
 		{
-			int32		uncompressed_size = 0;
+			const char *errormsg = NULL;
 
 			uncompressed_size = do_decompress(page.data, BLCKSZ,
											  compressed_page.data,
											  header.compressed_size,
-											  file->compress_alg);
+											  file->compress_alg, &errormsg);
+			if (uncompressed_size < 0 && errormsg != NULL)
+				elog(WARNING, "An error occured during decompressing block %u of file \"%s\": %s",
+					 blknum, file->path, errormsg);
 
 			if (uncompressed_size != BLCKSZ)
 				elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
@@ -748,7 +827,11 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
 					 blknum, file->path, strerror(errno));
 		}
 
-		if (header.compressed_size < BLCKSZ)
+		/* if we uncompressed the page - write page.data,
+		 * if page wasn't compressed -
+		 * write what we've read - compressed_page.data
+		 */
+		if (uncompressed_size == BLCKSZ)
 		{
 			if (fwrite(page.data, 1, BLCKSZ, out) != BLCKSZ)
 				elog(ERROR, "cannot write block %u of \"%s\": %s",

@@ -756,7 +839,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
 		}
 		else
 		{
-			/* if page wasn't compressed, we've read full block */
+			/* */
 			if (fwrite(compressed_page.data, 1, BLCKSZ, out) != BLCKSZ)
 				elog(ERROR, "cannot write block %u of \"%s\": %s",
 					 blknum, file->path, strerror(errno));
@@ -839,7 +922,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
 	struct stat st;
 	pg_crc32	crc;
 
-	INIT_CRC32C(crc);
+	INIT_TRADITIONAL_CRC32(crc);
 
 	/* reset size summary */
 	file->read_size = 0;

@@ -849,7 +932,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
 	in = fopen(file->path, PG_BINARY_R);
 	if (in == NULL)
 	{
-		FIN_CRC32C(crc);
+		FIN_TRADITIONAL_CRC32(crc);
 		file->crc = crc;
 
 		/* maybe deleted, it's not error */

@@ -898,7 +981,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
 					 strerror(errno_tmp));
 		}
 		/* update CRC */
-		COMP_CRC32C(crc, buf, read_len);
+		COMP_TRADITIONAL_CRC32(crc, buf, read_len);
 
 		file->read_size += read_len;
 	}

@@ -925,14 +1008,14 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
 				 strerror(errno_tmp));
 		}
 		/* update CRC */
-		COMP_CRC32C(crc, buf, read_len);
+		COMP_TRADITIONAL_CRC32(crc, buf, read_len);
 
 		file->read_size += read_len;
 	}
 
 	file->write_size = (int64) file->read_size;
 	/* finish CRC calculation and store into pgFile */
-	FIN_CRC32C(crc);
+	FIN_TRADITIONAL_CRC32(crc);
 	file->crc = crc;
 
 	/* update file permission */
@@ -954,22 +1037,6 @@ copy_file(const char *from_root, const char *to_root, pgFile *file)
 	return true;
 }
 
-/*
- * Move file from one backup to another.
- * We do not apply compression to these files, because
- * it is either small control file or already compressed cfs file.
- */
-void
-move_file(const char *from_root, const char *to_root, pgFile *file)
-{
-	char		to_path[MAXPGPATH];
-
-	join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
-	if (rename(file->path, to_path) == -1)
-		elog(ERROR, "Cannot move file \"%s\" to path \"%s\": %s",
-			 file->path, to_path, strerror(errno));
-}
-
 #ifdef HAVE_LIBZ
 /*
  * Show error during work with compressed file
@@ -1350,7 +1417,7 @@ calc_file_checksum(pgFile *file)
 	pg_crc32	crc;
 
 	Assert(S_ISREG(file->mode));
-	INIT_CRC32C(crc);
+	INIT_TRADITIONAL_CRC32(crc);
 
 	/* reset size summary */
 	file->read_size = 0;

@@ -1360,7 +1427,7 @@ calc_file_checksum(pgFile *file)
 	in = fopen(file->path, PG_BINARY_R);
 	if (in == NULL)
 	{
-		FIN_CRC32C(crc);
+		FIN_TRADITIONAL_CRC32(crc);
 		file->crc = crc;
 
 		/* maybe deleted, it's not error */

@@ -1387,7 +1454,7 @@ calc_file_checksum(pgFile *file)
 			break;
 
 		/* update CRC */
-		COMP_CRC32C(crc, buf, read_len);
+		COMP_TRADITIONAL_CRC32(crc, buf, read_len);
 
 		file->write_size += read_len;
 		file->read_size += read_len;

@@ -1402,7 +1469,7 @@ calc_file_checksum(pgFile *file)
 	}
 
 	/* finish CRC calculation and store into pgFile */
-	FIN_CRC32C(crc);
+	FIN_TRADITIONAL_CRC32(crc);
 	file->crc = crc;
 
 	fclose(in);
@@ -1501,8 +1568,8 @@ validate_one_page(Page page, pgFile *file,
 		lsn = PageXLogRecPtrGet(phdr->pd_lsn);
 
 		if (lsn > stop_lsn)
-			elog(WARNING, "File: %s, block %u, checksum is not enabled."
-				 "page is from future: pageLSN %X/%X stopLSN %X/%X",
+			elog(WARNING, "File: %s, block %u, checksum is not enabled. "
+				 "Page is from future: pageLSN %X/%X stopLSN %X/%X",
 				 file->path, blknum, (uint32) (lsn >> 32), (uint32) lsn,
 				 (uint32) (stop_lsn >> 32), (uint32) stop_lsn);
 		else

@@ -1515,8 +1582,8 @@ validate_one_page(Page page, pgFile *file,
 		lsn = PageXLogRecPtrGet(phdr->pd_lsn);
 
 		if (lsn > stop_lsn)
-			elog(WARNING, "File: %s, block %u, checksum is correct."
-				 "page is from future: pageLSN %X/%X stopLSN %X/%X",
+			elog(WARNING, "File: %s, block %u, checksum is correct. "
+				 "Page is from future: pageLSN %X/%X stopLSN %X/%X",
 				 file->path, blknum, (uint32) (lsn >> 32), (uint32) lsn,
 				 (uint32) (stop_lsn >> 32), (uint32) stop_lsn);
 		else
@@ -1529,11 +1596,14 @@ validate_one_page(Page page, pgFile *file,
 
 /* Valiate pages of datafile in backup one by one */
 bool
-check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
+check_file_pages(pgFile *file, XLogRecPtr stop_lsn,
+				 uint32 checksum_version, uint32 backup_version)
 {
 	size_t		read_len = 0;
 	bool		is_valid = true;
 	FILE	   *in;
+	pg_crc32	crc;
+	bool		use_crc32c = (backup_version <= 20021);
 
 	elog(VERBOSE, "validate relation blocks for file %s", file->name);
 
@@ -1550,6 +1620,9 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
 				 file->path, strerror(errno));
 	}
 
+	/* calc CRC of backup file */
+	INIT_FILE_CRC32(use_crc32c, crc);
+
 	/* read and validate pages one by one */
 	while (true)
 	{

@@ -1574,6 +1647,8 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
 				 blknum, file->path, strerror(errno_tmp));
 		}
 
+		COMP_FILE_CRC32(use_crc32c, crc, &header, read_len);
+
 		if (header.block < blknum)
 			elog(ERROR, "backup is broken at file->path %s block %u",
 				 file->path, blknum);
@@ -1595,19 +1670,34 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
 			elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
 				 blknum, file->path, read_len, header.compressed_size);
 
-		if (header.compressed_size != BLCKSZ)
+		COMP_FILE_CRC32(use_crc32c, crc, compressed_page.data, read_len);
+
+		if (header.compressed_size != BLCKSZ
+			|| page_may_be_compressed(compressed_page.data, file->compress_alg,
+									  backup_version))
 		{
 			int32		uncompressed_size = 0;
+			const char *errormsg = NULL;
 
 			uncompressed_size = do_decompress(page.data, BLCKSZ,
											  compressed_page.data,
											  header.compressed_size,
-											  file->compress_alg);
+											  file->compress_alg,
+											  &errormsg);
+			if (uncompressed_size < 0 && errormsg != NULL)
+				elog(WARNING, "An error occured during decompressing block %u of file \"%s\": %s",
+					 blknum, file->path, errormsg);
 
 			if (uncompressed_size != BLCKSZ)
+			{
+				if (header.compressed_size == BLCKSZ)
+				{
+					is_valid = false;
+					continue;
+				}
 				elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
 					 file->path, uncompressed_size);
+			}
 			if (validate_one_page(page.data, file, blknum,
								  stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
				is_valid = false;
@@ -1620,5 +1710,15 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
 		}
 	}
 
+	FIN_FILE_CRC32(use_crc32c, crc);
 	fclose(in);
 
+	if (crc != file->crc)
+	{
+		elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
+			 file->path, file->crc, crc);
+		is_valid = false;
+	}
+
 	return is_valid;
 }
src/delete.c (29 lines changed)
@@ -3,7 +3,7 @@
  * delete.c: delete backup files.
  *
  * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
  */
@@ -14,7 +14,6 @@
 #include <time.h>
 #include <unistd.h>
 
-static int	delete_backup_files(pgBackup *backup);
 static void delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
 							uint32 xlog_seg_size);
 
@@ -245,7 +244,7 @@ do_retention_purge(void)
 * Delete backup files of the backup and update the status of the backup to
 * BACKUP_STATUS_DELETED.
 */
-static int
+void
 delete_backup_files(pgBackup *backup)
 {
 	size_t		i;
@@ -257,11 +256,15 @@ delete_backup_files(pgBackup *backup)
 	 * If the backup was deleted already, there is nothing to do.
 	 */
 	if (backup->status == BACKUP_STATUS_DELETED)
-		return 0;
+	{
+		elog(WARNING, "Backup %s already deleted",
+			 base36enc(backup->start_time));
+		return;
+	}
 
 	time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
 
-	elog(INFO, "delete: %s %s",
+	elog(INFO, "Delete: %s %s",
 		 base36enc(backup->start_time), timestamp);
 
 	/*
@@ -283,17 +286,17 @@ delete_backup_files(pgBackup *backup)
 		pgFile	   *file = (pgFile *) parray_get(files, i);
 
 		/* print progress */
-		elog(VERBOSE, "delete file(%zd/%lu) \"%s\"", i + 1,
+		elog(VERBOSE, "Delete file(%zd/%lu) \"%s\"", i + 1,
 			 (unsigned long) parray_num(files), file->path);
 
 		if (remove(file->path))
 		{
-			elog(WARNING, "can't remove \"%s\": %s", file->path,
-				 strerror(errno));
-			parray_walk(files, pgFileFree);
-			parray_free(files);
-
-			return 1;
+			if (errno == ENOENT)
+				elog(VERBOSE, "File \"%s\" is absent", file->path);
+			else
+				elog(ERROR, "Cannot remove \"%s\": %s", file->path,
+					 strerror(errno));
+			return;
 		}
 	}
 
@@ -301,7 +304,7 @@ delete_backup_files(pgBackup *backup)
 	parray_free(files);
 	backup->status = BACKUP_STATUS_DELETED;
 
-	return 0;
+	return;
 }
 
 /*
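delete_backup_files() now treats a vanished file as already deleted and only fails on real errors. The same remove()-with-ENOENT pattern in isolation (hypothetical path):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void
    delete_one(const char *path)
    {
        if (remove(path) != 0)
        {
            /* A missing file means the work is already done. */
            if (errno == ENOENT)
                printf("file \"%s\" is absent, nothing to do\n", path);
            else
            {
                fprintf(stderr, "cannot remove \"%s\": %s\n",
                        path, strerror(errno));
                exit(1);
            }
        }
    }

    int main(void)
    {
        delete_one("/tmp/definitely-missing-file");
        return 0;
    }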
src/dir.c (12 lines changed)
@@ -3,7 +3,7 @@
  * dir.c: directory operation utility.
  *
  * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -262,7 +262,7 @@ delete_file:
 }
 
 pg_crc32
-pgFileGetCRC(const char *file_path)
+pgFileGetCRC(const char *file_path, bool use_crc32c)
 {
 	FILE	   *fp;
 	pg_crc32	crc = 0;
@@ -277,20 +277,20 @@ pgFileGetCRC(const char *file_path)
 			 file_path, strerror(errno));
 
 	/* calc CRC of backup file */
-	INIT_CRC32C(crc);
+	INIT_FILE_CRC32(use_crc32c, crc);
 	while ((len = fread(buf, 1, sizeof(buf), fp)) == sizeof(buf))
 	{
 		if (interrupted)
 			elog(ERROR, "interrupted during CRC calculation");
-		COMP_CRC32C(crc, buf, len);
+		COMP_FILE_CRC32(use_crc32c, crc, buf, len);
 	}
 	errno_tmp = errno;
 	if (!feof(fp))
 		elog(WARNING, "cannot read \"%s\": %s", file_path,
 			 strerror(errno_tmp));
 	if (len > 0)
-		COMP_CRC32C(crc, buf, len);
-	FIN_CRC32C(crc);
+		COMP_FILE_CRC32(use_crc32c, crc, buf, len);
+	FIN_FILE_CRC32(use_crc32c, crc);
 
 	fclose(fp);
 
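The INIT/COMP/FIN_FILE_CRC32 trio used above dispatches at run time between CRC-32C and the "traditional" CRC-32, keyed by the use_crc32c flag (derived from backup_version in check_file_pages()). A hedged sketch of how such dispatching macros can be built; the per-byte update functions here are toy stand-ins, not the project's real table-driven CRCs:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pg_crc32;

    /* Toy stand-ins for the two CRC flavours. */
    static pg_crc32
    update_crc32c(pg_crc32 crc, uint8_t b)
    {
        return (crc << 5) ^ (crc >> 2) ^ b ^ 0x82F63B78u;
    }

    static pg_crc32
    update_crc32_traditional(pg_crc32 crc, uint8_t b)
    {
        return (crc << 5) ^ (crc >> 2) ^ b ^ 0xEDB88320u;
    }

    /* One flag chooses the variant everywhere, so callers never
     * repeat the decision. Both variants start from all-ones and
     * are finalized by inverting. */
    #define INIT_FILE_CRC32(use_crc32c, crc)  ((crc) = 0xFFFFFFFFu)
    #define COMP_FILE_CRC32(use_crc32c, crc, data, len) \
        do { \
            const uint8_t *p_ = (const uint8_t *) (data); \
            size_t         n_; \
            for (n_ = 0; n_ < (len); n_++) \
                (crc) = (use_crc32c) ? update_crc32c((crc), p_[n_]) \
                                     : update_crc32_traditional((crc), p_[n_]); \
        } while (0)
    #define FIN_FILE_CRC32(use_crc32c, crc)   ((crc) ^= 0xFFFFFFFFu)

    int main(void)
    {
        pg_crc32    crc;
        const char  buf[] = "backup bytes";
        int         use_crc32c = 1;    /* cf. backup_version <= 20021 above */

        INIT_FILE_CRC32(use_crc32c, crc);
        COMP_FILE_CRC32(use_crc32c, crc, buf, sizeof(buf) - 1);
        FIN_FILE_CRC32(use_crc32c, crc);
        printf("crc = %08X\n", crc);
        return 0;
    }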
src/fetch.c

@@ -3,7 +3,7 @@
  * fetch.c
  *    Functions for fetching files from PostgreSQL data directory
  *
- * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 *
 *-------------------------------------------------------------------------
 */
src/help.c (13 lines changed)
@@ -2,7 +2,7 @@
 *
 * help.c
 *
- * Copyright (c) 2017-2017, Postgres Professional
+ * Copyright (c) 2017-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -118,6 +118,7 @@ help_pg_probackup(void)
 	printf(_("                 [--master-db=db_name] [--master-host=host_name]\n"));
 	printf(_("                 [--master-port=port] [--master-user=user_name]\n"));
 	printf(_("                 [--replica-timeout=timeout]\n"));
+	printf(_("                 [--skip-block-validation]\n"));
 
 	printf(_("\n  %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
 	printf(_("                 [-D pgdata-path] [-i backup-id] [--progress]\n"));
@@ -127,12 +128,14 @@ help_pg_probackup(void)
 	printf(_("                 [--recovery-target-action=pause|promote|shutdown]\n"));
 	printf(_("                 [--restore-as-replica]\n"));
 	printf(_("                 [--no-validate]\n"));
+	printf(_("                 [--skip-block-validation]\n"));
 
 	printf(_("\n  %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME);
 	printf(_("                 [-i backup-id] [--progress]\n"));
 	printf(_("                 [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
 	printf(_("                 [--recovery-target-name=target-name]\n"));
 	printf(_("                 [--timeline=timeline]\n"));
+	printf(_("                 [--skip-block-validation]\n"));
 
 	printf(_("\n  %s show -B backup-path\n"), PROGRAM_NAME);
 	printf(_("                 [--instance=instance_name [-i backup-id]]\n"));
@@ -203,7 +206,8 @@ help_backup(void)
 	printf(_("                 [-w --no-password] [-W --password]\n"));
 	printf(_("                 [--master-db=db_name] [--master-host=host_name]\n"));
 	printf(_("                 [--master-port=port] [--master-user=user_name]\n"));
-	printf(_("                 [--replica-timeout=timeout]\n\n"));
+	printf(_("                 [--replica-timeout=timeout]\n"));
+	printf(_("                 [--skip-block-validation]\n\n"));
 
 	printf(_("  -B, --backup-path=backup-path  location of the backup storage area\n"));
 	printf(_("  -b, --backup-mode=backup-mode  backup mode=FULL|PAGE|DELTA|PTRACK\n"));
@@ -215,6 +219,7 @@ help_backup(void)
 	printf(_("  -j, --threads=NUM              number of parallel threads\n"));
 	printf(_("      --archive-timeout=timeout  wait timeout for WAL segment archiving (default: 5min)\n"));
 	printf(_("      --progress                 show progress\n"));
+	printf(_("      --skip-block-validation    set to validate only file-level checksum\n"));
 
 	printf(_("\n  Logging options:\n"));
 	printf(_("      --log-level-console=log-level-console\n"));
@@ -279,6 +284,7 @@ help_restore(void)
 	printf(_("                 [--immediate] [--recovery-target-name=target-name]\n"));
 	printf(_("                 [--recovery-target-action=pause|promote|shutdown]\n"));
 	printf(_("                 [--restore-as-replica] [--no-validate]\n\n"));
+	printf(_("                 [--skip-block-validation]\n\n"));
 
 	printf(_("  -B, --backup-path=backup-path  location of the backup storage area\n"));
 	printf(_("      --instance=instance_name   name of the instance\n"));
@@ -305,6 +311,7 @@ help_restore(void)
 	printf(_("  -R, --restore-as-replica       write a minimal recovery.conf in the output directory\n"));
 	printf(_("                                 to ease setting up a standby server\n"));
 	printf(_("      --no-validate              disable backup validation during restore\n"));
+	printf(_("      --skip-block-validation    set to validate only file-level checksum\n"));
 
 	printf(_("\n  Logging options:\n"));
 	printf(_("      --log-level-console=log-level-console\n"));
@@ -335,6 +342,7 @@ help_validate(void)
 	printf(_("                 [-i backup-id] [--progress]\n"));
 	printf(_("                 [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n"));
 	printf(_("                 [--timeline=timeline]\n\n"));
+	printf(_("                 [--skip-block-validation]\n\n"));
 
 	printf(_("  -B, --backup-path=backup-path  location of the backup storage area\n"));
 	printf(_("      --instance=instance_name   name of the instance\n"));
@ -348,6 +356,7 @@ help_validate(void)
|
||||
printf(_(" --timeline=timeline recovering into a particular timeline\n"));
|
||||
printf(_(" --recovery-target-name=target-name\n"));
|
||||
printf(_(" the named restore point to which recovery will proceed\n"));
|
||||
printf(_(" --skip-block-validation set to validate only file-level checksum\n"));
|
||||
|
||||
printf(_("\n Logging options:\n"));
|
||||
printf(_(" --log-level-console=log-level-console\n"));
|
||||
|
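The --skip-block-validation flag added across these commands restricts validation to whole-file checksums, skipping the per-block checks for data files. A usage sketch (backup path, instance name, and backup ID are hypothetical):

	pg_probackup validate -B /mnt/backups --instance=node -i QZPS8A --skip-block-validation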
@@ -3,7 +3,7 @@
 * init.c: - initialize backup catalog.
 *
 * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
161	src/merge.c
@@ -59,7 +59,7 @@ do_merge(time_t backup_id)
 	if (instance_name == NULL)
 		elog(ERROR, "required parameter is not specified: --instance");

-	elog(LOG, "Merge started");
+	elog(INFO, "Merge started");

 	catalog_lock();

@@ -77,7 +77,8 @@ do_merge(time_t backup_id)
 	{
 		if (backup->status != BACKUP_STATUS_OK &&
-			backup->status != BACKUP_STATUS_MERGING)
+			/* It is possible that previous merging was interrupted */
+			backup->status != BACKUP_STATUS_MERGING &&
+			backup->status != BACKUP_STATUS_DELETING)
 			elog(ERROR, "Backup %s has status: %s",
 				 base36enc(backup->start_time), status2str(backup->status));

@@ -128,17 +129,21 @@ do_merge(time_t backup_id)
 	 */
 	for (i = full_backup_idx; i > dest_backup_idx; i--)
 	{
-		pgBackup   *to_backup = (pgBackup *) parray_get(backups, i);
 		pgBackup   *from_backup = (pgBackup *) parray_get(backups, i - 1);

-		merge_backups(to_backup, from_backup);
+		full_backup = (pgBackup *) parray_get(backups, i);
+		merge_backups(full_backup, from_backup);
 	}

+	pgBackupValidate(full_backup);
+	if (full_backup->status == BACKUP_STATUS_CORRUPT)
+		elog(ERROR, "Merging of backup %s failed", base36enc(backup_id));
+
 	/* cleanup */
 	parray_walk(backups, pgBackupFree);
 	parray_free(backups);

-	elog(LOG, "Merge completed");
+	elog(INFO, "Merge of backup %s completed", base36enc(backup_id));
 }
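In words: besides OK, a backup may legitimately be found in MERGING (interrupted while copying files) or DELETING (interrupted while removing the source) state, and the merge is simply resumed. A minimal restatement of that eligibility test as a standalone predicate (the helper name is hypothetical):

	static bool
	merge_is_resumable(BackupStatus status)
	{
		/* OK: normal case; MERGING/DELETING: a previous merge was interrupted */
		return status == BACKUP_STATUS_OK ||
			   status == BACKUP_STATUS_MERGING ||
			   status == BACKUP_STATUS_DELETING;
	}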
 /*
@@ -166,7 +171,36 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
 	int			i;
 	bool		merge_isok = true;

-	elog(LOG, "Merging backup %s with backup %s", from_backup_id, to_backup_id);
+	elog(INFO, "Merging backup %s with backup %s", from_backup_id, to_backup_id);
+
+	/*
+	 * Validate to_backup only if it is BACKUP_STATUS_OK. If it has
+	 * BACKUP_STATUS_MERGING status then it isn't valid backup until merging
+	 * finished.
+	 */
+	if (to_backup->status == BACKUP_STATUS_OK)
+	{
+		pgBackupValidate(to_backup);
+		if (to_backup->status == BACKUP_STATUS_CORRUPT)
+			elog(ERROR, "Interrupt merging");
+	}
+
+	/*
+	 * It is OK to validate from_backup if it has BACKUP_STATUS_OK or
+	 * BACKUP_STATUS_MERGING status.
+	 */
+	Assert(from_backup->status == BACKUP_STATUS_OK ||
+		   from_backup->status == BACKUP_STATUS_MERGING);
+	pgBackupValidate(from_backup);
+	if (from_backup->status == BACKUP_STATUS_CORRUPT)
+		elog(ERROR, "Interrupt merging");
+
+	/*
+	 * Previous merging was interrupted during deleting source backup. It is
+	 * safe just to delete it again.
+	 */
+	if (from_backup->status == BACKUP_STATUS_DELETING)
+		goto delete_source_backup;

 	to_backup->status = BACKUP_STATUS_MERGING;
 	write_backup_status(to_backup);

@@ -246,68 +280,10 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
 	if (!merge_isok)
 		elog(ERROR, "Data files merging failed");

-	/*
-	 * Files were copied into to_backup and deleted from from_backup. Remove
-	 * remaining directories from from_backup.
-	 */
-	parray_qsort(files, pgFileComparePathDesc);
-	for (i = 0; i < parray_num(files); i++)
-	{
-		pgFile	   *file = (pgFile *) parray_get(files, i);
-
-		if (!S_ISDIR(file->mode))
-			continue;
-
-		if (rmdir(file->path))
-			elog(ERROR, "Could not remove directory \"%s\": %s",
-				 file->path, strerror(errno));
-	}
-	if (rmdir(from_database_path))
-		elog(ERROR, "Could not remove directory \"%s\": %s",
-			 from_database_path, strerror(errno));
-	if (unlink(control_file))
-		elog(ERROR, "Could not remove file \"%s\": %s",
-			 control_file, strerror(errno));
-
-	pgBackupGetPath(from_backup, control_file, lengthof(control_file),
-					BACKUP_CONTROL_FILE);
-	if (unlink(control_file))
-		elog(ERROR, "Could not remove file \"%s\": %s",
-			 control_file, strerror(errno));
-
-	if (rmdir(from_backup_path))
-		elog(ERROR, "Could not remove directory \"%s\": %s",
-			 from_backup_path, strerror(errno));
-
-	/*
-	 * Delete files which are not in from_backup file list.
-	 */
-	for (i = 0; i < parray_num(to_files); i++)
-	{
-		pgFile	   *file = (pgFile *) parray_get(to_files, i);
-
-		if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
-		{
-			pgFileDelete(file);
-			elog(LOG, "Deleted \"%s\"", file->path);
-		}
-	}
-
-	/*
-	 * Rename FULL backup directory.
-	 */
-	if (rename(to_backup_path, from_backup_path) == -1)
-		elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
-			 to_backup_path, from_backup_path, strerror(errno));
-
-	/*
-	 * Update to_backup metadata.
-	 */
-	pgBackupCopy(to_backup, from_backup);
 	/* Correct metadata */
 	to_backup->backup_mode = BACKUP_MODE_FULL;
 	to_backup->status = BACKUP_STATUS_OK;
 	to_backup->parent_backup = INVALID_BACKUP_ID;
 	/* Compute summary of size of regular files in the backup */
 	to_backup->data_bytes = 0;
 	for (i = 0; i < parray_num(files); i++)
@@ -328,8 +304,47 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
 	else
 		to_backup->wal_bytes = BYTES_INVALID;

-	pgBackupWriteFileList(to_backup, files, from_database_path);
-	write_backup_status(to_backup);
+	write_backup_filelist(to_backup, files, from_database_path);
+	write_backup(to_backup);
+
+delete_source_backup:
+	/*
+	 * Files were copied into to_backup. It is time to remove source backup
+	 * entirely.
+	 */
+	delete_backup_files(from_backup);
+
+	/*
+	 * Delete files which are not in from_backup file list.
+	 */
+	for (i = 0; i < parray_num(to_files); i++)
+	{
+		pgFile	   *file = (pgFile *) parray_get(to_files, i);
+
+		if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
+		{
+			pgFileDelete(file);
+			elog(VERBOSE, "Deleted \"%s\"", file->path);
+		}
+	}
+
+	/*
+	 * Rename FULL backup directory.
+	 */
+	elog(INFO, "Rename %s to %s", to_backup_id, from_backup_id);
+	if (rename(to_backup_path, from_backup_path) == -1)
+		elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
+			 to_backup_path, from_backup_path, strerror(errno));
+
+	/*
+	 * Merging finished, now we can safely update ID of the destination backup.
+	 */
+	pgBackupCopy(to_backup, from_backup);
+	/* Correct metadata */
+	to_backup->backup_mode = BACKUP_MODE_FULL;
+	to_backup->status = BACKUP_STATUS_OK;
+	to_backup->parent_backup = INVALID_BACKUP_ID;
+	write_backup(to_backup);

 	/* Cleanup */
 	pfree(threads_args);

@@ -460,14 +475,16 @@ merge_files(void *arg)
 			file->path = to_path_tmp;

 			/* Decompress first/target file */
-			restore_data_file(tmp_file_path, file, false, false);
+			restore_data_file(tmp_file_path, file, false, false,
+							  parse_program_version(to_backup->program_version));

 			file->path = prev_path;
 		}
 		/* Merge second/source file with first/target file */
 		restore_data_file(tmp_file_path, file,
 						  from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
-						  false);
+						  false,
+						  parse_program_version(from_backup->program_version));

 		elog(VERBOSE, "Compress file and save it to the directory \"%s\"",
 			 argument->to_root);
@@ -499,19 +516,19 @@ merge_files(void *arg)
 			/* We can merge in-place here */
 			restore_data_file(to_path_tmp, file,
 							  from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
-							  true);
+							  true,
+							  parse_program_version(from_backup->program_version));

 			/*
 			 * We need to calculate write_size, restore_data_file() doesn't
 			 * do that.
 			 */
 			file->write_size = pgFileSize(to_path_tmp);
-			file->crc = pgFileGetCRC(to_path_tmp);
+			file->crc = pgFileGetCRC(to_path_tmp, false);
 		}
 		pgFileDelete(file);
 	}
 	else
-		move_file(argument->from_root, argument->to_root, file);
+		copy_file(argument->from_root, argument->to_root, file);

 	if (file->write_size != BYTES_INVALID)
 		elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes",
@@ -237,10 +237,11 @@ doExtractPageMap(void *arg)
 		 */
 		if (XLogRecPtrIsInvalid(found))
 		{
-			elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X",
+			elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X. %s",
 				 private_data->thread_num,
 				 (uint32) (extract_arg->startpoint >> 32),
-				 (uint32) (extract_arg->startpoint));
+				 (uint32) (extract_arg->startpoint),
+				 (xlogreader->errormsg_buf[0] != '\0')?xlogreader->errormsg_buf:"");
 			PrintXLogCorruptionMsg(private_data, ERROR);
 		}
 		extract_arg->startpoint = found;
@@ -793,6 +794,10 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
 	private_data = (XLogPageReadPrivate *) xlogreader->private_data;
 	targetPageOff = targetPagePtr % private_data->xlog_seg_size;

+	if (interrupted)
+		elog(ERROR, "Thread [%d]: Interrupted during WAL reading",
+			 private_data->thread_num);
+
 	/*
 	 * See if we need to switch to a new segment because the requested record
 	 * is not in the currently open one.
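The new interrupted check lets a long WAL scan abort promptly once a signal arrives. The underlying pattern is the usual one-flag handler; a generic sketch (pg_probackup's own flag is declared elsewhere, so the names here are illustrative):

	#include <signal.h>

	static volatile sig_atomic_t interrupted = 0;

	static void
	on_interrupt(int signum)
	{
		interrupted = 1;	/* handlers only set the flag; work happens outside */
	}

	/* ...and inside any long-running read loop:
	 * if (interrupted)
	 *     elog(ERROR, "Interrupted during WAL reading");
	 */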
@@ -3,7 +3,7 @@
 * pg_probackup.c: Backup/Recovery manager for PostgreSQL.
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -17,7 +17,7 @@

 #include "utils/thread.h"

-const char *PROGRAM_VERSION = "2.0.21";
+const char *PROGRAM_VERSION = "2.0.24";
 const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
 const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";

@@ -91,6 +91,8 @@ static pgRecoveryTarget *recovery_target_options = NULL;
 bool restore_as_replica = false;
 bool restore_no_validate = false;

+bool skip_block_validation = false;
+
 /* delete options */
 bool delete_wal = false;
 bool delete_expired = false;
@@ -181,6 +183,7 @@ static pgut_option options[] =
 	{ 'b', 'R', "restore-as-replica", &restore_as_replica, SOURCE_CMDLINE },
 	{ 'b', 27, "no-validate", &restore_no_validate, SOURCE_CMDLINE },
 	{ 's', 28, "lsn", &target_lsn, SOURCE_CMDLINE },
+	{ 'b', 29, "skip-block-validation", &skip_block_validation, SOURCE_CMDLINE },
 	/* delete options */
 	{ 'b', 130, "wal", &delete_wal, SOURCE_CMDLINE },
 	{ 'b', 131, "expired", &delete_expired, SOURCE_CMDLINE },
@@ -3,7 +3,7 @@
 * pg_probackup.h: Backup/Recovery manager for PostgreSQL.
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -58,6 +58,10 @@
 #define XID_FMT "%u"
 #endif

+/* Check if an XLogRecPtr value is pointed to 0 offset */
+#define XRecOffIsNull(xlrp) \
+		((xlrp) % XLOG_BLCKSZ == 0)
+
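XRecOffIsNull() reports whether an LSN points at offset zero of a WAL block. A small illustration with arbitrary values (XLOG_BLCKSZ defaults to 8192):

	XLogRecPtr on_boundary = 3 * XLOG_BLCKSZ;		/* XRecOffIsNull() -> true  */
	XLogRecPtr mid_block   = 3 * XLOG_BLCKSZ + 24;	/* XRecOffIsNull() -> false */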
||||
typedef enum CompressAlg
|
||||
{
|
||||
NOT_DEFINED_COMPRESS = 0,
|
||||
@ -66,6 +70,28 @@ typedef enum CompressAlg
|
||||
ZLIB_COMPRESS,
|
||||
} CompressAlg;
|
||||
|
||||
#define INIT_FILE_CRC32(use_crc32c, crc) \
|
||||
do { \
|
||||
if (use_crc32c) \
|
||||
INIT_CRC32C(crc); \
|
||||
else \
|
||||
INIT_TRADITIONAL_CRC32(crc); \
|
||||
} while (0)
|
||||
#define COMP_FILE_CRC32(use_crc32c, crc, data, len) \
|
||||
do { \
|
||||
if (use_crc32c) \
|
||||
COMP_CRC32C((crc), (data), (len)); \
|
||||
else \
|
||||
COMP_TRADITIONAL_CRC32(crc, data, len); \
|
||||
} while (0)
|
||||
#define FIN_FILE_CRC32(use_crc32c, crc) \
|
||||
do { \
|
||||
if (use_crc32c) \
|
||||
FIN_CRC32C(crc); \
|
||||
else \
|
||||
FIN_TRADITIONAL_CRC32(crc); \
|
||||
} while (0)
|
||||
|
||||
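The *_FILE_CRC32 wrappers let one code path compute either CRC flavor, selected at run time. A minimal sketch of hashing a buffer with them (the buffer is hypothetical; the INIT/COMP/FIN macros come from PostgreSQL's CRC headers):

	pg_crc32	crc;
	const char	buf[] = "file contents";
	bool		use_crc32c = false;		/* false: CRC-32, used by 2.0.22+ backups */

	INIT_FILE_CRC32(use_crc32c, crc);
	COMP_FILE_CRC32(use_crc32c, crc, buf, sizeof(buf));
	FIN_FILE_CRC32(use_crc32c, crc);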
 /* Information about single file (or dir) in backup */
 typedef struct pgFile
 {
@@ -346,6 +372,7 @@ extern bool exclusive_backup;

 /* restore options */
 extern bool restore_as_replica;
+extern bool skip_block_validation;

 /* delete options */
 extern bool delete_wal;
@@ -398,7 +425,6 @@ extern int do_restore_or_validate(time_t target_backup_id,
 extern bool satisfy_timeline(const parray *timelines, const pgBackup *backup);
 extern bool satisfy_recovery_target(const pgBackup *backup,
 									const pgRecoveryTarget *rt);
-extern parray * readTimeLineHistory_probackup(TimeLineID targetTLI);
 extern pgRecoveryTarget *parseRecoveryTargetOptions(
 	const char *target_time, const char *target_xid,
 	const char *target_inclusive, TimeLineID target_tli, const char* target_lsn,
@@ -432,6 +458,7 @@ extern int do_show(time_t requested_backup_id);

 /* in delete.c */
 extern void do_delete(time_t backup_id);
+extern void delete_backup_files(pgBackup *backup);
 extern int do_retention_purge(void);
 extern int do_delete_instance(void);

@@ -462,10 +489,11 @@ extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
 											  TimeLineID tli);
 extern void catalog_lock(void);
 extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
-extern void pgBackupWriteFileList(pgBackup *backup, parray *files,
+extern void write_backup_filelist(pgBackup *backup, parray *files,
 								  const char *root);

-extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir);
+extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len,
+							const char *subdir);
 extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
 							 const char *subdir1, const char *subdir2);
 extern int pgBackupCreateDir(pgBackup *backup);
@@ -510,7 +538,7 @@ extern pgFile *pgFileNew(const char *path, bool omit_symlink, bool is_extra);
 extern pgFile *pgFileInit(const char *path);
 extern void pgFileDelete(pgFile *file);
 extern void pgFileFree(void *file);
-extern pg_crc32 pgFileGetCRC(const char *file_path);
+extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c);
 extern int pgFileComparePath(const void *f1, const void *f2);
 extern int pgFileComparePathDesc(const void *f1, const void *f2);
 extern int pgFileCompareLinked(const void *f1, const void *f2);
@@ -524,9 +552,9 @@ extern bool backup_data_file(backup_files_arg* arguments,
 							 CompressAlg calg, int clevel);
 extern void restore_data_file(const char *to_path,
 							  pgFile *file, bool allow_truncate,
-							  bool write_header);
+							  bool write_header,
+							  uint32 backup_version);
 extern bool copy_file(const char *from_root, const char *to_root, pgFile *file);
 extern void move_file(const char *from_root, const char *to_root, pgFile *file);
 extern void push_wal_file(const char *from_path, const char *to_path,
 						  bool is_compress, bool overwrite);
 extern void get_wal_file(const char *from_path, const char *to_path);
@@ -534,8 +562,8 @@ extern void get_wal_file(const char *from_path, const char *to_path);
 extern bool calc_file_checksum(pgFile *file);

 extern bool check_file_pages(pgFile* file,
-							 XLogRecPtr stop_lsn, uint32 checksum_version);
-
+							 XLogRecPtr stop_lsn,
+							 uint32 checksum_version, uint32 backup_version);
 /* parsexlog.c */
 extern void extractPageMap(const char *archivedir,
 						   TimeLineID tli, uint32 seg_size,
@@ -562,6 +590,7 @@ extern uint64 get_system_identifier(char *pgdata);
 extern uint64 get_remote_system_identifier(PGconn *conn);
 extern uint32 get_data_checksum_version(bool safe);
 extern uint32 get_xlog_seg_size(char *pgdata_path);
+extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn);

 extern void sanityChecks(void);
 extern void time2iso(char *buf, size_t len, time_t time);
@@ -571,7 +600,8 @@ extern void remove_not_digit(char *buf, size_t len, const char *str);
 extern const char *base36enc(long unsigned int value);
 extern char *base36enc_dup(long unsigned int value);
 extern long unsigned int base36dec(const char *text);
-extern int parse_server_version(char *server_version_str);
+extern uint32 parse_server_version(const char *server_version_str);
+extern uint32 parse_program_version(const char *program_version);

 #ifdef WIN32
 #ifdef _DEBUG
@@ -3,7 +3,7 @@
 * restore.c: restore DB cluster and archived WAL.
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -33,6 +33,7 @@ static void restore_backup(pgBackup *backup);
 static void create_recovery_conf(time_t backup_id,
 								 pgRecoveryTarget *rt,
 								 pgBackup *backup);
+static parray *read_timeline_history(TimeLineID targetTLI);
 static void *restore_files(void *arg);
 static void remove_deleted_files(pgBackup *backup);

@@ -138,7 +139,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,

 			elog(LOG, "target timeline ID = %u", rt->recovery_target_tli);
 			/* Read timeline history files from archives */
-			timelines = readTimeLineHistory_probackup(rt->recovery_target_tli);
+			timelines = read_timeline_history(rt->recovery_target_tli);

 			if (!satisfy_timeline(timelines, current_backup))
 			{
@@ -149,6 +150,9 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 				/* Try to find another backup that satisfies target timeline */
 				continue;
 			}
+
+			parray_walk(timelines, pfree);
+			parray_free(timelines);
 		}

 		if (!satisfy_recovery_target(current_backup, rt))
@@ -652,7 +656,8 @@ restore_files(void *arg)
 					 file->path + strlen(from_root) + 1);
 			restore_data_file(to_path, file,
 							  arguments->backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
-							  false);
+							  false,
+							  parse_program_version(arguments->backup->program_version));
 		}
 		else if (file->is_extra)
 			copy_file(from_root, file->extradir, file);
@@ -763,7 +768,7 @@ create_recovery_conf(time_t backup_id,
 * based on readTimeLineHistory() in timeline.c
 */
 parray *
-readTimeLineHistory_probackup(TimeLineID targetTLI)
+read_timeline_history(TimeLineID targetTLI)
 {
 	parray	   *result;
 	char		path[MAXPGPATH];
@@ -852,8 +857,7 @@ readTimeLineHistory_probackup(TimeLineID targetTLI)
 	entry = pgut_new(TimeLineHistoryEntry);
 	entry->tli = targetTLI;
 	/* LSN in target timeline is valid */
-	/* TODO ensure that -1UL --> -1L fix is correct */
-	entry->end = (uint32) (-1L << 32) | -1L;
+	entry->end = InvalidXLogRecPtr;
 	parray_insert(result, 0, entry);

 	return result;
@@ -885,7 +889,8 @@ satisfy_timeline(const parray *timelines, const pgBackup *backup)

 		timeline = (TimeLineHistoryEntry *) parray_get(timelines, i);
 		if (backup->tli == timeline->tli &&
-			backup->stop_lsn < timeline->end)
+			(XLogRecPtrIsInvalid(timeline->end) ||
+			 backup->stop_lsn < timeline->end))
 			return true;
 	}
 	return false;
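Since the target timeline's own history entry now carries end = InvalidXLogRecPtr, satisfy_timeline() treats a missing end point as "no upper bound": any backup on that timeline qualifies regardless of its stop_lsn. A sketch with made-up values (TimeLineHistoryEntry fields as used above):

	TimeLineHistoryEntry tip = { .tli = 2, .end = InvalidXLogRecPtr };
	/* a backup with tli == 2 satisfies the target no matter its stop_lsn;
	 * entries for older timelines still require stop_lsn < entry->end */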
124	src/util.c
@@ -3,7 +3,7 @@
 * util.c: log messages to log file or stderr, and misc code.
 *
 * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -14,6 +14,8 @@

 #include <time.h>

+#include <unistd.h>
+
 const char *
 base36enc(long unsigned int value)
 {
@@ -100,6 +102,44 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size)
 	checkControlFile(ControlFile);
 }

+/*
+ * Write ControlFile to pg_control
+ */
+static void
+writeControlFile(ControlFileData *ControlFile, char *path)
+{
+	int			fd;
+	char	   *buffer = NULL;
+
+#if PG_VERSION_NUM >= 100000
+	int			ControlFileSize = PG_CONTROL_FILE_SIZE;
+#else
+	int			ControlFileSize = PG_CONTROL_SIZE;
+#endif
+
+	/* copy controlFileSize */
+	buffer = pg_malloc(ControlFileSize);
+	memcpy(buffer, ControlFile, sizeof(ControlFileData));
+
+	/* Write pg_control */
+	unlink(path);
+	fd = open(path,
+			  O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
+			  S_IRUSR | S_IWUSR);
+
+	if (fd < 0)
+		elog(ERROR, "Failed to open file: %s", path);
+
+	if (write(fd, buffer, ControlFileSize) != ControlFileSize)
+		elog(ERROR, "Failed to overwrite file: %s", path);
+
+	if (fsync(fd) != 0)
+		elog(ERROR, "Failed to fsync file: %s", path);
+
+	close(fd);
+	pg_free(buffer);
+}
+
 /*
 * Utility shared by backup and restore to fetch the current timeline
 * used by a node.
@@ -250,6 +290,52 @@ get_data_checksum_version(bool safe)
 	return ControlFile.data_checksum_version;
 }

+/* MinRecoveryPoint 'as-is' is not to be trusted */
+void
+set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn)
+{
+	ControlFileData ControlFile;
+	char	   *buffer;
+	size_t		size;
+	char		fullpath[MAXPGPATH];
+
+	/* First fetch file content */
+	buffer = slurpFile(pgdata, XLOG_CONTROL_FILE, &size, false);
+	if (buffer == NULL)
+		elog(ERROR, "ERROR");
+
+	digestControlFile(&ControlFile, buffer, size);
+
+	elog(LOG, "Current minRecPoint %X/%X",
+		 (uint32) (ControlFile.minRecoveryPoint >> 32),
+		 (uint32) ControlFile.minRecoveryPoint);
+
+	elog(LOG, "Setting minRecPoint to %X/%X",
+		 (uint32) (stop_backup_lsn >> 32),
+		 (uint32) stop_backup_lsn);
+
+	ControlFile.minRecoveryPoint = stop_backup_lsn;
+
+	/* Update checksum in pg_control header */
+	INIT_CRC32C(ControlFile.crc);
+	COMP_CRC32C(ControlFile.crc,
+				(char *) &ControlFile,
+				offsetof(ControlFileData, crc));
+	FIN_CRC32C(ControlFile.crc);
+
+	/* paranoia */
+	checkControlFile(&ControlFile);
+
+	/* overwrite pg_control */
+	snprintf(fullpath, sizeof(fullpath), "%s/%s", backup_path, XLOG_CONTROL_FILE);
+	writeControlFile(&ControlFile, fullpath);
+
+	/* Update pg_control checksum in backup_list */
+	file->crc = pgFileGetCRC(fullpath, false);
+
+	pg_free(buffer);
+}
+
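The checksum update above hashes everything in ControlFileData up to, but not including, the stored crc field. Verifying an existing pg_control image is the mirror operation; a sketch using the same PostgreSQL CRC-32C macros (helper name hypothetical):

	static bool
	control_file_crc_ok(const ControlFileData *control)
	{
		pg_crc32c	crc;

		/* hash the struct up to, but not including, the stored crc field */
		INIT_CRC32C(crc);
		COMP_CRC32C(crc, (const char *) control, offsetof(ControlFileData, crc));
		FIN_CRC32C(crc);

		return EQ_CRC32C(crc, control->crc);
	}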
 /*
 * Convert time_t value to ISO-8601 format string. Always set timezone offset.
@@ -280,12 +366,14 @@ time2iso(char *buf, size_t len, time_t time)
 	}
 }

-/* Parse string representation of the server version */
-int
-parse_server_version(char *server_version_str)
+/*
+ * Parse string representation of the server version.
+ */
+uint32
+parse_server_version(const char *server_version_str)
 {
 	int			nfields;
-	int			result = 0;
+	uint32		result = 0;
 	int			major_version = 0;
 	int			minor_version = 0;

@@ -304,7 +392,31 @@ parse_server_version(char *server_version_str)
 		result = major_version * 10000;
 	}
 	else
-		elog(ERROR, "Unknown server version format");
+		elog(ERROR, "Unknown server version format %s", server_version_str);

 	return result;
 }

+/*
+ * Parse string representation of the program version.
+ */
+uint32
+parse_program_version(const char *program_version)
+{
+	int			nfields;
+	int			major = 0,
+				minor = 0,
+				micro = 0;
+	uint32		result = 0;
+
+	if (program_version == NULL || program_version[0] == '\0')
+		return 0;
+
+	nfields = sscanf(program_version, "%d.%d.%d", &major, &minor, &micro);
+	if (nfields == 3)
+		result = major * 10000 + minor * 100 + micro;
+	else
+		elog(ERROR, "Unknown program version format %s", program_version);
+
+	return result;
+}
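Both parsers pack a version into a single integer, so ordering checks become plain comparisons. Worked through:

	/* parse_program_version("2.0.21") == 2*10000 + 0*100 + 21 == 20021 */
	/* parse_program_version("2.0.24") == 20024                         */
	/* hence a check like 'backup_version <= 20021' selects backups
	 * taken by pg_probackup 2.0.21 or older */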
@@ -2,7 +2,7 @@
 *
 * logger.c: - log events into log file or stderr.
 *
- * Copyright (c) 2017-2017, Postgres Professional
+ * Copyright (c) 2017-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */

@@ -2,7 +2,7 @@
 *
 * logger.h: - prototypes of logger functions.
 *
- * Copyright (c) 2017-2017, Postgres Professional
+ * Copyright (c) 2017-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */

@@ -3,7 +3,7 @@
 * pgut.c
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2017-2017, Postgres Professional
+ * Portions Copyright (c) 2017-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -1655,8 +1655,8 @@ pgut_disconnect(PGconn *conn)


 PGresult *
-pgut_execute_parallel(PGconn* conn,
-					  PGcancel* thread_cancel_conn, const char *query,
+pgut_execute_parallel(PGconn* conn,
+					  PGcancel* thread_cancel_conn, const char *query,
 					  int nParams, const char **params,
 					  bool text_result)
 {

@@ -3,7 +3,7 @@
 * pgut.h
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2017-2017, Postgres Professional
+ * Portions Copyright (c) 2017-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -115,7 +115,7 @@ extern PGresult *pgut_execute(PGconn* conn, const char *query, int nParams,
 							  const char **params);
 extern PGresult *pgut_execute_extended(PGconn* conn, const char *query, int nParams,
 									   const char **params, bool text_result, bool ok_error);
-extern PGresult *pgut_execute_parallel(PGconn* conn, PGcancel* thread_cancel_conn,
+extern PGresult *pgut_execute_parallel(PGconn* conn, PGcancel* thread_cancel_conn,
 									   const char *query, int nParams,
 									   const char **params, bool text_result);
 extern bool pgut_send(PGconn* conn, const char *query, int nParams, const char **params, int elevel);
@@ -3,7 +3,7 @@
 * validate.c: validate backup files.
 *
 * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
@@ -26,6 +26,7 @@ typedef struct
 	bool		corrupted;
 	XLogRecPtr	stop_lsn;
 	uint32		checksum_version;
+	uint32		backup_version;

 	/*
 	 * Return value from the thread.
@@ -106,6 +107,7 @@ pgBackupValidate(pgBackup *backup)
 		arg->corrupted = false;
 		arg->stop_lsn = backup->stop_lsn;
 		arg->checksum_version = backup->checksum_version;
+		arg->backup_version = parse_program_version(backup->program_version);
 		/* By default there are some error */
 		threads_args[i].ret = 1;

@@ -207,20 +209,43 @@ pgBackupValidateFiles(void *arg)
 			break;
 		}

-		crc = pgFileGetCRC(file->path);
-		if (crc != file->crc)
-		{
-			elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
-				 file->path, file->crc, crc);
-			arguments->corrupted = true;
-		}
-
-		/* validate relation blocks */
-		if (file->is_datafile)
-		{
-			if (!check_file_pages(file, arguments->stop_lsn, arguments->checksum_version))
-				arguments->corrupted = true;
-		}
+		/*
+		 * If option skip-block-validation is set, compute only file-level CRC for
+		 * datafiles, otherwise check them block by block.
+		 */
+		if (!file->is_datafile || skip_block_validation)
+		{
+			/*
+			 * Pre 2.0.22 we use CRC-32C, but in newer version of pg_probackup we
+			 * use CRC-32.
+			 *
+			 * pg_control stores its content and checksum of the content, calculated
+			 * using CRC-32C. If we calculate checksum of the whole pg_control using
+			 * CRC-32C we get same checksum constantly. It might be because of the
+			 * CRC-32C algorithm.
+			 * To avoid this problem we need to use different algorithm, CRC-32 in
+			 * this case.
+			 */
+			crc = pgFileGetCRC(file->path, arguments->backup_version <= 20021);
+			if (crc != file->crc)
+			{
+				elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
+					 file->path, file->crc, crc);
+				arguments->corrupted = true;
+			}
+		}
+		else
+		{
+			/*
+			 * validate relation block by block
+			 * check page headers, checksums (if enabled)
+			 * and compute checksum of the file
+			 */
+			if (!check_file_pages(file, arguments->stop_lsn,
+								  arguments->checksum_version,
+								  arguments->backup_version))
+				arguments->corrupted = true;
+		}
 	}

 	/* Data files validation is successful */
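Condensing the new control flow: per-file validation now picks its CRC flavor from the producing binary's version. A sketch of that gate as a standalone helper (name hypothetical, calls as in the hunk above):

	static pg_crc32
	backup_file_crc(const char *path, uint32 backup_version)
	{
		/* 20021 == parse_program_version("2.0.21");
		 * backups that old were written with CRC-32C, newer ones with CRC-32 */
		return pgFileGetCRC(path, backup_version <= 20021);
	}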
@@ -21,7 +21,6 @@ def load_tests(loader, tests, pattern):
 #    suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
 #    suite.addTests(loader.loadTestsFromModule(logging))
     suite.addTests(loader.loadTestsFromModule(compression))
     suite.addTests(loader.loadTestsFromModule(compatibility))
     suite.addTests(loader.loadTestsFromModule(delete_test))
     suite.addTests(loader.loadTestsFromModule(delta))
     suite.addTests(loader.loadTestsFromModule(exclude))
@@ -62,8 +61,6 @@ def load_tests(loader, tests, pattern):
 # logging:
 #     https://jira.postgrespro.ru/browse/PGPRO-584
 #     https://jira.postgrespro.ru/secure/attachment/20420/20420_doc_logging.md
 # ptrack:
 #     ptrack backup on replica should work correctly
 # archive:
 #     immediate recovery and full recovery
 # backward compatibility:
@@ -5,6 +5,7 @@ from datetime import datetime, timedelta
 import subprocess
 from sys import exit
+from time import sleep
 from shutil import copyfile


 module_name = 'archive'
@@ -39,8 +40,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):

         result = node.safe_psql("postgres", "SELECT * FROM t_heap")
         self.backup_node(
-            backup_dir, 'node', node,
-            options=["--log-level-file=verbose"])
+            backup_dir, 'node', node)
         node.cleanup()

         self.restore_node(
@@ -53,8 +53,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):

         # Make backup
         self.backup_node(
-            backup_dir, 'node', node,
-            options=["--log-level-file=verbose"])
+            backup_dir, 'node', node)
         node.cleanup()

         # Restore Database
@@ -253,7 +252,6 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'node', node,
             options=[
                 "--archive-timeout=60",
-                "--log-level-file=verbose",
                 "--stream"]
         )
         # we should die here because exception is what we expect to happen
@@ -402,7 +400,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.expectedFailure
-    # @unittest.skip("skip")
+    @unittest.skip("skip")
     def test_replica_archive(self):
         """
         make node without archiving, take stream backup and
@@ -417,7 +415,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             initdb_params=['--data-checksums'],
             pg_options={
                 'max_wal_senders': '2',
                 'checkpoint_timeout': '30s',
                 'archive_timeout': '10s',
                 'max_wal_size': '1GB'}
         )
         self.init_pb(backup_dir)
@@ -433,7 +431,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "create table t_heap as select i as id, md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,256) i")
+            "from generate_series(0,2560) i")

         self.backup_node(backup_dir, 'master', master, options=['--stream'])
         before = master.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -459,9 +457,6 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "md5(repeat(i::text,10))::tsvector as tsvector "
             "from generate_series(256,512) i")
         before = master.safe_psql("postgres", "SELECT * FROM t_heap")
         # ADD INSTANCE 'REPLICA'

         sleep(1)

         backup_id = self.backup_node(
             backup_dir, 'replica', replica,
@@ -469,7 +464,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
                 '--archive-timeout=30',
                 '--master-host=localhost',
                 '--master-db=postgres',
-                '--master-port={0}'.format(master.port)])
+                '--master-port={0}'.format(master.port),
+                '--stream'])

         self.validate_pb(backup_dir, 'replica')
         self.assertEqual(
             'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
@@ -493,16 +490,28 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "insert into t_heap as select i as id, md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(512,768) i")
+            "from generate_series(512,20680) i")
+
+        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+
+        master.safe_psql(
+            "postgres",
+            "CHECKPOINT")
+
+        # copyfile(
+        #     os.path.join(backup_dir, 'wal/master/000000010000000000000002'),
+        #     os.path.join(backup_dir, 'wal/replica/000000010000000000000002'))

         backup_id = self.backup_node(
             backup_dir, 'replica',
             replica, backup_type='page',
-            options=[
-                '--archive-timeout=30', '--log-level-file=verbose',
-                '--master-host=localhost', '--master-db=postgres',
-                '--master-port={0}'.format(master.port)]
-            )
+            options=[
+                '--archive-timeout=30',
+                '--master-db=postgres',
+                '--master-host=localhost',
+                '--master-port={0}'.format(master.port),
+                '--stream'])

         self.validate_pb(backup_dir, 'replica')
         self.assertEqual(
             'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
@@ -511,8 +520,10 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         node.cleanup()
         self.restore_node(
             backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
+
+        node.append_conf(
+            'postgresql.auto.conf', 'port = {0}'.format(node.port))
+
         node.slow_start()
         # CHECK DATA CORRECTNESS
         after = node.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -537,7 +548,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             set_replication=True,
             initdb_params=['--data-checksums'],
             pg_options={
-                'checkpoint_timeout': '30s'}
+                'archive_timeout': '10s'}
         )
         replica = self.make_simple_node(
             base_dir="{0}/{1}/replica".format(module_name, fname))
@@ -568,7 +579,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         pgdata_replica = self.pgdata_content(replica.data_dir)
         self.compare_pgdata(pgdata_master, pgdata_replica)

-        self.set_replica(master, replica, synchronous=True)
+        self.set_replica(master, replica)
         # ADD INSTANCE REPLICA
         self.add_instance(backup_dir, 'replica', replica)
         # SET ARCHIVING FOR REPLICA
@@ -579,16 +590,26 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)

+        master.psql(
+            "postgres",
+            "insert into t_heap select i as id, md5(i::text) as text, "
+            "md5(repeat(i::text,10))::tsvector as tsvector "
+            "from generate_series(0, 60000) i")
+
         # TAKE FULL ARCHIVE BACKUP FROM REPLICA
+        copyfile(
+            os.path.join(backup_dir, 'wal/master/000000010000000000000001'),
+            os.path.join(backup_dir, 'wal/replica/000000010000000000000001'))
+
         backup_id = self.backup_node(
             backup_dir, 'replica', replica,
             options=[
-                '--archive-timeout=20',
-                '--log-level-file=verbose',
+                '--archive-timeout=30',
                 '--master-host=localhost',
                 '--master-db=postgres',
-                '--master-port={0}'.format(master.port)]
-            )
+                '--master-port={0}'.format(master.port),
+                '--stream'])

         self.validate_pb(backup_dir, 'replica')
         self.assertEqual(
             'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
@@ -618,7 +639,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
             set_replication=True,
             initdb_params=['--data-checksums'],
             pg_options={
-                'checkpoint_timeout': '30s'}
+                'checkpoint_timeout': '30s',
+                'archive_timeout': '10s'}
         )
         replica = self.make_simple_node(
             base_dir="{0}/{1}/replica".format(module_name, fname))
@@ -328,7 +328,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):

         self.backup_node(
             backup_dir, 'node', node, backup_type="full",
-            options=["-j", "4", "--stream", '--log-level-file=verbose'])
+            options=["-j", "4", "--stream", "--log-level-file=verbose"])

         # open log file and check
         with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
@@ -520,3 +520,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         if self.paranoia:
             pgdata_restored = self.pgdata_content(node.data_dir)
             self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
@@ -94,8 +94,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench.stdout.close()

         self.backup_node(
-            backup_dir, 'node', node, backup_type='page',
-            options=['--log-level-file=verbose'])
+            backup_dir, 'node', node, backup_type='page')

         if self.paranoia:
             pgdata = self.pgdata_content(node.data_dir)
@@ -195,8 +194,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench.stdout.close()

         self.backup_node(
-            backup_dir, 'node', node, backup_type='delta',
-            options=['--log-level-file=verbose'])
+            backup_dir, 'node', node, backup_type='delta')

         if self.paranoia:
             pgdata = self.pgdata_content(node.data_dir)
@@ -296,8 +294,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench.stdout.close()

         self.backup_node(
-            backup_dir, 'node', node, backup_type='delta',
-            options=['--log-level-file=verbose'])
+            backup_dir, 'node', node, backup_type='delta')

         if self.paranoia:
             pgdata = self.pgdata_content(node.data_dir)
@@ -311,3 +308,162 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         if self.paranoia:
             pgdata_restored = self.pgdata_content(node_restored.data_dir)
             self.compare_pgdata(pgdata, pgdata_restored)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_backward_compatibility_compression(self):
+        """Description in jira issue PGPRO-434"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'max_wal_senders': '2',
+                'autovacuum': 'off'})
+
+        self.init_pb(backup_dir, old_binary=True)
+        self.add_instance(backup_dir, 'node', node, old_binary=True)
+
+        self.set_archiving(backup_dir, 'node', node, old_binary=True)
+        node.slow_start()
+
+        node.pgbench_init(scale=10)
+
+        # FULL backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            old_binary=True,
+            options=['--compress'])
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        # restore OLD FULL with new binary
+        node_restored = self.make_simple_node(
+            base_dir="{0}/{1}/node_restored".format(module_name, fname))
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=["-j", "4"])
+
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # PAGE backup with OLD binary
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"])
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page',
+            old_binary=True,
+            options=['--compress'])
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=["-j", "4"])
+
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # PAGE backup with new binary
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"])
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page',
+            options=['--compress'])
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=["-j", "4"])
+
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Delta backup with old binary
+        self.delete_pb(backup_dir, 'node', backup_id)
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            old_binary=True,
+            options=['--compress'])
+
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"])
+
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='delta',
+            options=['--compress'],
+            old_binary=True)
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=["-j", "4"])
+
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Delta backup with new binary
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"])
+
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='delta',
+            options=['--compress'])
+
+        if self.paranoia:
+            pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=["-j", "4"])
+
+        if self.paranoia:
+            pgdata_restored = self.pgdata_content(node_restored.data_dir)
+            self.compare_pgdata(pgdata, pgdata_restored)
@@ -55,9 +55,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
         page_backup_id = self.backup_node(
             backup_dir, 'node', node, backup_type='page',
-            options=[
-                '--stream', '--compress-algorithm=zlib',
-                '--log-level-console=verbose',
-                '--log-level-file=verbose'])
+            options=[
+                '--stream', '--compress-algorithm=zlib'])

         # PTRACK BACKUP
         node.safe_psql(
@@ -494,3 +492,68 @@ class CompressionTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    @unittest.skip("skip")
+    def test_uncompressable_pages(self):
+        """
+        make archive node, create table with uncompressable toast pages,
+        take backup with compression, make sure that page was not compressed,
+        restore backup and check data correctness
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir="{0}/{1}/node".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'wal_level': 'replica',
+                'max_wal_senders': '2',
+                'checkpoint_timeout': '30s'}
+        )
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # node.safe_psql(
+        #     "postgres",
+        #     "create table t_heap as select i, "
+        #     "repeat('1234567890abcdefghiyklmn', 1)::bytea, "
+        #     "point(0,0) from generate_series(0,1) i")
+
+        node.safe_psql(
+            "postgres",
+            "create table t as select i, "
+            "repeat(md5(i::text),5006056) as fat_attr "
+            "from generate_series(0,10) i;")
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='full',
+            options=[
+                '--compress'])
+
+        node.cleanup()
+
+        self.restore_node(backup_dir, 'node', node)
+        node.slow_start()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='full',
+            options=[
+                '--compress'])
+
+        # Clean after yourself
+        # self.del_test_dir(module_name, fname)
+
+# create table t as select i, repeat(md5('1234567890'), 1)::bytea, point(0,0) from generate_series(0,1) i;
+
+# create table t_bytea_1(file oid);
+# INSERT INTO t_bytea_1 (file)
+#     VALUES (lo_import('/home/gsmol/git/postgres/contrib/pg_probackup/tests/expected/sample.random', 24593));
+# insert into t_bytea select string_agg(data,'') from pg_largeobject where pageno > 0;
+#
@@ -55,6 +55,11 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_delete_archive_mix_compress_and_non_compressed_segments(self):
+        """stub"""
+
     # @unittest.skip("skip")
     def test_delete_increment_page(self):
         """delete increment and all after him"""
@@ -80,13 +80,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         pgdata = self.pgdata_content(node.data_dir)

         self.restore_node(
-            backup_dir,
-            'node',
-            node_restored,
-            options=[
-                "-j", "1",
-                "--log-level-file=verbose"
-            ]
+            backup_dir, 'node', node_restored
         )

         # Physical comparison
@@ -176,8 +170,6 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
             'node',
             node_restored,
             options=[
-                "-j", "1",
-                "--log-level-file=verbose",
                 "-T", "{0}={1}".format(
                     old_tablespace, new_tablespace)]
         )
@@ -251,13 +243,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         pgdata = self.pgdata_content(node.data_dir)

         self.restore_node(
-            backup_dir,
-            'node',
-            node_restored,
-            options=[
-                "-j", "1",
-                "--log-level-file=verbose"
-            ]
+            backup_dir, 'node', node_restored
         )

         # Physical comparison
@@ -683,7 +669,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
             node_restored,
             backup_id=backup_id,
             options=[
-                "-j", "4", "--log-level-file=verbose",
+                "-j", "4",
                 "--immediate",
                 "--recovery-target-action=promote"])

@@ -717,7 +703,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
             node_restored,
             backup_id=backup_id,
             options=[
-                "-j", "4", "--log-level-file=verbose",
+                "-j", "4",
                 "--immediate",
                 "--recovery-target-action=promote"]
         )
@@ -815,7 +801,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         backup_id = self.backup_node(
             backup_dir, 'node', node,
             backup_type='delta',
-            options=["--stream", "--log-level-file=verbose"]
+            options=["--stream"]
         )
         # if self.paranoia:
         #     pgdata_delta = self.pgdata_content(
@@ -844,7 +830,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
             node_restored,
             backup_id=backup_id,
             options=[
-                "-j", "4", "--log-level-file=verbose",
+                "-j", "4",
                 "--immediate",
                 "--recovery-target-action=promote"])

@@ -1135,7 +1121,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
-    def test_page_corruption_heal_via_ptrack_1(self):
+    def test_delta_corruption_heal_via_ptrack_1(self):
         """make node, corrupt some page, check that backup failed"""
         fname = self.id().split('.')[3]
         node = self.make_simple_node(
@@ -1174,8 +1160,10 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         f.close

         self.backup_node(
-            backup_dir, 'node', node, backup_type="delta",
-            options=["-j", "4", "--stream", "--log-level-file=verbose"])
+            backup_dir, 'node', node,
+            backup_type="delta",
+            options=["-j", "4", "--stream", '--log-level-file=verbose'])
+

         # open log file and check
         with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
@@ -143,7 +143,7 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):

         self.backup_node(
             backup_dir, 'node', node, backup_type='ptrack',
-            options=['--stream', '--log-level-file=verbose']
+            options=['--stream']
         )

         pgdata = self.pgdata_content(node.data_dir)
@@ -50,6 +50,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
 	[--master-db=db_name] [--master-host=host_name]
 	[--master-port=port] [--master-user=user_name]
 	[--replica-timeout=timeout]
+	[--skip-block-validation]

 pg_probackup restore -B backup-path --instance=instance_name
 	[-D pgdata-path] [-i backup-id] [--progress]
@@ -59,12 +60,14 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
 	[--recovery-target-action=pause|promote|shutdown]
 	[--restore-as-replica]
 	[--no-validate]
+	[--skip-block-validation]

 pg_probackup validate -B backup-path [--instance=instance_name]
 	[-i backup-id] [--progress]
 	[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
 	[--recovery-target-name=target-name]
 	[--timeline=timeline]
+	[--skip-block-validation]

 pg_probackup show -B backup-path
 	[--instance=instance_name [-i backup-id]]
@@ -1 +1 @@
-pg_probackup 2.0.21
+pg_probackup 2.0.24
@@ -143,7 +143,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
         self.backup_node(backup_dir, 'node', node, options=['--stream'])
         gdb = self.backup_node(
             backup_dir, 'node', node, backup_type='ptrack',
-            options=['--stream', '--log-level-file=verbose'],
+            options=['--stream'],
             gdb=True
         )

@@ -227,7 +227,7 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
         self.backup_node(backup_dir, 'node', node, options=['--stream'])
         gdb = self.backup_node(
             backup_dir, 'node', node, backup_type='ptrack',
-            options=['--stream', '--log-level-file=verbose'],
+            options=['--stream'],
             gdb=True
         )
@@ -874,7 +874,7 @@ class ProbackupTest(object):
         return out_dict

     def set_archiving(
-            self, backup_dir, instance, node, replica=False, overwrite=False,
+            self, backup_dir, instance, node, replica=False, overwrite=False, compress=False,
             old_binary=False):

         if replica:
@@ -895,7 +895,7 @@ class ProbackupTest(object):
             self.probackup_path, backup_dir, instance)

         if os.name == 'posix':
-            if self.archive_compress:
+            if self.archive_compress or compress:
                 archive_command = archive_command + "--compress "

             if overwrite:
137	tests/merge.py
@ -2,7 +2,7 @@
|
||||
|
||||
import unittest
|
||||
import os
|
||||
from .helpers.ptrack_helpers import ProbackupTest
|
||||
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
|
||||
|
||||
module_name = "merge"
|
||||
|
||||
@ -407,17 +407,17 @@ class MergeTest(ProbackupTest, unittest.TestCase):
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"delete from t_heap where ctid >= '(11,0)'")
|
||||
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"vacuum t_heap")
|
||||
|
||||
self.backup_node(
|
||||
page_id = self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='ptrack')
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
page_id = self.show_pb(backup_dir, "node")[1]["id"]
|
||||
self.merge_backup(backup_dir, "node", page_id)
|
||||
|
||||
self.validate_pb(backup_dir)
|
||||
@ -602,7 +602,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
|
||||
|
||||
gdb.set_breakpoint('move_file')
|
||||
gdb.set_breakpoint('copy_file')
|
||||
gdb.run_until_break()
|
||||
|
||||
if gdb.continue_execution_until_break(20) != 'breakpoint-hit':
|
||||
@@ -615,3 +615,132 @@ class MergeTest(ProbackupTest, unittest.TestCase):

# Try to continue failed MERGE
self.merge_backup(backup_dir, "node", backup_id)

# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)

# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_continue_failed_merge_with_corrupted_delta_backup(self):
"""
Fail merge via gdb, corrupt DELTA backup, try to continue merge
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica'
}
)

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()

# FULL backup
self.backup_node(backup_dir, 'node', node)

node.safe_psql(
"postgres",
"create table t_heap as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,1000) i"
)

old_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()

# DELTA BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='delta'
)

node.safe_psql(
"postgres",
"update t_heap set id = 100500"
)

node.safe_psql(
"postgres",
"vacuum full t_heap"
)

new_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()

# DELTA BACKUP
backup_id_2 = self.backup_node(
backup_dir, 'node', node, backup_type='delta'
)

backup_id = self.show_pb(backup_dir, "node")[1]["id"]

# Failed MERGE
gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
gdb.set_breakpoint('copy_file')
gdb.run_until_break()

if gdb.continue_execution_until_break(2) != 'breakpoint-hit':
print('Failed to hit breakpoint')
exit(1)

gdb._execute('signal SIGKILL')
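# the SIGKILL lands between copy_file() calls, leaving a half-applied
# merge on disk; the retry below must cope with exactly that state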

# CORRUPT incremental backup
# read block from future
# block_size + backup_header = 8200
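# (assuming the default 8192-byte BLCKSZ plus an 8-byte per-block backup
# header, 8192 + 8 = 8200, so seeking to 8200 lands on the second stored block)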
file = os.path.join(
backup_dir, 'backups/node', backup_id_2, 'database', new_path)
with open(file, 'rb') as f:
f.seek(8200)
block_1 = f.read(8200)
f.close()

# write block from future
file = os.path.join(
backup_dir, 'backups/node', backup_id, 'database', old_path)
with open(file, 'r+b') as f:
f.seek(8200)
f.write(block_1)
f.close()

# Try to continue failed MERGE
try:
self.merge_backup(backup_dir, "node", backup_id)
self.assertEqual(
1, 0,
"Expecting Error because of incremental backup corruption.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"WARNING: Backup {0} data files are corrupted".format(
backup_id) in e.message and
"ERROR: Merging of backup {0} failed".format(
backup_id) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

# Clean after yourself
self.del_test_dir(module_name, fname)

# 1. Always use the parent link when merging (intermediates may be from a different chain).
# 2. The page backup we are merging with may disappear after a failed merge;
#    it should not be possible to continue the merge after that.
#    PAGE_A   MERGING (disappears)
#    FULL     MERGING
#
#    FULL     MERGING
#
#    PAGE_B   OK (new backup)
#    FULL     MERGING
#
# 3. Need a new test with a corrupted FULL backup.
@@ -62,8 +62,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
"vacuum t_heap")

self.backup_node(
-backup_dir, 'node', node, backup_type='page',
-options=['--log-level-file=verbose'])
+backup_dir, 'node', node, backup_type='page')

self.backup_node(
backup_dir, 'node', node, backup_type='page')

@@ -333,8 +332,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
result = node.safe_psql("postgres", "select * from pgbench_accounts")
# PAGE BACKUP
self.backup_node(
-backup_dir, 'node', node, backup_type='page',
-options=["--log-level-file=verbose"])
+backup_dir, 'node', node, backup_type='page')
# GET PHYSICAL CONTENT FROM NODE
pgdata = self.pgdata_content(node.data_dir)

@@ -727,7 +725,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node,
backup_type='page',
-options=["-j", "4", '--log-level-file=verbose'])
+options=["-j", "4"])
self.assertEqual(
1, 0,
"Expecting Error because of wal segment disappearance.\n "

@@ -797,8 +795,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Single-thread PAGE backup
try:
self.backup_node(
-backup_dir, 'node', node,
-backup_type='page', options=['--log-level-file=verbose'])
+backup_dir, 'node', node, backup_type='page')
self.assertEqual(
1, 0,
"Expecting Error because of wal segment disappearance.\n "

@@ -936,6 +933,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
+'WAL file is from different database system: WAL file database system identifier is' in e.message and
+'pg_control database system identifier is' in e.message and
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
file_destination) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(

@@ -961,6 +960,8 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
'INFO: Wait for LSN' in e.message and
'in archived WAL segment' in e.message and
'could not read WAL record at' in e.message and
+'WAL file is from different database system: WAL file database system identifier is' in e.message and
+'pg_control database system identifier is' in e.message and
'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
file_destination) in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
@@ -157,13 +157,13 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=['--stream', '--log-level-file=verbose']
+options=['--stream']
)
pgdata = self.pgdata_content(node.data_dir)

self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=['--stream', '--log-level-file=verbose']
+options=['--stream']
)

self.restore_node(

@@ -246,14 +246,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
exit(1)

self.backup_node(
-backup_dir, 'node', node, backup_type='ptrack',
-options=['--log-level-file=verbose']
-)
+backup_dir, 'node', node, backup_type='ptrack')

self.backup_node(
-backup_dir, 'node', node, backup_type='ptrack',
-options=['--log-level-file=verbose']
-)
+backup_dir, 'node', node, backup_type='ptrack')

if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

@@ -336,14 +333,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)

self.backup_node(
-backup_dir, 'node', node, backup_type='ptrack',
-options=['--log-level-file=verbose']
-)
+backup_dir, 'node', node, backup_type='ptrack')

self.backup_node(
-backup_dir, 'node', node, backup_type='ptrack',
-options=['--log-level-file=verbose']
-)
+backup_dir, 'node', node, backup_type='ptrack')

if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

@@ -409,7 +402,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=['--stream', '--log-level-file=verbose']
+options=['--stream']
)

node.safe_psql(

@@ -479,7 +472,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.backup_node(backup_dir, 'node', node, options=['--stream'])
gdb = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=['--stream', '--log-level-file=verbose'],
+options=['--stream'],
gdb=True
)

@@ -566,7 +559,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
ptrack_backup_id = self.backup_node(
backup_dir, 'node',
node, backup_type='ptrack',
-options=['--stream', '--log-level-file=verbose']
+options=['--stream']
)

if self.paranoia:

@@ -989,7 +982,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
node.safe_psql("postgres", "SELECT * FROM t_heap")
self.backup_node(
backup_dir, 'node', node,
-options=["--stream", "--log-level-file=verbose"])
+options=["--stream"])

# CREATE DATABASE DB1
node.safe_psql("postgres", "create database db1")

@@ -1002,7 +995,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack',
-options=["--stream", "--log-level-file=verbose"]
+options=["--stream"]
)

if self.paranoia:

@@ -1133,7 +1126,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'-j10',
'--master-host=localhost',
'--master-db=postgres',
-'--master-port={0}'.format(node.port)
+'--master-port={0}'.format(node.port),
+'--stream'
]
)

@@ -1229,7 +1223,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node,
backup_type='ptrack',
-options=["--stream", "--log-level-file=verbose"]
+options=["--stream"]
)
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

@@ -1315,7 +1309,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# PTRACK BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=["--stream", '--log-level-file=verbose'])
+options=["--stream"])

if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

@@ -1476,7 +1470,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# FIRST PTRACK BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=["--stream", "--log-level-file=verbose"])
+options=["--stream"])

# GET PHYSICAL CONTENT FROM NODE
if self.paranoia:

@@ -1517,7 +1511,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# SECOND PTRACK BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=["--stream", "--log-level-file=verbose"])
+options=["--stream"])

if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

@@ -1586,6 +1580,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

pgbench = node.pgbench(options=['-T', '150', '-c', '2', '--no-vacuum'])
pgbench.wait()

+node.safe_psql("postgres", "checkpoint")

idx_ptrack['new_size'] = self.get_fork_size(

@@ -1607,12 +1602,12 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)

# GET LOGICAL CONTENT FROM NODE
-result = node.safe_psql("postgres", "select * from pgbench_accounts")
+# pointless here: hint bits are ignored by ptrack
+#result = node.safe_psql("postgres", "select * from pgbench_accounts")
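# (hint-bit updates change page contents without setting bits in the ptrack
# map, so only the physical pgdata comparison below is a meaningful check)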
# FIRST PTRACK BACKUP
self.backup_node(
-backup_dir, 'node', node, backup_type='ptrack',
-options=["--log-level-file=verbose"]
-)
+backup_dir, 'node', node, backup_type='ptrack')

# GET PHYSICAL CONTENT FROM NODE
pgdata = self.pgdata_content(node.data_dir)

@@ -1647,7 +1642,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)

# COMPARE RESTORED FILES
-self.assertEqual(result, result_new, 'data is lost')
+#self.assertEqual(result, result_new, 'data is lost')

# Clean after yourself
self.del_test_dir(module_name, fname)

@@ -1681,9 +1676,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=[
-"--stream", "-j 30",
-"--log-level-file=verbose"]
-)
+"--stream", "-j 30"])

# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
@@ -33,7 +33,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "

@@ -75,7 +76,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Take PTRACK backup to clean every ptrack
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=['-j10', '--log-level-file=verbose'])
+options=['-j10'])
node.safe_psql('postgres', 'checkpoint')

for i in idx_ptrack:

@@ -151,7 +152,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -33,7 +33,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")

@@ -111,7 +112,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -201,7 +203,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -300,7 +303,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -34,7 +34,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap "
"(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) "
"tablespace somedata")

@@ -66,7 +67,7 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Take PTRACK backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
-options=['-j10', '--log-level-file=verbose'])
+options=['-j10'])

if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

@@ -120,7 +121,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap "
"(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)")
self.wait_until_replica_catch_with_master(master, replica)

@@ -31,7 +31,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"md5(i::text) as text,md5(repeat(i::text,10))::tsvector as "
"tsvector from generate_series(0,2560) i")

@@ -32,7 +32,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -31,7 +31,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -104,7 +105,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
self.create_tblspace_in_node(master, 'somedata')
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -31,7 +31,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -48,6 +49,10 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')

+# Make full backup to clean every ptrack
+self.backup_node(
+backup_dir, 'node', node, options=['-j10', '--stream'])

for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)

@@ -56,11 +61,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

-# Make full backup to clean every ptrack
-self.backup_node(
-backup_dir, 'node', node, options=['-j10', '--stream'])
for i in idx_ptrack:
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])
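# (the FULL backup was moved above the size/md5 snapshot, so the snapshot
# is taken after the ptrack map has been cleared, which is the state
# check_ptrack_clean() is meant to verify)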

@@ -130,7 +130,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")

@@ -156,11 +157,6 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
'--master-db=postgres',
'--master-port={0}'.format(master.port)])

-for i in idx_ptrack:
-idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
-replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
-self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

for i in idx_ptrack:
# get fork size and calculate it in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)

@@ -169,6 +165,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# calculate md5sums for every page of this fork
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
+idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
+replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
+self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

# Delete some rows, vacuum it and make checkpoint
master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')

@@ -31,7 +31,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
res = node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -121,7 +122,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
for i in idx_ptrack:

@@ -31,7 +31,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
res = node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -32,7 +32,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
res = node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -122,7 +123,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector as "
"tsvector from generate_series(0,256000) i")
for i in idx_ptrack:

@@ -31,7 +31,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
res = node.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap tablespace somedata "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap tablespace somedata "
"as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

@@ -123,7 +124,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
-"create sequence t_seq; create table t_heap as select i as id, "
+"create extension bloom; create sequence t_seq; "
+"create table t_heap as select i as id, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
tests/replica.py

@@ -5,6 +5,7 @@ from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
+from shutil import copyfile


module_name = 'replica'
@@ -64,6 +65,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"from generate_series(256,512) i")
before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+self.add_instance(backup_dir, 'replica', replica)

backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[

@@ -80,9 +82,11 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
+
+node.append_conf(
+'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()

# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@@ -95,7 +99,9 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,768) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")

backup_id = self.backup_node(
backup_dir, 'replica', replica, backup_type='ptrack',
options=[

@@ -111,9 +117,11 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.restore_node(
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
+
+node.append_conf(
+'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()

# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

@@ -136,13 +144,12 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
-'checkpoint_timeout': '30s'}
+'checkpoint_timeout': '30s',
+'archive_timeout': '10s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
-# force more frequent wal switch
-master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.slow_start()

replica = self.make_simple_node(

@@ -155,7 +162,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
-"from generate_series(0,256) i")
+"from generate_series(0,2560) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")

@@ -166,6 +173,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# Settings for Replica
self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
+
replica.slow_start(replica=True)

# Check data correctness on replica

@@ -179,16 +187,32 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
-"from generate_series(256,512) i")
+"from generate_series(256,5120) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")
self.add_instance(backup_dir, 'replica', replica)

+copyfile(
+os.path.join(backup_dir, 'wal/master/000000010000000000000003'),
+os.path.join(backup_dir, 'wal/replica/000000010000000000000003'))
+
+copyfile(
+os.path.join(backup_dir, 'wal/master/000000010000000000000004'),
+os.path.join(backup_dir, 'wal/replica/000000010000000000000004'))
+
+copyfile(
+os.path.join(backup_dir, 'wal/master/000000010000000000000005'),
+os.path.join(backup_dir, 'wal/replica/000000010000000000000005'))
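# the copies above seed the replica instance's archive with segments that
# were archived by the master; nothing has archived WAL for 'replica' yet,
# and validation of the replica backup needs those segments in place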

backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[
-'--archive-timeout=300',
+'--archive-timeout=30',
'--master-host=localhost',
'--master-db=postgres',
-'--master-port={0}'.format(master.port)])
+'--master-port={0}'.format(master.port),
+'--stream'])

self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])

@@ -201,9 +225,11 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
node.slow_start()

# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
+node.cleanup()

# Change data on master, make PAGE backup from replica,
# restore taken backup and check that restored data equal

@@ -212,30 +238,42 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
-"from generate_series(512,768) i")
+"from generate_series(512,22680) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")

backup_id = self.backup_node(
-backup_dir, 'replica', replica, backup_type='page',
+backup_dir, 'replica',
+replica, backup_type='page',
options=[
-'--archive-timeout=300',
+'--archive-timeout=30',
'--master-host=localhost',
'--master-db=postgres',
-'--master-port={0}'.format(master.port)])
+'--master-port={0}'.format(master.port),
+'--stream'])

self.validate_pb(backup_dir, 'replica')
self.assertEqual(
'OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])

# RESTORE PAGE BACKUP TAKEN FROM replica
node.cleanup()
self.restore_node(
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)

node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))
+node.append_conf(
+'postgresql.auto.conf', 'archive_mode = off')
node.slow_start()

# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)

+self.add_instance(backup_dir, 'node', node)
+self.backup_node(
+backup_dir, 'node', node, options=['--stream'])

# Clean after yourself
self.del_test_dir(module_name, fname)

@@ -279,15 +317,217 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(
backup_dir, 'master', master, backup_type='page')
self.restore_node(
-backup_dir, 'master', replica,
-options=['-R', '--recovery-target-action=promote'])
+backup_dir, 'master', replica, options=['-R'])

# Settings for Replica
# self.set_replica(master, replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
-replica.start()
+replica.append_conf(
+'postgresql.auto.conf', 'hot_standby = on')
+
+replica.slow_start(replica=True)

self.add_instance(backup_dir, 'replica', replica)

copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000003'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000003'))

self.backup_node(backup_dir, 'replica', replica)

# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_take_backup_from_delayed_replica(self):
"""
make archive master, take full backups from master,
restore full backup as delayed replica, launch pgbench,
take FULL, PAGE and DELTA backups from replica
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
#master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.slow_start()

replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()

self.backup_node(backup_dir, 'master', master)

self.restore_node(
backup_dir, 'master', replica, options=['-R'])

# Settings for Replica
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)

# work around the replica's empty archive: seed it with the master's first segment
copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000001'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000001'))

replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))

replica.append_conf(
'postgresql.auto.conf', 'hot_standby = on')

replica.append_conf(
'recovery.conf', "recovery_min_apply_delay = '300s'")
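# with recovery_min_apply_delay the replica receives WAL immediately but
# replays it five minutes late, so every backup taken below starts from an
# LSN the master passed long ago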

replica.slow_start(replica=True)

master.pgbench_init(scale=10)

pgbench = master.pgbench(
options=['-T', '30', '-c', '2', '--no-vacuum'])

self.backup_node(
backup_dir, 'replica', replica)

self.backup_node(
backup_dir, 'replica', replica,
data_dir=replica.data_dir, backup_type='page')

self.backup_node(
backup_dir, 'replica', replica, backup_type='delta')

pgbench.wait()

pgbench = master.pgbench(
options=['-T', '30', '-c', '2', '--no-vacuum'])

self.backup_node(
backup_dir, 'replica', replica,
options=['--stream'])

self.backup_node(
backup_dir, 'replica', replica,
backup_type='page', options=['--stream'])

self.backup_node(
backup_dir, 'replica', replica,
backup_type='delta', options=['--stream'])

pgbench.wait()

# Clean after yourself
self.del_test_dir(module_name, fname)

@unittest.skip("skip")
def test_make_block_from_future(self):
"""
make archive master, take full backups from master,
restore full backup as replica, launch pgbench,
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
# force more frequent wal switch
#master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
master.slow_start()

replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()

self.backup_node(backup_dir, 'master', master)

self.restore_node(
backup_dir, 'master', replica, options=['-R'])

# Settings for Replica
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
replica.append_conf(
'postgresql.auto.conf', 'hot_standby = on')

replica.slow_start(replica=True)

self.add_instance(backup_dir, 'replica', replica)

replica.safe_psql(
'postgres',
'checkpoint')

master.pgbench_init(scale=10)

self.wait_until_replica_catch_with_master(master, replica)

# print(replica.safe_psql(
#     'postgres',
#     'select * from pg_catalog.pg_last_xlog_receive_location()'))
#
# print(replica.safe_psql(
#     'postgres',
#     'select * from pg_catalog.pg_last_xlog_replay_location()'))
#
# print(replica.safe_psql(
#     'postgres',
#     'select * from pg_catalog.pg_control_checkpoint()'))
#
# replica.safe_psql(
#     'postgres',
#     'checkpoint')

pgbench = master.pgbench(options=['-T', '30', '-c', '2', '--no-vacuum'])

time.sleep(5)

#self.backup_node(backup_dir, 'replica', replica, options=['--stream'])
exit(1)
self.backup_node(backup_dir, 'replica', replica)
pgbench.wait()

# pgbench
master.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256000) i")

master.safe_psql(
'postgres',
'checkpoint')

replica.safe_psql(
'postgres',
'checkpoint')

replica.safe_psql(
'postgres',
'select * from pg_')

self.backup_node(backup_dir, 'replica', replica)
exit(1)

# Clean after yourself
self.del_test_dir(module_name, fname)
tests/validate.py

@@ -5,6 +5,7 @@ from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
+import hashlib


module_name = 'validate'

@@ -49,7 +50,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
f.close()

self.backup_node(
-backup_dir, 'node', node, options=["--log-level-file=verbose"])
+backup_dir, 'node', node, options=['--log-level-file=verbose'])

log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log")

@@ -258,8 +259,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Simple validate
try:
self.validate_pb(
-backup_dir, 'node', backup_id=backup_id_2,
-options=['--log-level-file=verbose'])
+backup_dir, 'node', backup_id=backup_id_2)
self.assertEqual(
1, 0,
"Expecting Error because of data files corruption.\n "

@@ -363,8 +363,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Validate PAGE1
try:
self.validate_pb(
-backup_dir, 'node', backup_id=backup_id_2,
-options=['--log-level-file=verbose'])
+backup_dir, 'node', backup_id=backup_id_2)
self.assertEqual(
1, 0,
"Expecting Error because of data files corruption.\n "

@@ -519,8 +518,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
try:
self.validate_pb(
backup_dir, 'node',
-backup_id=backup_id_4,
-options=['--log-level-file=verbose'])
+backup_id=backup_id_4)
self.assertEqual(
1, 0,
"Expecting Error because of data files corruption.\n"

@@ -720,7 +718,6 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.validate_pb(
backup_dir, 'node',
options=[
-'--log-level-file=verbose',
'-i', backup_id_4, '--xid={0}'.format(target_xid)])
self.assertEqual(
1, 0,

@@ -865,7 +862,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Validate Instance
try:
self.validate_pb(
-backup_dir, 'node', options=['--log-level-file=verbose'])
+backup_dir, 'node')
self.assertEqual(
1, 0,
"Expecting Error because of data files corruption.\n "

@@ -1005,7 +1002,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):

# Validate Instance
try:
-self.validate_pb(backup_dir, 'node', options=['--log-level-file=verbose'])
+self.validate_pb(backup_dir, 'node')
self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:

@@ -1091,7 +1088,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):

# Validate Instance
try:
-self.validate_pb(backup_dir, 'node', options=['--log-level-file=verbose'])
+self.validate_pb(backup_dir, 'node')
self.assertEqual(1, 0, "Expecting Error because of data files corruption.\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:

@@ -1218,7 +1215,6 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'node',
backup_id,
options=[
-"--log-level-console=verbose",
"--xid={0}".format(target_xid)])
self.assertEqual(
1, 0,

@@ -1387,7 +1383,6 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'node',
backup_id,
options=[
-"--log-level-console=verbose",
"--xid={0}".format(target_xid)])
self.assertEqual(
1, 0,

@@ -1670,7 +1665,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):

os.rename(file_new, file)
try:
-self.validate_pb(backup_dir, options=['--log-level-file=verbose'])
+self.validate_pb(backup_dir)
except ProbackupException as e:
self.assertIn(
'WARNING: Some backups are not valid'.format(

@@ -1775,7 +1770,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
os.rename(file, file_new)

try:
-self.validate_pb(backup_dir, options=['--log-level-file=verbose'])
+self.validate_pb(backup_dir)
except ProbackupException as e:
self.assertIn(
'WARNING: Some backups are not valid'.format(

@@ -3064,4 +3059,85 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_corrupt_pg_control_via_resetxlog(self):
""" PGPRO-2096 """
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()

backup_id = self.backup_node(backup_dir, 'node', node)

if self.get_version(node) < 100000:
pg_resetxlog_path = self.get_bin_path('pg_resetxlog')
wal_dir = 'pg_xlog'
else:
pg_resetxlog_path = self.get_bin_path('pg_resetwal')
wal_dir = 'pg_wal'

os.mkdir(
os.path.join(
backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status'))

pg_control_path = os.path.join(
backup_dir, 'backups', 'node',
backup_id, 'database', 'global', 'pg_control')

md5_before = hashlib.md5(
open(pg_control_path, 'rb').read()).hexdigest()

self.run_binary(
[
pg_resetxlog_path,
os.path.join(backup_dir, 'backups', 'node', backup_id, 'database'),
'-o 42',
'-f'
],
async=False)
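# pg_resetwal/pg_resetxlog -f rewrites pg_control in place (-o 42 merely
# bumps the next OID), so the file no longer matches the checksum recorded
# at backup time and validation is expected to fail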

md5_after = hashlib.md5(
open(pg_control_path, 'rb').read()).hexdigest()

if self.verbose:
print('\n MD5 BEFORE resetxlog: {0}\n MD5 AFTER resetxlog: {1}'.format(
md5_before, md5_after))

# Validate backup
try:
self.validate_pb(backup_dir, 'node')
self.assertEqual(
1, 0,
"Expecting Error because of pg_control change.\n "
"Output: {0} \n CMD: {1}".format(
self.output, self.cmd))
except ProbackupException as e:
self.assertIn(
'data files are corrupted',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

# Clean after yourself
self.del_test_dir(module_name, fname)

# validate empty backup list
# page from future during validate
# page from future during backup

# corrupt a block so the file becomes unaligned:
# 712     Assert(header.compressed_size <= BLCKSZ);
# 713
# 714     read_len = fread(compressed_page.data, 1,
# 715         MAXALIGN(header.compressed_size), in);
# 716     if (read_len != MAXALIGN(header.compressed_size))
# -> 717      elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
# 718         blknum, file->path, read_len, header.compressed_size);