1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-01-09 14:45:47 +02:00

Merge branch 'master' into stable

This commit is contained in:
Arthur Zakirov 2018-10-09 20:07:09 +03:00
commit 369695e376
25 changed files with 922 additions and 244 deletions

View File

@ -32,7 +32,7 @@ else
srchome=$(top_srcdir)
endif
ifeq ($(MAJORVERSION),10)
ifeq (,$(filter 9.5 9.6,$(MAJORVERSION)))
OBJS += src/walmethods.o
EXTRA_CLEAN += src/walmethods.c src/walmethods.h
INCLUDES += src/walmethods.h
@ -64,7 +64,7 @@ src/streamutil.h: $(top_srcdir)/src/bin/pg_basebackup/streamutil.h
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/streamutil.h $@
ifeq ($(MAJORVERSION),10)
ifeq (,$(filter 9.5 9.6,$(MAJORVERSION)))
src/walmethods.c: $(top_srcdir)/src/bin/pg_basebackup/walmethods.c
rm -f $@ && $(LN_S) $(srchome)/src/bin/pg_basebackup/walmethods.c $@
src/walmethods.h: $(top_srcdir)/src/bin/pg_basebackup/walmethods.h

View File

@ -3,12 +3,12 @@
`pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure.
The utility is compatible with:
* PostgreSQL 9.5, 9.6, 10;
* PostgreSQL 9.5, 9.6, 10, 11;
`PTRACK` backup support provided via following options:
* vanilla PostgreSQL compiled with ptrack patch. Currently there are patches for [PostgreSQL 9.6](https://gist.githubusercontent.com/gsmol/5b615c971dfd461c76ef41a118ff4d97/raw/e471251983f14e980041f43bea7709b8246f4178/ptrack_9.6.6_v1.5.patch) and [PostgreSQL 10](https://gist.githubusercontent.com/gsmol/be8ee2a132b88463821021fd910d960e/raw/de24f9499f4f314a4a3e5fae5ed4edb945964df8/ptrack_10.1_v1.5.patch)
* Postgres Pro Standard 9.5, 9.6
* Postgres Pro Enterprise
* Postgres Pro Standard 9.5, 9.6, 10
* Postgres Pro Enterprise 9.5, 9.6, 10
As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data:
* Choosing between full and page-level incremental backups to speed up backup and recovery
@ -97,4 +97,4 @@ Postgres Professional, Moscow, Russia.
## Credits
`pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.
`pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier.

View File

@ -650,8 +650,9 @@ do_backup_instance(void)
* reading WAL segments present in archives up to the point
* where this backup has started.
*/
extractPageMap(arclog_path, prev_backup->start_lsn, current.tli,
current.start_lsn, backup_files_list);
extractPageMap(arclog_path, current.tli, xlog_seg_size,
prev_backup->start_lsn, current.start_lsn,
backup_files_list);
}
else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
@ -827,6 +828,11 @@ do_backup(time_t start_time)
current.primary_conninfo = pgut_get_conninfo_string(backup_conn);
#if PG_VERSION_NUM >= 110000
if (!RetrieveWalSegSize(backup_conn))
elog(ERROR, "Failed to retreive wal_segment_size");
#endif
current.compress_alg = compress_alg;
current.compress_level = compress_level;
@ -918,8 +924,9 @@ do_backup(time_t start_time)
/* compute size of wal files of this backup stored in the archive */
if (!current.stream)
{
current.wal_bytes = XLOG_SEG_SIZE *
(current.stop_lsn/XLogSegSize - current.start_lsn/XLogSegSize + 1);
current.wal_bytes = xlog_seg_size *
(current.stop_lsn / xlog_seg_size -
current.start_lsn / xlog_seg_size + 1);
}
/* Backup is done. Update backup status */
@ -1142,10 +1149,11 @@ pg_switch_wal(PGconn *conn)
res = pgut_execute(conn, "SET client_min_messages = warning;", 0, NULL);
PQclear(res);
if (server_version >= 100000)
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL);
else
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL);
#if PG_VERSION_NUM >= 100000
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL);
#else
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL);
#endif
PQclear(res);
}
@ -1466,10 +1474,10 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
tli = get_current_timeline(false);
/* Compute the name of the WAL file containing requested LSN */
XLByteToSeg(lsn, targetSegNo);
GetXLogSegNo(lsn, targetSegNo, xlog_seg_size);
if (wait_prev_segment)
targetSegNo--;
XLogFileName(wal_segment, tli, targetSegNo);
GetXLogFileName(wal_segment, tli, targetSegNo, xlog_seg_size);
/*
* In pg_start_backup we wait for 'lsn' in 'pg_wal' directory iff it is
@ -1535,7 +1543,7 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
/*
* A WAL segment found. Check LSN on it.
*/
if (wal_contains_lsn(wal_segment_dir, lsn, tli))
if (wal_contains_lsn(wal_segment_dir, lsn, tli, xlog_seg_size))
/* Target LSN was found */
{
elog(LOG, "Found LSN: %X/%X", (uint32) (lsn >> 32), (uint32) lsn);
@ -1584,9 +1592,6 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
while (true)
{
PGresult *res;
uint32 lsn_hi;
uint32 lsn_lo;
XLogRecPtr replica_lsn;
/*
@ -1595,12 +1600,7 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
*/
if (is_start_backup)
{
if (server_version >= 100000)
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_replay_lsn()",
0, NULL);
else
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_replay_location()",
0, NULL);
replica_lsn = get_checkpoint_location(backup_conn);
}
/*
* For lsn from pg_stop_backup() we need it only to be received by
@ -1608,19 +1608,24 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
*/
else
{
if (server_version >= 100000)
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
0, NULL);
else
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
0, NULL);
}
PGresult *res;
uint32 lsn_hi;
uint32 lsn_lo;
/* Extract timeline and LSN from result */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
/* Calculate LSN */
replica_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
PQclear(res);
#if PG_VERSION_NUM >= 100000
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
0, NULL);
#else
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
0, NULL);
#endif
/* Extract LSN from result */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
/* Calculate LSN */
replica_lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
PQclear(res);
}
/* target lsn was replicated */
if (replica_lsn >= lsn)
@ -1948,7 +1953,7 @@ pg_stop_backup(pgBackup *backup)
elog(LOG, "Getting the Recovery Time from WAL");
if (!read_recovery_info(xlog_path, backup->tli,
if (!read_recovery_info(xlog_path, backup->tli, xlog_seg_size,
backup->start_lsn, backup->stop_lsn,
&backup->recovery_time, &backup->recovery_xid))
{
@ -2201,7 +2206,8 @@ parse_backup_filelist_filenames(parray *files, const char *root)
/* Yes, it is */
if (sscanf_result == 2 &&
strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
strncmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY,
strlen(TABLESPACE_VERSION_DIRECTORY)) == 0)
set_cfs_datafiles(files, root, relative, i);
}
}
@ -2555,7 +2561,7 @@ StreamLog(void *arg)
/*
* Always start streaming at the beginning of a segment
*/
startpos -= startpos % XLOG_SEG_SIZE;
startpos -= startpos % xlog_seg_size;
/* Initialize timeout */
stream_stop_timeout = 0;

View File

@ -572,23 +572,23 @@ readBackupControlFile(const char *path)
pgBackupInit(backup);
if (access(path, F_OK) != 0)
{
elog(WARNING, "control file \"%s\" doesn't exist", path);
elog(WARNING, "Control file \"%s\" doesn't exist", path);
pgBackupFree(backup);
return NULL;
}
parsed_options = pgut_readopt(path, options, WARNING);
parsed_options = pgut_readopt(path, options, WARNING, true);
if (parsed_options == 0)
{
elog(WARNING, "control file \"%s\" is empty", path);
elog(WARNING, "Control file \"%s\" is empty", path);
pgBackupFree(backup);
return NULL;
}
if (backup->start_time == 0)
{
elog(WARNING, "invalid ID/start-time, control file \"%s\" is corrupted", path);
elog(WARNING, "Invalid ID/start-time, control file \"%s\" is corrupted", path);
pgBackupFree(backup);
return NULL;
}
@ -607,7 +607,7 @@ readBackupControlFile(const char *path)
if (sscanf(start_lsn, "%X/%X", &xlogid, &xrecoff) == 2)
backup->start_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
else
elog(WARNING, "invalid START_LSN \"%s\"", start_lsn);
elog(WARNING, "Invalid START_LSN \"%s\"", start_lsn);
free(start_lsn);
}
@ -619,7 +619,7 @@ readBackupControlFile(const char *path)
if (sscanf(stop_lsn, "%X/%X", &xlogid, &xrecoff) == 2)
backup->stop_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
else
elog(WARNING, "invalid STOP_LSN \"%s\"", stop_lsn);
elog(WARNING, "Invalid STOP_LSN \"%s\"", stop_lsn);
free(stop_lsn);
}
@ -644,7 +644,7 @@ readBackupControlFile(const char *path)
else if (strcmp(status, "CORRUPT") == 0)
backup->status = BACKUP_STATUS_CORRUPT;
else
elog(WARNING, "invalid STATUS \"%s\"", status);
elog(WARNING, "Invalid STATUS \"%s\"", status);
free(status);
}

View File

@ -102,6 +102,13 @@ void
pgBackupConfigInit(pgBackupConfig *config)
{
config->system_identifier = 0;
#if PG_VERSION_NUM >= 110000
config->xlog_seg_size = 0;
#else
config->xlog_seg_size = XLOG_SEG_SIZE;
#endif
config->pgdata = NULL;
config->pgdatabase = NULL;
config->pghost = NULL;
@ -140,6 +147,9 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
fprintf(out, "#Backup instance info\n");
fprintf(out, "PGDATA = %s\n", config->pgdata);
fprintf(out, "system-identifier = " UINT64_FORMAT "\n", config->system_identifier);
#if PG_VERSION_NUM >= 110000
fprintf(out, "xlog-seg-size = %u\n", config->xlog_seg_size);
#endif
fprintf(out, "#Connection parameters:\n");
if (config->pgdatabase)
@ -161,12 +171,12 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
if (config->master_user)
fprintf(out, "master-user = %s\n", config->master_user);
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_MS,
&res, &unit);
fprintf(out, "replica-timeout = " UINT64_FORMAT "%s\n", res, unit);
fprintf(out, "#Archive parameters:\n");
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_MS,
&res, &unit);
fprintf(out, "archive-timeout = " UINT64_FORMAT "%s\n", res, unit);
@ -183,11 +193,11 @@ writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
fprintf(out, "log-directory = %s\n", config->log_directory);
/* Convert values from base unit */
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
&res, &unit);
&res, &unit);
fprintf(out, "log-rotation-size = " UINT64_FORMAT "%s\n", res, (res)?unit:"KB");
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
&res, &unit);
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_MS,
&res, &unit);
fprintf(out, "log-rotation-age = " UINT64_FORMAT "%s\n", res, (res)?unit:"min");
fprintf(out, "#Retention parameters:\n");
@ -237,8 +247,8 @@ readBackupCatalogConfigFile(void)
{ 's', 0, "log-filename", &(config->log_filename), SOURCE_CMDLINE },
{ 's', 0, "error-log-filename", &(config->error_log_filename), SOURCE_CMDLINE },
{ 's', 0, "log-directory", &(config->log_directory), SOURCE_CMDLINE },
{ 'u', 0, "log-rotation-size", &(config->log_rotation_size), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
{ 'u', 0, "log-rotation-age", &(config->log_rotation_age), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{ 'U', 0, "log-rotation-size", &(config->log_rotation_size), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
{ 'U', 0, "log-rotation-age", &(config->log_rotation_age), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
/* connection options */
{ 's', 0, "pgdata", &(config->pgdata), SOURCE_FILE_STRICT },
{ 's', 0, "pgdatabase", &(config->pgdatabase), SOURCE_FILE_STRICT },
@ -250,11 +260,14 @@ readBackupCatalogConfigFile(void)
{ 's', 0, "master-port", &(config->master_port), SOURCE_FILE_STRICT },
{ 's', 0, "master-db", &(config->master_db), SOURCE_FILE_STRICT },
{ 's', 0, "master-user", &(config->master_user), SOURCE_FILE_STRICT },
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
/* other options */
{ 'U', 0, "system-identifier", &(config->system_identifier), SOURCE_FILE_STRICT },
#if PG_VERSION_NUM >= 110000
{'u', 0, "xlog-seg-size", &config->xlog_seg_size, SOURCE_FILE_STRICT},
#endif
/* archive options */
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
{0}
};
@ -263,11 +276,44 @@ readBackupCatalogConfigFile(void)
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
pgBackupConfigInit(config);
pgut_readopt(path, options, ERROR);
pgut_readopt(path, options, ERROR, true);
#if PG_VERSION_NUM >= 110000
if (!IsValidWalSegSize(config->xlog_seg_size))
elog(ERROR, "Invalid WAL segment size %u", config->xlog_seg_size);
#endif
return config;
}
/*
 * Read xlog-seg-size from BACKUP_CATALOG_CONF_FILE.
 *
 * On PostgreSQL 11+ the WAL segment size is instance-specific, so it is
 * read from the backup catalog config and validated with
 * IsValidWalSegSize(); on older versions the segment size is fixed at
 * compile time and XLOG_SEG_SIZE is returned directly.
 *
 * Raises ERROR (via elog/pgut_readopt) if the config file cannot be read
 * or the stored value is not a valid WAL segment size.
 */
uint32
get_config_xlog_seg_size(void)
{
#if PG_VERSION_NUM >= 110000
	char		path[MAXPGPATH];

	/*
	 * Initialize explicitly: if the config file does not contain the
	 * "xlog-seg-size" key, pgut_readopt() leaves the target untouched and
	 * we would otherwise read an indeterminate value (undefined behavior).
	 * Zero deterministically fails the IsValidWalSegSize() check below.
	 */
	uint32		seg_size = 0;
	pgut_option options[] =
	{
		{'u', 0, "xlog-seg-size", &seg_size, SOURCE_FILE_STRICT},
		{0}
	};

	join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
	pgut_readopt(path, options, ERROR, false);

	if (!IsValidWalSegSize(seg_size))
		elog(ERROR, "Invalid WAL segment size %u", seg_size);

	return seg_size;
#else
	/* Pre-11 servers: segment size is a compile-time constant. */
	return (uint32) XLOG_SEG_SIZE;
#endif
}
static void
opt_log_level_console(pgut_option *opt, const char *arg)
{
@ -349,6 +395,11 @@ show_configure_json(pgBackupConfig *config)
json_add_key(buf, "system-identifier", json_level, true);
appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier);
#if PG_VERSION_NUM >= 110000
json_add_key(buf, "xlog-seg-size", json_level, true);
appendPQExpBuffer(buf, "%u", config->xlog_seg_size);
#endif
/* Connection parameters */
if (config->pgdatabase)
json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true);
@ -373,13 +424,13 @@ show_configure_json(pgBackupConfig *config)
true);
json_add_key(buf, "replica-timeout", json_level, true);
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_S,
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_MS,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
/* Archive parameters */
json_add_key(buf, "archive-timeout", json_level, true);
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_S,
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_MS,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
@ -416,7 +467,7 @@ show_configure_json(pgBackupConfig *config)
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"KB");
json_add_key(buf, "log-rotation-age", json_level, true);
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_S,
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_MS,
&res, &unit);
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"min");

View File

@ -1405,3 +1405,215 @@ calc_file_checksum(pgFile *file)
return true;
}
/* Validate given page
 * return value:
 * 0 - if the page is not found
 * 1 - if the page is found and valid
 * -1 - if the page is found but invalid
 */
#define PAGE_IS_NOT_FOUND 0
#define PAGE_IS_FOUND_AND_VALID 1
#define PAGE_IS_FOUND_AND__NOT_VALID -1
/*
 * Check a single data page: NULL check, zero-page check, page-header
 * sanity check, then either a checksum comparison (when checksums are
 * enabled) or an LSN-vs-stop_lsn comparison.  A page whose header LSN is
 * newer than stop_lsn is reported with WARNING and treated as invalid.
 */
static int
validate_one_page(Page page, pgFile *file,
				  BlockNumber blknum, XLogRecPtr stop_lsn,
				  uint32 checksum_version)
{
	PageHeader	phdr;
	XLogRecPtr	lsn;
	bool		page_header_is_sane = false;
	bool		checksum_is_ok = false;

	/* new level of paranoia */
	if (page == NULL)
	{
		elog(LOG, "File %s, block %u, page is NULL",
			 file->path, blknum);
		return PAGE_IS_NOT_FOUND;
	}

	if (PageIsNew(page))
	{
		int			i;

		/* Check if the page is zeroed. */
		for(i = 0; i < BLCKSZ && page[i] == 0; i++);

		/* An all-zero page is a legitimately uninitialized page. */
		if (i == BLCKSZ)
		{
			elog(LOG, "File: %s blknum %u, page is New. empty zeroed page",
				 file->path, blknum);
			return PAGE_IS_FOUND_AND_VALID;
		}
		else
		{
			elog(WARNING, "File: %s, block %u, page is New, but not zeroed",
				 file->path, blknum);
		}

		/*
		 * Page claims to be new but contains non-zero bytes: its header is
		 * not trustworthy, so skip the header/checksum checks and fall
		 * through to the invalid return at the end.
		 */
		page_header_is_sane = false;
	}
	else
	{
		/* Standard PostgreSQL page-header sanity checks. */
		phdr = (PageHeader) page;

		if (PageGetPageSize(phdr) == BLCKSZ &&
			PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION &&
			(phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
			phdr->pd_lower >= SizeOfPageHeaderData &&
			phdr->pd_lower <= phdr->pd_upper &&
			phdr->pd_upper <= phdr->pd_special &&
			phdr->pd_special <= BLCKSZ &&
			phdr->pd_special == MAXALIGN(phdr->pd_special))
			page_header_is_sane = true;
	}

	if (page_header_is_sane)
	{
		/* Verify checksum */
		if(checksum_version)
		{
			/*
			 * Compare the recomputed checksum against the one stored in
			 * the page header; a mismatch is reported as WARNING and the
			 * page is treated as invalid (no retry is performed).
			 */
			if (pg_checksum_page(page, file->segno * RELSEG_SIZE + blknum)
				== ((PageHeader) page)->pd_checksum)
			{
				checksum_is_ok = true;
			}
			else
			{
				elog(WARNING, "File: %s blknum %u have wrong checksum",
					 file->path, blknum);
			}
		}

		/* Checksums disabled: fall back to the page-LSN check only. */
		if (!checksum_version)
		{
			/* Get lsn from page header. Ensure that page is from our time */
			lsn = PageXLogRecPtrGet(phdr->pd_lsn);

			if (lsn > stop_lsn)
				elog(WARNING, "File: %s, block %u, checksum is not enabled."
					 "page is from future: pageLSN %X/%X stopLSN %X/%X",
					 file->path, blknum, (uint32) (lsn >> 32), (uint32) lsn,
					 (uint32) (stop_lsn >> 32), (uint32) stop_lsn);
			else
				return PAGE_IS_FOUND_AND_VALID;
		}

		/* Checksum matched: additionally require the LSN to be in range. */
		if (checksum_is_ok)
		{
			/* Get lsn from page header. Ensure that page is from our time */
			lsn = PageXLogRecPtrGet(phdr->pd_lsn);

			if (lsn > stop_lsn)
				elog(WARNING, "File: %s, block %u, checksum is correct."
					 "page is from future: pageLSN %X/%X stopLSN %X/%X",
					 file->path, blknum, (uint32) (lsn >> 32), (uint32) lsn,
					 (uint32) (stop_lsn >> 32), (uint32) stop_lsn);
			else
				return PAGE_IS_FOUND_AND_VALID;
		}
	}

	return PAGE_IS_FOUND_AND__NOT_VALID;
}
/*
 * Validate pages of a datafile in a backup one by one.
 *
 * Reads BackupPageHeader-framed pages from file->path, decompresses each
 * page if it was stored compressed, and runs validate_one_page() on it.
 *
 * Returns false if the file is missing or any page is invalid, true
 * otherwise.  Raises ERROR on read failures or corrupted framing.
 */
bool
check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
{
	size_t		read_len = 0;
	bool		is_valid = true;
	FILE	   *in;
	/*
	 * Fixed stack buffers: a stored page is at most BLCKSZ bytes
	 * (Assert below).  The original code declared these as NULL Page
	 * pointers and passed them to fread()/do_decompress(), writing
	 * through a null pointer.
	 */
	char		compressed_page[BLCKSZ]; /* read buffer */
	char		page[BLCKSZ];			 /* decompression target */
	/* Last block number seen; initialized so the monotonicity check and
	 * error messages never read an indeterminate value. */
	BlockNumber	blknum = 0;

	elog(VERBOSE, "validate relation blocks for file %s", file->name);

	in = fopen(file->path, PG_BINARY_R);
	if (in == NULL)
	{
		if (errno == ENOENT)
		{
			elog(WARNING, "File \"%s\" is not found", file->path);
			return false;
		}

		elog(ERROR, "cannot open file \"%s\": %s",
			 file->path, strerror(errno));
	}

	/* read and validate pages one by one */
	while (true)
	{
		BackupPageHeader header;

		/* read BackupPageHeader */
		read_len = fread(&header, 1, sizeof(header), in);
		if (read_len != sizeof(header))
		{
			int			errno_tmp = errno;

			if (read_len == 0 && feof(in))
				break;			/* EOF found */
			else if (read_len != 0 && feof(in))
				elog(ERROR,
					 "odd size page found at block %u of \"%s\"",
					 blknum, file->path);
			else
				elog(ERROR, "cannot read header of block %u of \"%s\": %s",
					 blknum, file->path, strerror(errno_tmp));
		}

		/* Block numbers must be non-decreasing within the file. */
		if (header.block < blknum)
			elog(ERROR, "backup is broken at file->path %s block %u",
				 file->path, blknum);

		blknum = header.block;

		if (header.compressed_size == PageIsTruncated)
		{
			elog(LOG, "File %s, block %u is truncated",
				 file->path, blknum);
			continue;
		}

		Assert(header.compressed_size <= BLCKSZ);

		read_len = fread(compressed_page, 1,
						 MAXALIGN(header.compressed_size), in);
		if (read_len != MAXALIGN(header.compressed_size))
			elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
				 blknum, file->path, read_len, header.compressed_size);

		if (header.compressed_size != BLCKSZ)
		{
			int32		uncompressed_size = 0;

			uncompressed_size = do_decompress(page, BLCKSZ,
											  compressed_page,
											  MAXALIGN(header.compressed_size),
											  file->compress_alg);

			if (uncompressed_size != BLCKSZ)
				elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
					 file->path, uncompressed_size);

			if (validate_one_page((Page) page, file, blknum,
								  stop_lsn, checksum_version) == PAGE_IS_FOUND_AND__NOT_VALID)
				is_valid = false;
		}
		else
		{
			/* Page was stored uncompressed: validate the raw buffer. */
			if (validate_one_page((Page) compressed_page, file, blknum,
								  stop_lsn, checksum_version) == PAGE_IS_FOUND_AND__NOT_VALID)
				is_valid = false;
		}
	}

	/* The original leaked the stream; close it on the success path too. */
	fclose(in);
	return is_valid;
}

View File

@ -15,7 +15,8 @@
#include <unistd.h>
static int pgBackupDeleteFiles(pgBackup *backup);
static void delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli);
static void delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
uint32 xlog_seg_size);
int
do_delete(time_t backup_id)
@ -23,8 +24,8 @@ do_delete(time_t backup_id)
int i;
parray *backup_list,
*delete_list;
pgBackup *target_backup = NULL;
time_t parent_id = 0;
bool backup_found = false;
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
@ -56,9 +57,9 @@ do_delete(time_t backup_id)
/* Save backup id to retrieve incremental backups */
parent_id = backup->start_time;
backup_found = true;
target_backup = backup;
}
else if (backup_found)
else if (target_backup)
{
if (backup->backup_mode != BACKUP_MODE_FULL &&
backup->parent_backup == parent_id)
@ -93,6 +94,8 @@ do_delete(time_t backup_id)
/* Clean WAL segments */
if (delete_wal)
{
Assert(target_backup);
/* Find oldest LSN, used by backups */
for (i = (int) parray_num(backup_list) - 1; i >= 0; i--)
{
@ -106,7 +109,7 @@ do_delete(time_t backup_id)
}
}
delete_walfiles(oldest_lsn, oldest_tli);
delete_walfiles(oldest_lsn, oldest_tli, xlog_seg_size);
}
/* cleanup */
@ -225,7 +228,7 @@ do_retention_purge(void)
/* Purge WAL files */
if (delete_wal)
{
delete_walfiles(oldest_lsn, oldest_tli);
delete_walfiles(oldest_lsn, oldest_tli, xlog_seg_size);
}
/* Cleanup */
@ -313,7 +316,8 @@ pgBackupDeleteFiles(pgBackup *backup)
* oldest_lsn.
*/
static void
delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli)
delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
uint32 xlog_seg_size)
{
XLogSegNo targetSegNo;
char oldestSegmentNeeded[MAXFNAMELEN];
@ -329,8 +333,9 @@ delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli)
if (!XLogRecPtrIsInvalid(oldest_lsn))
{
XLByteToSeg(oldest_lsn, targetSegNo);
XLogFileName(oldestSegmentNeeded, oldest_tli, targetSegNo);
GetXLogSegNo(oldest_lsn, targetSegNo, xlog_seg_size);
GetXLogFileName(oldestSegmentNeeded, oldest_tli, targetSegNo,
xlog_seg_size);
elog(LOG, "removing WAL segments older than %s", oldestSegmentNeeded);
}
@ -436,7 +441,7 @@ do_delete_instance(void)
parray_free(backup_list);
/* Delete all wal files. */
delete_walfiles(InvalidXLogRecPtr, 0);
delete_walfiles(InvalidXLogRecPtr, 0, xlog_seg_size);
/* Delete backup instance config file */
join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);

View File

@ -54,7 +54,7 @@ do_add_instance(void)
{
char path[MAXPGPATH];
char arclog_path_dir[MAXPGPATH];
struct stat st;
struct stat st;
pgBackupConfig *config = pgut_new(pgBackupConfig);
/* PGDATA is always required */
@ -64,6 +64,8 @@ do_add_instance(void)
/* Read system_identifier from PGDATA */
system_identifier = get_system_identifier(pgdata);
/* Starting from PostgreSQL 11 read WAL segment size from PGDATA */
xlog_seg_size = get_xlog_seg_size(pgdata);
/* Ensure that all root directories already exist */
if (access(backup_path, F_OK) != 0)
@ -97,6 +99,7 @@ do_add_instance(void)
*/
pgBackupConfigInit(config);
config->system_identifier = system_identifier;
config->xlog_seg_size = xlog_seg_size;
config->pgdata = pgdata;
writeBackupCatalogConfigFile(config);

View File

@ -80,7 +80,7 @@ do_merge(time_t backup_id)
base36enc(backup->start_time), status2str(backup->status));
if (backup->backup_mode == BACKUP_MODE_FULL)
elog(ERROR, "Backup %s if full backup",
elog(ERROR, "Backup %s is full backup",
base36enc(backup->start_time));
dest_backup = backup;
@ -320,9 +320,10 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
to_backup->data_bytes += file->write_size;
}
/* compute size of wal files of this backup stored in the archive */
if (!current.stream)
to_backup->wal_bytes = XLOG_SEG_SIZE *
(to_backup->stop_lsn / XLogSegSize - to_backup->start_lsn / XLogSegSize + 1);
if (!to_backup->stream)
to_backup->wal_bytes = xlog_seg_size *
(to_backup->stop_lsn / xlog_seg_size -
to_backup->start_lsn / xlog_seg_size + 1);
else
to_backup->wal_bytes = BYTES_INVALID;
@ -352,15 +353,10 @@ merge_files(void *arg)
merge_files_arg *argument = (merge_files_arg *) arg;
pgBackup *to_backup = argument->to_backup;
pgBackup *from_backup = argument->from_backup;
char tmp_file_path[MAXPGPATH];
int i,
num_files = parray_num(argument->files);
int to_root_len = strlen(argument->to_root);
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
join_path_components(tmp_file_path, argument->to_root, "tmp");
for (i = 0; i < num_files; i++)
{
pgFile *file = (pgFile *) parray_get(argument->files, i);
@ -433,8 +429,11 @@ merge_files(void *arg)
if (to_backup->compress_alg == PGLZ_COMPRESS ||
to_backup->compress_alg == ZLIB_COMPRESS)
{
char tmp_file_path[MAXPGPATH];
char *prev_path;
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_path_tmp);
/* Start the magic */
/*

View File

@ -89,6 +89,7 @@ typedef struct XLogPageReadPrivate
int thread_num;
const char *archivedir;
TimeLineID tli;
uint32 xlog_seg_size;
bool manual_switch;
bool need_switch;
@ -126,7 +127,8 @@ static int SimpleXLogPageRead(XLogReaderState *xlogreader,
TimeLineID *pageTLI);
static XLogReaderState *InitXLogPageRead(XLogPageReadPrivate *private_data,
const char *archivedir,
TimeLineID tli, bool allocate_reader);
TimeLineID tli, uint32 xlog_seg_size,
bool allocate_reader);
static void CleanupXLogPageRead(XLogReaderState *xlogreader);
static void PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data,
int elevel);
@ -160,7 +162,8 @@ switchToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg)
return false;
/* Adjust next record position */
XLogSegNoOffsetToRecPtr(private_data->xlogsegno, 0, arg->startpoint);
GetXLogRecPtr(private_data->xlogsegno, 0,
private_data->xlog_seg_size, arg->startpoint);
/* We need to close previously opened file if it wasn't closed earlier */
CleanupXLogPageRead(xlogreader);
/* Skip over the page header and contrecord if any */
@ -200,7 +203,12 @@ doExtractPageMap(void *arg)
char *errormsg;
private_data = &extract_arg->private_data;
#if PG_VERSION_NUM >= 110000
xlogreader = XLogReaderAllocate(private_data->xlog_seg_size,
&SimpleXLogPageRead, private_data);
#else
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, private_data);
#endif
if (xlogreader == NULL)
elog(ERROR, "Thread [%d]: out of memory", private_data->thread_num);
xlogreader->system_identifier = system_identifier;
@ -235,7 +243,7 @@ doExtractPageMap(void *arg)
if (interrupted)
elog(ERROR, "Thread [%d]: Interrupted during WAL reading",
private_data->thread_num);
private_data->thread_num);
/*
* We need to switch to the next WAL segment after reading previous
@ -292,7 +300,8 @@ doExtractPageMap(void *arg)
/* continue reading at next record */
extract_arg->startpoint = InvalidXLogRecPtr;
XLByteToSeg(xlogreader->EndRecPtr, nextSegNo);
GetXLogSegNo(xlogreader->EndRecPtr, nextSegNo,
private_data->xlog_seg_size);
} while (nextSegNo <= extract_arg->endSegNo &&
xlogreader->ReadRecPtr < extract_arg->endpoint);
@ -312,8 +321,8 @@ doExtractPageMap(void *arg)
* file.
*/
void
extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
XLogRecPtr endpoint, parray *files)
extractPageMap(const char *archivedir, TimeLineID tli, uint32 seg_size,
XLogRecPtr startpoint, XLogRecPtr endpoint, parray *files)
{
int i;
int threads_need = 0;
@ -333,7 +342,7 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
elog(ERROR, "Invalid endpoint value %X/%X",
(uint32) (endpoint >> 32), (uint32) (endpoint));
XLByteToSeg(endpoint, endSegNo);
GetXLogSegNo(endpoint, endSegNo, seg_size);
nextSegNoToRead = 0;
time(&start_time);
@ -349,7 +358,8 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
*/
for (i = 0; i < num_threads; i++)
{
InitXLogPageRead(&thread_args[i].private_data, archivedir, tli, false);
InitXLogPageRead(&thread_args[i].private_data, archivedir, tli,
seg_size, false);
thread_args[i].private_data.thread_num = i + 1;
thread_args[i].startpoint = startpoint;
@ -362,7 +372,7 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
/* Adjust startpoint to the next thread */
if (nextSegNoToRead == 0)
XLByteToSeg(startpoint, nextSegNoToRead);
GetXLogSegNo(startpoint, nextSegNoToRead, seg_size);
nextSegNoToRead++;
/*
@ -371,7 +381,7 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
*/
if (nextSegNoToRead > endSegNo)
break;
XLogSegNoOffsetToRecPtr(nextSegNoToRead, 0, startpoint);
GetXLogRecPtr(nextSegNoToRead, 0, seg_size, startpoint);
}
/* Run threads */
@ -405,7 +415,8 @@ extractPageMap(const char *archivedir, XLogRecPtr startpoint, TimeLineID tli,
*/
static void
validate_backup_wal_from_start_to_stop(pgBackup *backup,
char *backup_xlog_path, TimeLineID tli)
char *backup_xlog_path, TimeLineID tli,
uint32 xlog_seg_size)
{
XLogRecPtr startpoint = backup->start_lsn;
XLogRecord *record;
@ -414,7 +425,8 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
XLogPageReadPrivate private;
bool got_endpoint = false;
xlogreader = InitXLogPageRead(&private, backup_xlog_path, tli, true);
xlogreader = InitXLogPageRead(&private, backup_xlog_path, tli,
xlog_seg_size, true);
while (true)
{
@ -468,12 +480,10 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
* up to the given recovery target.
*/
void
validate_wal(pgBackup *backup,
const char *archivedir,
time_t target_time,
TransactionId target_xid,
validate_wal(pgBackup *backup, const char *archivedir,
time_t target_time, TransactionId target_xid,
XLogRecPtr target_lsn,
TimeLineID tli)
TimeLineID tli, uint32 seg_size)
{
XLogRecPtr startpoint = backup->start_lsn;
const char *backup_id;
@ -510,10 +520,12 @@ validate_wal(pgBackup *backup,
snprintf(backup_xlog_path, sizeof(backup_xlog_path), "/%s/%s/%s/%s",
backup_instance_path, backup_id, DATABASE_DIR, PG_XLOG_DIR);
validate_backup_wal_from_start_to_stop(backup, backup_xlog_path, tli);
validate_backup_wal_from_start_to_stop(backup, backup_xlog_path, tli,
seg_size);
}
else
validate_backup_wal_from_start_to_stop(backup, (char *) archivedir, tli);
validate_backup_wal_from_start_to_stop(backup, (char *) archivedir, tli,
seg_size);
if (backup->status == BACKUP_STATUS_CORRUPT)
{
@ -543,7 +555,8 @@ validate_wal(pgBackup *backup,
* up to the given recovery target.
* In any case we cannot restore to the point before stop_lsn.
*/
xlogreader = InitXLogPageRead(&private, archivedir, tli, true);
xlogreader = InitXLogPageRead(&private, archivedir, tli, seg_size,
true);
/* We can restore at least up to the backup end */
time2iso(last_timestamp, lengthof(last_timestamp), backup->recovery_time);
@ -639,7 +652,7 @@ validate_wal(pgBackup *backup,
* pg_stop_backup().
*/
bool
read_recovery_info(const char *archivedir, TimeLineID tli,
read_recovery_info(const char *archivedir, TimeLineID tli, uint32 seg_size,
XLogRecPtr start_lsn, XLogRecPtr stop_lsn,
time_t *recovery_time, TransactionId *recovery_xid)
{
@ -656,7 +669,7 @@ read_recovery_info(const char *archivedir, TimeLineID tli,
elog(ERROR, "Invalid stop_lsn value %X/%X",
(uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
xlogreader = InitXLogPageRead(&private, archivedir, tli, true);
xlogreader = InitXLogPageRead(&private, archivedir, tli, seg_size, true);
/* Read records from stop_lsn down to start_lsn */
do
@ -711,7 +724,7 @@ cleanup:
*/
bool
wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
TimeLineID target_tli)
TimeLineID target_tli, uint32 seg_size)
{
XLogReaderState *xlogreader;
XLogPageReadPrivate private;
@ -722,7 +735,8 @@ wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
elog(ERROR, "Invalid target_lsn value %X/%X",
(uint32) (target_lsn >> 32), (uint32) (target_lsn));
xlogreader = InitXLogPageRead(&private, archivedir, target_tli, true);
xlogreader = InitXLogPageRead(&private, archivedir, target_tli, seg_size,
true);
res = XLogReadRecord(xlogreader, target_lsn, &errormsg) != NULL;
/* Didn't find 'target_lsn' and there is no error, return false */
@ -761,13 +775,14 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
uint32 targetPageOff;
private_data = (XLogPageReadPrivate *) xlogreader->private_data;
targetPageOff = targetPagePtr % XLogSegSize;
targetPageOff = targetPagePtr % private_data->xlog_seg_size;
/*
* See if we need to switch to a new segment because the requested record
* is not in the currently open one.
*/
if (!XLByteInSeg(targetPagePtr, private_data->xlogsegno))
if (!IsInXLogSeg(targetPagePtr, private_data->xlogsegno,
private_data->xlog_seg_size))
{
elog(VERBOSE, "Thread [%d]: Need to switch to segno next to %X/%X, current LSN %X/%X",
private_data->thread_num,
@ -805,22 +820,24 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
}
}
XLByteToSeg(targetPagePtr, private_data->xlogsegno);
GetXLogSegNo(targetPagePtr, private_data->xlogsegno,
private_data->xlog_seg_size);
/* Try to switch to the next WAL segment */
if (!private_data->xlogexists)
{
char xlogfname[MAXFNAMELEN];
XLogFileName(xlogfname, private_data->tli, private_data->xlogsegno);
GetXLogFileName(xlogfname, private_data->tli, private_data->xlogsegno,
private_data->xlog_seg_size);
snprintf(private_data->xlogpath, MAXPGPATH, "%s/%s",
private_data->archivedir, xlogfname);
if (fileExists(private_data->xlogpath))
{
elog(LOG, "Thread [%d]: Opening WAL segment \"%s\"",
private_data->thread_num,
private_data->xlogpath);
private_data->thread_num,
private_data->xlogpath);
private_data->xlogexists = true;
private_data->xlogfile = open(private_data->xlogpath,
@ -829,9 +846,9 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
if (private_data->xlogfile < 0)
{
elog(WARNING, "Thread [%d]: Could not open WAL segment \"%s\": %s",
private_data->thread_num,
private_data->xlogpath,
strerror(errno));
private_data->thread_num,
private_data->xlogpath,
strerror(errno));
return -1;
}
}
@ -876,14 +893,14 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
if (lseek(private_data->xlogfile, (off_t) targetPageOff, SEEK_SET) < 0)
{
elog(WARNING, "Thread [%d]: Could not seek in WAL segment \"%s\": %s",
private_data->thread_num, private_data->xlogpath, strerror(errno));
private_data->thread_num, private_data->xlogpath, strerror(errno));
return -1;
}
if (read(private_data->xlogfile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
elog(WARNING, "Thread [%d]: Could not read from WAL segment \"%s\": %s",
private_data->thread_num, private_data->xlogpath, strerror(errno));
private_data->thread_num, private_data->xlogpath, strerror(errno));
return -1;
}
}
@ -919,18 +936,24 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
*/
static XLogReaderState *
InitXLogPageRead(XLogPageReadPrivate *private_data, const char *archivedir,
TimeLineID tli, bool allocate_reader)
TimeLineID tli, uint32 xlog_seg_size, bool allocate_reader)
{
XLogReaderState *xlogreader = NULL;
MemSet(private_data, 0, sizeof(XLogPageReadPrivate));
private_data->archivedir = archivedir;
private_data->tli = tli;
private_data->xlog_seg_size = xlog_seg_size;
private_data->xlogfile = -1;
if (allocate_reader)
{
#if PG_VERSION_NUM >= 110000
xlogreader = XLogReaderAllocate(xlog_seg_size,
&SimpleXLogPageRead, private_data);
#else
xlogreader = XLogReaderAllocate(&SimpleXLogPageRead, private_data);
#endif
if (xlogreader == NULL)
elog(ERROR, "out of memory");
xlogreader->system_identifier = system_identifier;
@ -974,8 +997,8 @@ PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data, int elevel)
*/
if (!private_data->xlogexists)
elog(elevel, "Thread [%d]: WAL segment \"%s\" is absent",
private_data->thread_num,
private_data->xlogpath);
private_data->thread_num,
private_data->xlogpath);
else if (private_data->xlogfile != -1)
elog(elevel, "Thread [%d]: Possible WAL corruption. "
"Error has occured during reading WAL segment \"%s\"",

View File

@ -19,7 +19,7 @@
#include <unistd.h>
#include "pg_getopt.h"
const char *PROGRAM_VERSION = "2.0.20";
const char *PROGRAM_VERSION = "2.0.21";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
@ -93,6 +93,16 @@ bool compress_shortcut = false;
char *instance_name;
uint64 system_identifier = 0;
/*
 * Size of a WAL segment in bytes.
 *
 * Starting from PostgreSQL 11 the WAL segment size may vary and is
 * retrieved at run time (hence the 0 placeholder here). Prior to
 * PostgreSQL 11, xlog_seg_size is the compile-time constant
 * XLOG_SEG_SIZE.
 */
#if PG_VERSION_NUM >= 110000
uint32 xlog_seg_size = 0;
#else
uint32 xlog_seg_size = XLOG_SEG_SIZE;
#endif
/* archive push options */
static char *wal_file_path;
static char *wal_file_name;
@ -131,14 +141,14 @@ static pgut_option options[] =
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMDLINE },
{ 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMDLINE },
{ 's', 'S', "slot", &replication_slot, SOURCE_CMDLINE },
{ 'u', 11, "archive-timeout", &archive_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{ 'u', 11, "archive-timeout", &archive_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
{ 'b', 12, "delete-wal", &delete_wal, SOURCE_CMDLINE },
{ 'b', 13, "delete-expired", &delete_expired, SOURCE_CMDLINE },
{ 's', 14, "master-db", &master_db, SOURCE_CMDLINE, },
{ 's', 15, "master-host", &master_host, SOURCE_CMDLINE, },
{ 's', 16, "master-port", &master_port, SOURCE_CMDLINE, },
{ 's', 17, "master-user", &master_user, SOURCE_CMDLINE, },
{ 'u', 18, "replica-timeout", &replica_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
{ 'u', 18, "replica-timeout", &replica_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
/* TODO not completed feature. Make it unavailable from user level
{ 'b', 18, "remote", &is_remote_backup, SOURCE_CMDLINE, }, */
/* restore options */
@ -172,8 +182,8 @@ static pgut_option options[] =
{ 's', 142, "log-filename", &log_filename, SOURCE_CMDLINE },
{ 's', 143, "error-log-filename", &error_log_filename, SOURCE_CMDLINE },
{ 's', 144, "log-directory", &log_directory, SOURCE_CMDLINE },
{ 'u', 145, "log-rotation-size", &log_rotation_size, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
{ 'u', 146, "log-rotation-age", &log_rotation_age, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MIN },
{ 'U', 145, "log-rotation-size", &log_rotation_size, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
{ 'U', 146, "log-rotation-age", &log_rotation_age, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
/* connection options */
{ 's', 'd', "pgdatabase", &pgut_dbname, SOURCE_CMDLINE },
{ 's', 'h', "pghost", &host, SOURCE_CMDLINE },
@ -184,6 +194,9 @@ static pgut_option options[] =
/* other options */
{ 'U', 150, "system-identifier", &system_identifier, SOURCE_FILE_STRICT },
{ 's', 151, "instance", &instance_name, SOURCE_CMDLINE },
#if PG_VERSION_NUM >= 110000
{ 'u', 152, "xlog-seg-size", &xlog_seg_size, SOURCE_FILE_STRICT},
#endif
/* archive-push options */
{ 's', 160, "wal-file-path", &wal_file_path, SOURCE_CMDLINE },
{ 's', 161, "wal-file-name", &wal_file_name, SOURCE_CMDLINE },
@ -211,6 +224,14 @@ main(int argc, char *argv[])
PROGRAM_NAME = get_progname(argv[0]);
set_pglocale_pgservice(argv[0], "pgscripts");
#if PG_VERSION_NUM >= 110000
/*
* Reset WAL segment size, we will retrieve it using RetrieveWalSegSize()
* later.
*/
WalSegSz = 0;
#endif
/*
* Save main thread's tid. It is used to call exit() in case of errors.
*/
@ -393,7 +414,7 @@ main(int argc, char *argv[])
/* Read options from configuration file */
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
pgut_readopt(path, options, ERROR);
pgut_readopt(path, options, ERROR, true);
}
/* Initialize logger */
@ -406,6 +427,14 @@ main(int argc, char *argv[])
if (pgdata != NULL && !is_absolute_path(pgdata))
elog(ERROR, "-D, --pgdata must be an absolute path");
#if PG_VERSION_NUM >= 110000
/* Check xlog-seg-size option */
if (instance_name &&
backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
backup_subcmd != ADD_INSTANCE_CMD && !IsValidWalSegSize(xlog_seg_size))
elog(ERROR, "Invalid WAL segment size %u", xlog_seg_size);
#endif
/* Sanity check of --backup-id option */
if (backup_id_string != NULL)
{

View File

@ -172,11 +172,13 @@ typedef enum ShowFormat
typedef struct pgBackupConfig
{
uint64 system_identifier;
char *pgdata;
const char *pgdatabase;
const char *pghost;
const char *pgport;
const char *pguser;
uint32 xlog_seg_size;
char *pgdata;
const char *pgdatabase;
const char *pghost;
const char *pgport;
const char *pguser;
const char *master_host;
const char *master_port;
@ -191,8 +193,8 @@ typedef struct pgBackupConfig
char *log_filename;
char *error_log_filename;
char *log_directory;
int log_rotation_size;
int log_rotation_age;
uint64 log_rotation_size;
uint64 log_rotation_age;
uint32 retention_redundancy;
uint32 retention_window;
@ -324,6 +326,26 @@ typedef struct
strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \
strcmp((fname) + XLOG_FNAME_LEN, ".gz") == 0)
/*
 * Version-independent wrappers over the core WAL-segment macros.
 *
 * In PostgreSQL 11 the WAL segment size became run-time configurable and
 * the core macros (XLByteToSeg, XLogSegNoOffsetToRecPtr, XLogFileName,
 * XLByteInSeg) gained a wal_segsz_bytes argument. These wrappers accept
 * that argument on every version and simply ignore it on pre-11 servers,
 * so callers can be written once against the new signature.
 */
#if PG_VERSION_NUM >= 110000
#define GetXLogSegNo(xlrp, logSegNo, wal_segsz_bytes) \
XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes)
#define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \
XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest)
#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \
XLogFileName(fname, tli, logSegNo, wal_segsz_bytes)
#define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \
XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes)
#else
#define GetXLogSegNo(xlrp, logSegNo, wal_segsz_bytes) \
XLByteToSeg(xlrp, logSegNo)
#define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \
XLogSegNoOffsetToRecPtr(segno, offset, dest)
#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \
XLogFileName(fname, tli, logSegNo)
#define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \
XLByteInSeg(xlrp, logSegNo)
#endif
/* directory options */
extern char *backup_path;
extern char backup_instance_path[MAXPGPATH];
@ -384,6 +406,7 @@ extern const char* deparse_compress_alg(int alg);
/* other options */
extern char *instance_name;
extern uint64 system_identifier;
extern uint32 xlog_seg_size;
/* show options */
extern ShowFormat show_format;
@ -441,6 +464,8 @@ extern void writeBackupCatalogConfig(FILE *out, pgBackupConfig *config);
extern void writeBackupCatalogConfigFile(pgBackupConfig *config);
extern pgBackupConfig* readBackupCatalogConfigFile(void);
extern uint32 get_config_xlog_seg_size(void);
/* in show.c */
extern int do_show(time_t requested_backup_id);
@ -539,38 +564,44 @@ extern void get_wal_file(const char *from_path, const char *to_path);
extern bool calc_file_checksum(pgFile *file);
extern bool check_file_pages(pgFile* file,
XLogRecPtr stop_lsn, uint32 checksum_version);
/* parsexlog.c */
extern void extractPageMap(const char *datadir,
XLogRecPtr startpoint,
TimeLineID tli,
XLogRecPtr endpoint,
parray *backup_files_list);
extern void extractPageMap(const char *archivedir,
TimeLineID tli, uint32 seg_size,
XLogRecPtr startpoint, XLogRecPtr endpoint,
parray *files);
extern void validate_wal(pgBackup *backup,
const char *archivedir,
time_t target_time,
TransactionId target_xid,
XLogRecPtr target_lsn,
TimeLineID tli);
TimeLineID tli, uint32 seg_size);
extern bool read_recovery_info(const char *archivedir, TimeLineID tli,
uint32 seg_size,
XLogRecPtr start_lsn, XLogRecPtr stop_lsn,
time_t *recovery_time,
TransactionId *recovery_xid);
extern bool wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
TimeLineID target_tli);
TimeLineID target_tli, uint32 seg_size);
/* in util.c */
extern TimeLineID get_current_timeline(bool safe);
extern XLogRecPtr get_checkpoint_location(PGconn *conn);
extern uint64 get_system_identifier(char *pgdata);
extern uint64 get_remote_system_identifier(PGconn *conn);
extern uint32 get_data_checksum_version(bool safe);
extern uint32 get_xlog_seg_size(char *pgdata_path);
extern void sanityChecks(void);
extern void time2iso(char *buf, size_t len, time_t time);
extern const char *status2str(BackupStatus status);
extern void remove_trailing_space(char *buf, int comment_mark);
extern void remove_not_digit(char *buf, size_t len, const char *str);
extern uint32 get_data_checksum_version(bool safe);
extern const char *base36enc(long unsigned int value);
extern char *base36enc_dup(long unsigned int value);
extern long unsigned int base36dec(const char *text);
extern uint64 get_system_identifier(char *pgdata);
extern uint64 get_remote_system_identifier(PGconn *conn);
extern pg_time_t timestamptz_to_time_t(TimestampTz t);
extern int parse_server_version(char *server_version_str);

View File

@ -318,8 +318,8 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
* because it's needed to form the name of xlog file.
*/
validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
rt->recovery_target_xid, rt->recovery_target_lsn,
base_full_backup->tli);
rt->recovery_target_xid, rt->recovery_target_lsn,
base_full_backup->tli, xlog_seg_size);
}
/* Orphanize every OK descendant of corrupted backup */
else
@ -627,7 +627,7 @@ restore_files(void *arg)
/* print size of restored file */
if (file->write_size != BYTES_INVALID)
elog(LOG, "Restored file %s : " INT64_FORMAT " bytes",
elog(VERBOSE, "Restored file %s : " INT64_FORMAT " bytes",
file->path, file->write_size);
}

View File

@ -19,6 +19,22 @@
#include "utils/json.h"
typedef struct ShowBackendRow
{
const char *instance;
const char *version;
char backup_id[20];
char recovery_time[100];
const char *mode;
const char *wal_mode;
char tli[20];
char duration[20];
char data_bytes[20];
char start_lsn[20];
char stop_lsn[20];
const char *status;
} ShowBackendRow;
static void show_instance_start(void);
static void show_instance_end(void);
@ -299,63 +315,194 @@ show_backup(time_t requested_backup_id)
static void
show_instance_plain(parray *backup_list, bool show_name)
{
#define SHOW_FIELDS_COUNT 12
int i;
const char *names[SHOW_FIELDS_COUNT] =
{ "Instance", "Version", "ID", "Recovery Time",
"Mode", "WAL", "Current/Parent TLI", "Time", "Data",
"Start LSN", "Stop LSN", "Status" };
const char *field_formats[SHOW_FIELDS_COUNT] =
{ " %-*s ", " %-*s ", " %-*s ", " %-*s ",
" %-*s ", " %-*s ", " %-*s ", " %*s ", " %*s ",
" %*s ", " %*s ", " %-*s "};
uint32 widths[SHOW_FIELDS_COUNT];
uint32 widths_sum = 0;
ShowBackendRow *rows;
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
widths[i] = strlen(names[i]);
rows = (ShowBackendRow *) palloc(parray_num(backup_list) *
sizeof(ShowBackendRow));
/*
* Fill row values and calculate maximum width of each field.
*/
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = parray_get(backup_list, i);
ShowBackendRow *row = &rows[i];
int cur = 0;
/* Instance */
row->instance = instance_name;
widths[cur] = Max(widths[cur], strlen(row->instance));
cur++;
/* Version */
row->version = backup->server_version[0] ?
backup->server_version : "----";
widths[cur] = Max(widths[cur], strlen(row->version));
cur++;
/* ID */
snprintf(row->backup_id, lengthof(row->backup_id), "%s",
base36enc(backup->start_time));
widths[cur] = Max(widths[cur], strlen(row->backup_id));
cur++;
/* Recovery Time */
if (backup->recovery_time != (time_t) 0)
time2iso(row->recovery_time, lengthof(row->recovery_time),
backup->recovery_time);
else
StrNCpy(row->recovery_time, "----", 4);
widths[cur] = Max(widths[cur], strlen(row->recovery_time));
cur++;
/* Mode */
row->mode = pgBackupGetBackupMode(backup);
widths[cur] = Max(widths[cur], strlen(row->mode));
cur++;
/* WAL */
row->wal_mode = backup->stream ? "STREAM": "ARCHIVE";
widths[cur] = Max(widths[cur], strlen(row->wal_mode));
cur++;
/* Current/Parent TLI */
snprintf(row->tli, lengthof(row->tli), "%u / %u",
backup->tli, get_parent_tli(backup->tli));
widths[cur] = Max(widths[cur], strlen(row->tli));
cur++;
/* Time */
if (backup->end_time != (time_t) 0)
snprintf(row->duration, lengthof(row->duration), "%.*lfs", 0,
difftime(backup->end_time, backup->start_time));
else
StrNCpy(row->duration, "----", 4);
widths[cur] = Max(widths[cur], strlen(row->duration));
cur++;
/* Data */
pretty_size(backup->data_bytes, row->data_bytes,
lengthof(row->data_bytes));
widths[cur] = Max(widths[cur], strlen(row->data_bytes));
cur++;
/* Start LSN */
snprintf(row->start_lsn, lengthof(row->start_lsn), "%X/%X",
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn);
widths[cur] = Max(widths[cur], strlen(row->start_lsn));
cur++;
/* Stop LSN */
snprintf(row->stop_lsn, lengthof(row->stop_lsn), "%X/%X",
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn);
widths[cur] = Max(widths[cur], strlen(row->stop_lsn));
cur++;
/* Status */
row->status = status2str(backup->status);
widths[cur] = Max(widths[cur], strlen(row->status));
}
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
widths_sum += widths[i] + 2 /* two space */;
if (show_name)
appendPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
/* if you add new fields here, fix the header */
/* show header */
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
appendPQExpBufferStr(&show_buf,
" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n");
appendPQExpBufferStr(&show_buf,
"============================================================================================================================================\n");
/*
* Print header.
*/
for (i = 0; i < widths_sum; i++)
appendPQExpBufferChar(&show_buf, '=');
appendPQExpBufferChar(&show_buf, '\n');
for (i = 0; i < SHOW_FIELDS_COUNT; i++)
{
appendPQExpBuffer(&show_buf, field_formats[i], widths[i], names[i]);
}
appendPQExpBufferChar(&show_buf, '\n');
for (i = 0; i < widths_sum; i++)
appendPQExpBufferChar(&show_buf, '=');
appendPQExpBufferChar(&show_buf, '\n');
/*
* Print values.
*/
for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = parray_get(backup_list, i);
TimeLineID parent_tli;
char timestamp[100] = "----";
char duration[20] = "----";
char data_bytes_str[10] = "----";
ShowBackendRow *row = &rows[i];
int cur = 0;
if (backup->recovery_time != (time_t) 0)
time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
if (backup->end_time != (time_t) 0)
snprintf(duration, lengthof(duration), "%.*lfs", 0,
difftime(backup->end_time, backup->start_time));
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->instance);
cur++;
/*
* Calculate Data field, in the case of full backup this shows the
* total amount of data. For a differential backup, this size is only
* the difference of data accumulated.
*/
pretty_size(backup->data_bytes, data_bytes_str,
lengthof(data_bytes_str));
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->version);
cur++;
/* Get parent timeline before printing */
parent_tli = get_parent_tli(backup->tli);
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->backup_id);
cur++;
appendPQExpBuffer(&show_buf,
" %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
instance_name,
(backup->server_version[0] ? backup->server_version : "----"),
base36enc(backup->start_time),
timestamp,
pgBackupGetBackupMode(backup),
backup->stream ? "STREAM": "ARCHIVE",
backup->tli,
parent_tli,
duration,
data_bytes_str,
(uint32) (backup->start_lsn >> 32),
(uint32) backup->start_lsn,
(uint32) (backup->stop_lsn >> 32),
(uint32) backup->stop_lsn,
status2str(backup->status));
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->recovery_time);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->mode);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->wal_mode);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->tli);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->duration);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->data_bytes);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->start_lsn);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->stop_lsn);
cur++;
appendPQExpBuffer(&show_buf, field_formats[cur], widths[cur],
row->status);
cur++;
appendPQExpBufferChar(&show_buf, '\n');
}
pfree(rows);
}
/*

View File

@ -13,6 +13,9 @@
#include <time.h>
#include "storage/bufpage.h"
#if PG_VERSION_NUM >= 110000
#include "streamutil.h"
#endif
const char *
base36enc(long unsigned int value)
@ -122,6 +125,46 @@ get_current_timeline(bool safe)
return ControlFile.checkPointCopy.ThisTimeLineID;
}
/*
 * Get the last checkpoint record pointer from pg_control.
 *
 * On servers >= 9.6 the value is read via SQL from
 * pg_catalog.pg_control_checkpoint(); the column is named
 * "checkpoint_lsn" on 10+ and "checkpoint_location" on 9.6.
 * On older servers the raw pg_control file is fetched over the
 * connection and parsed locally.
 */
XLogRecPtr
get_checkpoint_location(PGconn *conn)
{
#if PG_VERSION_NUM >= 90600
PGresult *res;
uint32 lsn_hi;
uint32 lsn_lo;
XLogRecPtr lsn;
#if PG_VERSION_NUM >= 100000
res = pgut_execute(conn,
"SELECT checkpoint_lsn FROM pg_catalog.pg_control_checkpoint()",
0, NULL);
#else
res = pgut_execute(conn,
"SELECT checkpoint_location FROM pg_catalog.pg_control_checkpoint()",
0, NULL);
#endif
/* NOTE(review): res is used unchecked — presumably pgut_execute
 * elog(ERROR)s on failure and never returns NULL; confirm. */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
PQclear(res);
/* Combine the high and low 32-bit halves into a 64-bit LSN */
lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
return lsn;
#else
char *buffer;
size_t size;
ControlFileData ControlFile;
/* Fetch pg_control from the server and parse the checkpoint pointer */
buffer = fetchFile(conn, "global/pg_control", &size);
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.checkPoint;
#endif
}
uint64
get_system_identifier(char *pgdata_path)
{
@ -172,6 +215,27 @@ get_remote_system_identifier(PGconn *conn)
#endif
}
/*
 * Return the WAL segment size (in bytes) of the cluster at pgdata_path.
 *
 * For PostgreSQL 11+ the size is read from the cluster's pg_control
 * file; 0 is returned when that file cannot be read (callers are then
 * expected to reject the value via IsValidWalSegSize). For older
 * versions the size is the compile-time constant XLOG_SEG_SIZE.
 */
uint32
get_xlog_seg_size(char *pgdata_path)
{
#if PG_VERSION_NUM >= 110000
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... NULL presumably signals a read failure — confirm
 * slurpFile's contract when its last ("safe") argument is false. */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);
return ControlFile.xlog_seg_size;
#else
return (uint32) XLOG_SEG_SIZE;
#endif
}
uint32
get_data_checksum_version(bool safe)
{

View File

@ -32,9 +32,9 @@ char *log_directory = NULL;
char log_path[MAXPGPATH] = "";
/* Maximum size of an individual log file in kilobytes */
int log_rotation_size = 0;
uint64 log_rotation_size = 0;
/* Maximum lifetime of an individual log file in minutes */
int log_rotation_age = 0;
uint64 log_rotation_age = 0;
/* Implementation for logging.h */

View File

@ -36,8 +36,8 @@ extern char log_path[MAXPGPATH];
#define LOG_ROTATION_SIZE_DEFAULT 0
#define LOG_ROTATION_AGE_DEFAULT 0
extern int log_rotation_size;
extern int log_rotation_age;
extern uint64 log_rotation_size;
extern uint64 log_rotation_age;
#define LOG_LEVEL_CONSOLE_DEFAULT INFO
#define LOG_LEVEL_FILE_DEFAULT LOG_OFF

View File

@ -19,15 +19,6 @@
#include "logger.h"
#include "pgut.h"
/* old gcc doesn't have LLONG_MAX. */
#ifndef LLONG_MAX
#if defined(HAVE_LONG_INT_64) || !defined(HAVE_LONG_LONG_INT_64)
#define LLONG_MAX LONG_MAX
#else
#define LLONG_MAX INT64CONST(0x7FFFFFFFFFFFFFFF)
#endif
#endif
#define MAX_TZDISP_HOUR 15 /* maximum allowed hour part */
#define SECS_PER_MINUTE 60
#define MINS_PER_HOUR 60
@ -100,11 +91,6 @@ static const unit_conversion memory_unit_conversion_table[] =
{"MB", OPTION_UNIT_XBLOCKS, 1024 / (XLOG_BLCKSZ / 1024)},
{"kB", OPTION_UNIT_XBLOCKS, -(XLOG_BLCKSZ / 1024)},
{"TB", OPTION_UNIT_XSEGS, (1024 * 1024 * 1024) / (XLOG_SEG_SIZE / 1024)},
{"GB", OPTION_UNIT_XSEGS, (1024 * 1024) / (XLOG_SEG_SIZE / 1024)},
{"MB", OPTION_UNIT_XSEGS, -(XLOG_SEG_SIZE / (1024 * 1024))},
{"kB", OPTION_UNIT_XSEGS, -(XLOG_SEG_SIZE / 1024)},
{""} /* end of table marker */
};
@ -303,7 +289,13 @@ convert_to_base_unit(int64 value, const char *unit,
if (table[i].multiplier < 0)
*base_value = value / (-table[i].multiplier);
else
{
/* Check for integer overflow first */
if (value > PG_INT64_MAX / table[i].multiplier)
return false;
*base_value = value * table[i].multiplier;
}
return true;
}
}
@ -333,7 +325,13 @@ convert_to_base_unit_u(uint64 value, const char *unit,
if (table[i].multiplier < 0)
*base_value = value / (-table[i].multiplier);
else
{
/* Check for integer overflow first */
if (value > PG_UINT64_MAX / table[i].multiplier)
return false;
*base_value = value * table[i].multiplier;
}
return true;
}
}
@ -371,6 +369,10 @@ convert_from_base_unit(int64 base_value, int base_unit,
*/
if (table[i].multiplier < 0)
{
/* Check for integer overflow first */
if (base_value > PG_INT64_MAX / (-table[i].multiplier))
continue;
*value = base_value * (-table[i].multiplier);
*unit = table[i].unit;
break;
@ -415,6 +417,10 @@ convert_from_base_unit_u(uint64 base_value, int base_unit,
*/
if (table[i].multiplier < 0)
{
/* Check for integer overflow first */
if (base_value > PG_UINT64_MAX / (-table[i].multiplier))
continue;
*value = base_value * (-table[i].multiplier);
*unit = table[i].unit;
break;
@ -612,7 +618,7 @@ parse_int32(const char *value, int32 *result, int flags)
if (strcmp(value, INFINITE_STR) == 0)
{
*result = INT_MAX;
*result = PG_INT32_MAX;
return true;
}
@ -621,12 +627,17 @@ parse_int32(const char *value, int32 *result, int flags)
if (endptr == value || (*endptr && flags == 0))
return false;
/* Check for integer overflow */
if (errno == ERANGE || val != (int64) ((int32) val))
return false;
if (!parse_unit(endptr, flags, val, &val))
return false;
/* Check for integer overflow again */
if (val != (int64) ((int32) val))
return false;
*result = val;
return true;
@ -644,7 +655,7 @@ parse_uint32(const char *value, uint32 *result, int flags)
if (strcmp(value, INFINITE_STR) == 0)
{
*result = UINT_MAX;
*result = PG_UINT32_MAX;
return true;
}
@ -653,12 +664,17 @@ parse_uint32(const char *value, uint32 *result, int flags)
if (endptr == value || (*endptr && flags == 0))
return false;
/* Check for integer overflow */
if (errno == ERANGE || val != (uint64) ((uint32) val))
return false;
if (!parse_unit_u(endptr, flags, val, &val))
return false;
/* Check for integer overflow again */
if (val != (uint64) ((uint32) val))
return false;
*result = val;
return true;
@ -676,7 +692,7 @@ parse_int64(const char *value, int64 *result, int flags)
if (strcmp(value, INFINITE_STR) == 0)
{
*result = LLONG_MAX;
*result = PG_INT64_MAX;
return true;
}
@ -714,13 +730,7 @@ parse_uint64(const char *value, uint64 *result, int flags)
if (strcmp(value, INFINITE_STR) == 0)
{
#if defined(HAVE_LONG_INT_64)
*result = ULONG_MAX;
#elif defined(HAVE_LONG_LONG_INT_64)
*result = ULLONG_MAX;
#else
*result = ULONG_MAX;
#endif
*result = PG_UINT64_MAX;
return true;
}
@ -1125,7 +1135,7 @@ key_equals(const char *lhs, const char *rhs)
* Return number of parsed options
*/
int
pgut_readopt(const char *path, pgut_option options[], int elevel)
pgut_readopt(const char *path, pgut_option options[], int elevel, bool strict)
{
FILE *fp;
char buf[1024];
@ -1165,7 +1175,7 @@ pgut_readopt(const char *path, pgut_option options[], int elevel)
break;
}
}
if (!options[i].type)
if (strict && !options[i].type)
elog(elevel, "invalid option \"%s\" in file \"%s\"", key, path);
}
}

View File

@ -111,7 +111,8 @@ extern bool in_cleanup;
extern bool in_password; /* User prompts password */
extern int pgut_getopt(int argc, char **argv, pgut_option options[]);
extern int pgut_readopt(const char *path, pgut_option options[], int elevel);
extern int pgut_readopt(const char *path, pgut_option options[], int elevel,
bool strict);
extern void pgut_getopt_env(pgut_option options[]);
extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);
extern void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata);

View File

@ -24,6 +24,8 @@ typedef struct
{
parray *files;
bool corrupted;
XLogRecPtr stop_lsn;
uint32 checksum_version;
/*
* Return value from the thread.
@ -100,6 +102,8 @@ pgBackupValidate(pgBackup *backup)
arg->files = files;
arg->corrupted = false;
arg->stop_lsn = backup->stop_lsn;
arg->checksum_version = backup->checksum_version;
/* By default assume there is an error */
threads_args[i].ret = 1;
@ -207,7 +211,13 @@ pgBackupValidateFiles(void *arg)
elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
file->path, file->crc, crc);
arguments->corrupted = true;
break;
/* validate relation blocks */
if (file->is_datafile)
{
if (!check_file_pages(file, arguments->stop_lsn, arguments->checksum_version))
arguments->corrupted = true;
}
}
}
@ -259,6 +269,8 @@ do_validate_all(void)
instance_name = dent->d_name;
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
xlog_seg_size = get_config_xlog_seg_size();
do_validate_instance();
}
}
@ -381,7 +393,7 @@ do_validate_instance(void)
/* Validate corresponding WAL files */
if (current_backup->status == BACKUP_STATUS_OK)
validate_wal(current_backup, arclog_path, 0,
0, 0, base_full_backup->tli);
0, 0, base_full_backup->tli, xlog_seg_size);
/*
* Mark every descendant of corrupted backup as orphan
@ -468,7 +480,8 @@ do_validate_instance(void)
//tmp_backup = find_parent_full_backup(dest_backup);
/* Revalidation successful, validate corresponding WAL files */
validate_wal(backup, arclog_path, 0,
0, 0, current_backup->tli);
0, 0, current_backup->tli,
xlog_seg_size);
}
}

View File

@ -93,7 +93,7 @@ class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase):
)
try:
self.node.start()
self.node.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
@ -151,7 +151,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
"ERROR: File pg_compression not found in tablespace dir"
)
try:
self.node.start()
self.node.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
@ -189,7 +189,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
"ERROR: File pg_compression not found in backup dir"
)
try:
self.node.start()
self.node.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
@ -213,11 +213,13 @@ class CfsRestoreNoencTest(CfsRestoreBase):
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
self.node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
self.node_new.cleanup()
node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
node_new.cleanup()
try:
self.restore_node(self.backup_dir, 'node', self.node_new, backup_id=self.backup_id)
self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id)
node_new.append_conf("postgresql.auto.conf",
"port = {0}".format(node_new.port))
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
@ -230,7 +232,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
"ERROR: File pg_compression not found in backup dir"
)
try:
self.node_new.start()
node_new.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
@ -240,10 +242,10 @@ class CfsRestoreNoencTest(CfsRestoreBase):
)
self.assertEqual(
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
self.node_new.cleanup()
node_new.cleanup()
# @unittest.expectedFailure
# @unittest.skip("skip")
@ -255,11 +257,13 @@ class CfsRestoreNoencTest(CfsRestoreBase):
self.node.cleanup()
shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name))
self.node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
self.node_new.cleanup()
node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname))
node_new.cleanup()
try:
self.restore_node(self.backup_dir, 'node', self.node_new, backup_id=self.backup_id, options=['-j', '5'])
self.restore_node(self.backup_dir, 'node', node_new, backup_id=self.backup_id, options=['-j', '5'])
node_new.append_conf("postgresql.auto.conf",
"port = {0}".format(node_new.port))
except ProbackupException as e:
self.fail(
"ERROR: Restore from full backup failed. \n {0} \n {1}".format(
@ -272,7 +276,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
"ERROR: File pg_compression not found in backup dir"
)
try:
self.node_new.start()
node_new.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
@ -282,10 +286,10 @@ class CfsRestoreNoencTest(CfsRestoreBase):
)
self.assertEqual(
repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')),
repr(self.table_t1)
)
self.node_new.cleanup()
node_new.cleanup()
# @unittest.expectedFailure
# @unittest.skip("skip")
@ -319,7 +323,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
"ERROR: File pg_compression not found in new tablespace location"
)
try:
self.node.start()
self.node.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(
@ -365,7 +369,7 @@ class CfsRestoreNoencTest(CfsRestoreBase):
"ERROR: File pg_compression not found in new tablespace location"
)
try:
self.node.start()
self.node.slow_start()
except ProbackupException as e:
self.fail(
"ERROR: Instance not started after restore. \n {0} \n {1}".format(

View File

@ -1 +1 @@
pg_probackup 2.0.20
pg_probackup 2.0.21

View File

@ -616,7 +616,8 @@ class ProbackupTest(object):
return self.run_pb(cmd_list + options, async, gdb)
def merge_backup(
self, backup_dir, instance, backup_id, async=False, gdb=False):
self, backup_dir, instance, backup_id, async=False,
gdb=False, options=[]):
cmd_list = [
"merge",
"-B", backup_dir,
@ -624,7 +625,7 @@ class ProbackupTest(object):
"-i", backup_id
]
return self.run_pb(cmd_list, async, gdb)
return self.run_pb(cmd_list + options, async, gdb)
def restore_node(
self, backup_dir, instance, node=False,

View File

@ -69,7 +69,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
self.merge_backup(backup_dir, "node", page_id,
options=["-j", "4"])
show_backups = self.show_pb(backup_dir, "node")
# sanity check

View File

@ -1241,3 +1241,81 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_zags_block_corrupt(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.backup_node(backup_dir, 'node', node)
conn = node.connect()
with node.connect("postgres") as conn:
conn.execute(
"create table tbl(i int)")
conn.commit()
conn.execute(
"create index idx ON tbl (i)")
conn.commit()
conn.execute(
"insert into tbl select i from generate_series(0,400) as i")
conn.commit()
conn.execute(
"select pg_relation_size('idx')")
conn.commit()
conn.execute(
"delete from tbl where i < 100")
conn.commit()
conn.execute(
"explain analyze select i from tbl order by i")
conn.commit()
conn.execute(
"select i from tbl order by i")
conn.commit()
conn.execute(
"create extension pageinspect")
conn.commit()
print(conn.execute(
"select * from bt_page_stats('idx',1)"))
conn.commit()
conn.execute(
"insert into tbl select i from generate_series(0,100) as i")
conn.commit()
conn.execute(
"insert into tbl select i from generate_series(0,100) as i")
conn.commit()
conn.execute(
"insert into tbl select i from generate_series(0,100) as i")
conn.commit()
conn.execute(
"insert into tbl select i from generate_series(0,100) as i")
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
node_restored.cleanup()
self.restore_node(
backup_dir, 'node', node_restored)
node_restored.append_conf("postgresql.auto.conf", "archive_mode = 'off'")
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
exit(1)