mirror of https://github.com/postgrespro/pg_probackup.git
synced 2024-11-24 08:52:38 +02:00

Merge with master

This commit is contained in:
commit bb760c38c6

Makefile (4 changed lines)
@@ -1,8 +1,8 @@
PROGRAM = pg_probackup

# utils
OBJS = src/utils/json.o src/utils/logger.o src/utils/parray.o \
src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o
OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \
src/utils/parray.o src/utils/pgut.o src/utils/thread.o src/utils/remote.o src/utils/file.o

OBJS += src/archive.o src/backup.o src/catalog.o src/configure.o src/data.o \
src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \
@@ -158,7 +158,8 @@ sub build_pgprobackup
'validate.c'
);
$probackup->AddFiles(
"$currpath/src/utils",
"$currpath/src/utils",
'configuration.c',
'json.c',
'logger.c',
'parray.c',
@@ -28,7 +28,6 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)
char absolute_wal_file_path[MAXPGPATH];
char current_dir[MAXPGPATH];
int64 system_id;
pgBackupConfig *config;
bool is_compress = false;

if (wal_file_name == NULL && wal_file_path == NULL)
@@ -44,16 +43,16 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)
elog(ERROR, "getcwd() error");

/* verify that archive-push --instance parameter is valid */
config = readBackupCatalogConfigFile();
system_id = get_system_identifier(current_dir);

if (config->pgdata == NULL)
if (instance_config.pgdata == NULL)
elog(ERROR, "cannot read pg_probackup.conf for this instance");

if(system_id != config->system_identifier)
if(system_id != instance_config.system_identifier)
elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch."
"Instance '%s' should have SYSTEM_ID = %ld instead of %ld",
wal_file_name, instance_name, config->system_identifier, system_id);
wal_file_name, instance_name, instance_config.system_identifier,
system_id);

/* Create 'archlog_path' directory. Do nothing if it already exists. */
dir_create_dir(arclog_path, DIR_PERMISSION);
@@ -63,11 +62,11 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)

elog(INFO, "pg_probackup archive-push from %s to %s", absolute_wal_file_path, backup_wal_file_path);

if (compress_alg == PGLZ_COMPRESS)
if (instance_config.compress_alg == PGLZ_COMPRESS)
elog(ERROR, "pglz compression is not supported");

#ifdef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)
if (instance_config.compress_alg == ZLIB_COMPRESS)
is_compress = IsXLogFileName(wal_file_name);
#endif
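
The change that repeats throughout this commit is already visible in the do_archive_push() hunks above: the per-command readBackupCatalogConfigFile() call and the loose globals (pgdata, compress_alg, system_identifier, ...) give way to a single process-wide instance_config object. Below is a minimal, self-contained sketch of that consolidation; the field list is read off the hunks in this diff, while the struct and function names are hypothetical stand-ins rather than the project's real declarations.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for InstanceConfig; fields mirror options seen in this diff. */
typedef struct
{
	char	   *pgdata;
	uint64_t	system_identifier;
	char	   *pghost;
	char	   *pgport;
	char	   *pgdatabase;
	char	   *pguser;
	uint32_t	archive_timeout;
	int			compress_alg;
	int			compress_level;
} InstanceConfigSketch;

/* One global, filled once when pg_probackup.conf is parsed, read everywhere else. */
static InstanceConfigSketch instance_config_sketch;

/* Same shape as the archive-push checks above, without a per-call config read. */
static void
check_instance_sketch(uint64_t system_id_of_pgdata)
{
	if (instance_config_sketch.pgdata == NULL)
	{
		fprintf(stderr, "cannot read pg_probackup.conf for this instance\n");
		exit(1);
	}
	if (system_id_of_pgdata != instance_config_sketch.system_identifier)
	{
		fprintf(stderr, "instance parameters mismatch\n");
		exit(1);
	}
}
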
src/backup.c (233 changed lines)
@@ -26,8 +26,6 @@
#include "utils/file.h"


#define PG_STOP_BACKUP_TIMEOUT 300

/*
* Macro needed to parse ptrack.
* NOTE Keep those values syncronised with definitions in ptrack.h
@@ -111,8 +109,8 @@ static int checkpoint_timeout(void);

//static void backup_list_file(parray *files, const char *root, )
static void parse_backup_filelist_filenames(parray *files, const char *root);
static void wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn,
bool wait_prev_segment);
static XLogRecPtr wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn,
bool wait_prev_segment);
static void wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup);
static void make_pagemap_from_ptrack(parray *files);
static void *StreamLog(void *arg);
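
The prototype change just above (static void wait_wal_lsn(...) becoming static XLogRecPtr wait_wal_lsn(...)) means callers now get back the LSN that was actually confirmed: InvalidXLogRecPtr when only the previous segment's existence was checked, the requested LSN when it was found, or the last valid LSN before the target as the replica fallback; pg_stop_backup() later keeps the result via stop_backup_lsn = wait_wal_lsn(stop_backup_lsn, false, false). The sketch below shows that contract only in simplified, hypothetical form and is not the project's implementation.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

typedef uint64_t LsnSketch;
#define INVALID_LSN_SKETCH ((LsnSketch) 0)

/* Poll for a WAL segment file and report back which LSN was actually confirmed. */
static LsnSketch
wait_lsn_sketch(const char *segment_path, LsnSketch target_lsn,
				bool wait_prev_segment, unsigned timeout_sec)
{
	unsigned	try_count;

	for (try_count = 0; try_count < timeout_sec; try_count++)
	{
		if (access(segment_path, F_OK) == 0)
		{
			if (wait_prev_segment)
				return INVALID_LSN_SKETCH;	/* only existence mattered */
			return target_lsn;				/* pretend the record was located here */
		}
		sleep(1);
	}
	/* Timed out: the real code may fall back to the last valid LSN before the target. */
	return INVALID_LSN_SKETCH;
}
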
@@ -154,7 +152,10 @@ get_remote_pgdata_filelist(parray *files)
int resultStatus;
int i;

backup_conn_replication = pgut_connect_replication(pgut_dbname);
backup_conn_replication = pgut_connect_replication(instance_config.pghost,
instance_config.pgport,
instance_config.pgdatabase,
instance_config.pguser);

if (PQsendQuery(backup_conn_replication, "FILE_BACKUP FILELIST") == 0)
elog(ERROR,"%s: could not send replication command \"%s\": %s",
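
From here on, every replication connection in src/backup.c passes host, port, database and user explicitly from instance_config instead of relying on the old pgut_dbname global. Below is a hedged sketch of what such a helper can look like on top of libpq; the function name is hypothetical, and the real pgut_connect_replication() may assemble its parameters differently (the replication=true keyword is an assumption about how the walsender session is requested).

#include <libpq-fe.h>
#include <stddef.h>

/* Hypothetical helper: explicit parameters in, replication connection out. */
static PGconn *
connect_replication_sketch(const char *host, const char *port,
						   const char *dbname, const char *user)
{
	const char *const keywords[] = {"host", "port", "dbname", "user",
									"replication", "application_name", NULL};
	const char *const values[]   = {host, port, dbname, user,
									"true", "pg_probackup", NULL};

	/* libpq ignores entries whose value is NULL, so unset options fall back to defaults. */
	return PQconnectdbParams(keywords, values, /* expand_dbname = */ 0);
}
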
@@ -310,7 +311,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
to_path, strerror(errno_tmp));
}

INIT_TRADITIONAL_CRC32(file->crc);
INIT_FILE_CRC32(true, file->crc);

/* read from stream and write to backup file */
while (1)
@@ -336,14 +337,14 @@ remote_copy_file(PGconn *conn, pgFile* file)
{
write_buffer_size = Min(row_length, sizeof(buf));
memcpy(buf, copybuf, write_buffer_size);
COMP_TRADITIONAL_CRC32(file->crc, buf, write_buffer_size);
COMP_FILE_CRC32(true, file->crc, buf, write_buffer_size);

/* TODO calc checksum*/
if (fwrite(buf, 1, write_buffer_size, out) != write_buffer_size)
{
errno_tmp = errno;
/* oops */
FIN_TRADITIONAL_CRC32(file->crc);
FIN_FILE_CRC32(true, file->crc);
fclose(out);
PQfinish(conn);
elog(ERROR, "cannot write to \"%s\": %s", to_path,
@@ -367,7 +368,7 @@ remote_copy_file(PGconn *conn, pgFile* file)
}

file->write_size = (int64) file->read_size;
FIN_TRADITIONAL_CRC32(file->crc);
FIN_FILE_CRC32(true, file->crc);

fclose(out);
}
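
The INIT/COMP/FIN_TRADITIONAL_CRC32 calls above become INIT/COMP/FIN_FILE_CRC32 with a leading boolean, which presumably selects between the newer checksum flavour and the legacy "traditional" one so that file lists written by older backups can still be verified. The dispatch idea is sketched below in self-contained form; the polynomials and macro names are illustrative only and are not the real definitions from the pg_probackup headers.

#include <stddef.h>
#include <stdint.h>

/* Bitwise (slow but tiny) reflected CRC-32 update, parameterised by polynomial. */
static uint32_t
crc32_update_sketch(uint32_t crc, const unsigned char *buf, size_t len, uint32_t poly)
{
	size_t	i;
	int		bit;

	for (i = 0; i < len; i++)
	{
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
	}
	return crc;
}

/* The boolean picks the checksum flavour; call sites stay otherwise unchanged. */
#define INIT_FILE_CRC32_SKETCH(use_new, crc)			((crc) = 0xFFFFFFFFu)
#define COMP_FILE_CRC32_SKETCH(use_new, crc, buf, len)	\
	((crc) = crc32_update_sketch((crc), (const unsigned char *) (buf), (len), \
								 (use_new) ? 0x82F63B78u : 0xEDB88320u))
#define FIN_FILE_CRC32_SKETCH(use_new, crc)				((crc) ^= 0xFFFFFFFFu)
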
@ -401,7 +402,10 @@ remote_backup_files(void *arg)
|
||||
if (!pg_atomic_test_set_flag(&file->lock))
|
||||
continue;
|
||||
|
||||
file_backup_conn = pgut_connect_replication(pgut_dbname);
|
||||
file_backup_conn = pgut_connect_replication(instance_config.pghost,
|
||||
instance_config.pgport,
|
||||
instance_config.pgdatabase,
|
||||
instance_config.pguser);
|
||||
|
||||
/* check for interrupt */
|
||||
if (interrupted)
|
||||
@ -494,16 +498,19 @@ do_backup_instance(void)
|
||||
TimeLineID starttli;
|
||||
XLogRecPtr startpos;
|
||||
|
||||
backup_conn_replication = pgut_connect_replication(pgut_dbname);
|
||||
backup_conn_replication = pgut_connect_replication(instance_config.pghost,
|
||||
instance_config.pgport,
|
||||
instance_config.pgdatabase,
|
||||
instance_config.pguser);
|
||||
|
||||
/* Check replication prorocol connection */
|
||||
if (!RunIdentifySystem(backup_conn_replication, &sysidentifier, &starttli, &startpos, NULL))
|
||||
elog(ERROR, "Failed to send command for remote backup");
|
||||
|
||||
// TODO implement the check
|
||||
// if (&sysidentifier != system_identifier)
|
||||
// if (&sysidentifier != instance_config.system_identifier)
|
||||
// elog(ERROR, "Backup data directory was initialized for system id %ld, but target backup directory system id is %ld",
|
||||
// system_identifier, sysidentifier);
|
||||
// instance_config.system_identifier, sysidentifier);
|
||||
|
||||
current.tli = starttli;
|
||||
|
||||
@ -584,7 +591,10 @@ do_backup_instance(void)
|
||||
/*
|
||||
* Connect in replication mode to the server.
|
||||
*/
|
||||
stream_thread_arg.conn = pgut_connect_replication(pgut_dbname);
|
||||
stream_thread_arg.conn = pgut_connect_replication(instance_config.pghost,
|
||||
instance_config.pgport,
|
||||
instance_config.pgdatabase,
|
||||
instance_config.pguser);
|
||||
|
||||
if (!CheckServerVersionForStreaming(stream_thread_arg.conn))
|
||||
{
|
||||
@ -625,7 +635,8 @@ do_backup_instance(void)
|
||||
if (is_remote_backup)
|
||||
get_remote_pgdata_filelist(backup_files_list);
|
||||
else
|
||||
dir_list_file(backup_files_list, pgdata, true, true, false);
|
||||
dir_list_file(backup_files_list, instance_config.pgdata,
|
||||
true, true, false);
|
||||
|
||||
/*
|
||||
* Sort pathname ascending. It is necessary to create intermediate
|
||||
@ -641,7 +652,7 @@ do_backup_instance(void)
|
||||
parray_qsort(backup_files_list, pgFileComparePath);
|
||||
|
||||
/* Extract information about files in backup_list parsing their names:*/
|
||||
parse_backup_filelist_filenames(backup_files_list, pgdata);
|
||||
parse_backup_filelist_filenames(backup_files_list, instance_config.pgdata);
|
||||
|
||||
if (current.backup_mode != BACKUP_MODE_FULL)
|
||||
{
|
||||
@ -662,7 +673,7 @@ do_backup_instance(void)
|
||||
* reading WAL segments present in archives up to the point
|
||||
* where this backup has started.
|
||||
*/
|
||||
extractPageMap(arclog_path, current.tli, xlog_seg_size,
|
||||
extractPageMap(arclog_path, current.tli, instance_config.xlog_seg_size,
|
||||
prev_backup->start_lsn, current.start_lsn,
|
||||
backup_files_list);
|
||||
}
|
||||
@ -689,7 +700,7 @@ do_backup_instance(void)
|
||||
char database_path[MAXPGPATH];
|
||||
|
||||
if (!is_remote_backup)
|
||||
dir_name = GetRelativePath(file->path, pgdata);
|
||||
dir_name = GetRelativePath(file->path, instance_config.pgdata);
|
||||
else
|
||||
dir_name = file->path;
|
||||
|
||||
@ -719,7 +730,7 @@ do_backup_instance(void)
|
||||
{
|
||||
backup_files_arg *arg = &(threads_args[i]);
|
||||
|
||||
arg->from_root = pgdata;
|
||||
arg->from_root = instance_config.pgdata;
|
||||
arg->to_root = database_path;
|
||||
arg->files_list = backup_files_list;
|
||||
arg->prev_filelist = prev_backup_filelist;
|
||||
@ -770,7 +781,8 @@ do_backup_instance(void)
|
||||
{
|
||||
char pg_control_path[MAXPGPATH];
|
||||
|
||||
snprintf(pg_control_path, sizeof(pg_control_path), "%s/%s", pgdata, "global/pg_control");
|
||||
snprintf(pg_control_path, sizeof(pg_control_path), "%s/%s",
|
||||
instance_config.pgdata, "global/pg_control");
|
||||
|
||||
for (i = 0; i < parray_num(backup_files_list); i++)
|
||||
{
|
||||
@ -823,7 +835,7 @@ do_backup_instance(void)
|
||||
}
|
||||
|
||||
/* Print the list of files to backup catalog */
|
||||
write_backup_filelist(&current, backup_files_list, pgdata);
|
||||
write_backup_filelist(&current, backup_files_list, instance_config.pgdata);
|
||||
|
||||
/* Compute summary of size of regular files in the backup */
|
||||
for (i = 0; i < parray_num(backup_files_list); i++)
|
||||
@ -856,9 +868,8 @@ do_backup_instance(void)
|
||||
int
|
||||
do_backup(time_t start_time)
|
||||
{
|
||||
|
||||
/* PGDATA and BACKUP_MODE are always required */
|
||||
if (pgdata == NULL)
|
||||
if (instance_config.pgdata == NULL)
|
||||
elog(ERROR, "required parameter not specified: PGDATA "
|
||||
"(-D, --pgdata)");
|
||||
if (current.backup_mode == BACKUP_MODE_INVALID)
|
||||
@ -866,7 +877,9 @@ do_backup(time_t start_time)
|
||||
"(-b, --backup-mode)");
|
||||
|
||||
/* Create connection for PostgreSQL */
|
||||
backup_conn = pgut_connect(pgut_dbname);
|
||||
backup_conn = pgut_connect(instance_config.pghost, instance_config.pgport,
|
||||
instance_config.pgdatabase,
|
||||
instance_config.pguser);
|
||||
pgut_atexit_push(backup_disconnect, NULL);
|
||||
|
||||
current.primary_conninfo = pgut_get_conninfo_string(backup_conn);
|
||||
@ -876,8 +889,8 @@ do_backup(time_t start_time)
|
||||
elog(ERROR, "Failed to retreive wal_segment_size");
|
||||
#endif
|
||||
|
||||
current.compress_alg = compress_alg;
|
||||
current.compress_level = compress_level;
|
||||
current.compress_alg = instance_config.compress_alg;
|
||||
current.compress_level = instance_config.compress_level;
|
||||
|
||||
/* Confirm data block size and xlog block size are compatible */
|
||||
confirm_block_size("block_size", BLCKSZ);
|
||||
@ -926,11 +939,14 @@ do_backup(time_t start_time)
|
||||
if (current.from_replica && exclusive_backup)
|
||||
{
|
||||
/* Check master connection options */
|
||||
if (master_host == NULL)
|
||||
if (instance_config.master_host == NULL)
|
||||
elog(ERROR, "Options for connection to master must be provided to perform backup from replica");
|
||||
|
||||
/* Create connection to master server */
|
||||
master_conn = pgut_connect_extended(master_host, master_port, master_db, master_user);
|
||||
master_conn = pgut_connect(instance_config.master_host,
|
||||
instance_config.master_port,
|
||||
instance_config.master_db,
|
||||
instance_config.master_user);
|
||||
}
|
||||
|
||||
/* Get exclusive lock of backup catalog */
|
||||
@ -969,9 +985,9 @@ do_backup(time_t start_time)
|
||||
/* compute size of wal files of this backup stored in the archive */
|
||||
if (!current.stream)
|
||||
{
|
||||
current.wal_bytes = xlog_seg_size *
|
||||
(current.stop_lsn / xlog_seg_size -
|
||||
current.start_lsn / xlog_seg_size + 1);
|
||||
current.wal_bytes = instance_config.xlog_seg_size *
|
||||
(current.stop_lsn / instance_config.xlog_seg_size -
|
||||
current.start_lsn / instance_config.xlog_seg_size + 1);
|
||||
}
|
||||
|
||||
/* Backup is done. Update backup status */
|
||||
@ -1086,17 +1102,17 @@ check_system_identifiers(void)
|
||||
uint64 system_id_conn;
|
||||
uint64 system_id_pgdata;
|
||||
|
||||
system_id_pgdata = get_system_identifier(pgdata);
|
||||
system_id_pgdata = get_system_identifier(instance_config.pgdata);
|
||||
system_id_conn = get_remote_system_identifier(backup_conn);
|
||||
|
||||
if (system_id_conn != system_identifier)
|
||||
if (system_id_conn != instance_config.system_identifier)
|
||||
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT ", "
|
||||
"but connected instance system id is " UINT64_FORMAT,
|
||||
system_identifier, system_id_conn);
|
||||
if (system_id_pgdata != system_identifier)
|
||||
instance_config.system_identifier, system_id_conn);
|
||||
if (system_id_pgdata != instance_config.system_identifier)
|
||||
elog(ERROR, "Backup data directory was initialized for system id " UINT64_FORMAT ", "
|
||||
"but target backup directory system id is " UINT64_FORMAT,
|
||||
system_identifier, system_id_pgdata);
|
||||
instance_config.system_identifier, system_id_pgdata);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1180,7 +1196,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
|
||||
|
||||
if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
|
||||
/* In PAGE mode wait for current segment... */
|
||||
wait_wal_lsn(backup->start_lsn, true, false);
|
||||
wait_wal_lsn(backup->start_lsn, true, false);
|
||||
/*
|
||||
* Do not wait start_lsn for stream backup.
|
||||
* Because WAL streaming will start after pg_start_backup() in stream
|
||||
@ -1338,7 +1354,9 @@ pg_ptrack_clear(void)
|
||||
dbOid = atoi(PQgetvalue(res_db, i, 1));
|
||||
tblspcOid = atoi(PQgetvalue(res_db, i, 2));
|
||||
|
||||
tmp_conn = pgut_connect(dbname);
|
||||
tmp_conn = pgut_connect(instance_config.pghost, instance_config.pgport,
|
||||
dbname,
|
||||
instance_config.pguser);
|
||||
res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()",
|
||||
0, NULL);
|
||||
PQclear(res);
|
||||
@ -1454,7 +1472,9 @@ pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmp_conn = pgut_connect(dbname);
|
||||
tmp_conn = pgut_connect(instance_config.pghost, instance_config.pgport,
|
||||
dbname,
|
||||
instance_config.pguser);
|
||||
sprintf(params[0], "%i", tablespace_oid);
|
||||
sprintf(params[1], "%i", rel_filenode);
|
||||
res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
|
||||
@ -1516,8 +1536,11 @@ pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
|
||||
* be archived in archive 'wal' directory regardless stream mode.
|
||||
*
|
||||
* If 'wait_prev_segment' wait for previous segment.
|
||||
*
|
||||
* Returns LSN of last valid record if wait_prev_segment is not true, otherwise
|
||||
* returns InvalidXLogRecPtr.
|
||||
*/
|
||||
static void
|
||||
static XLogRecPtr
|
||||
wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
|
||||
{
|
||||
TimeLineID tli;
|
||||
@ -1537,10 +1560,11 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
|
||||
tli = get_current_timeline(false);
|
||||
|
||||
/* Compute the name of the WAL file containig requested LSN */
|
||||
GetXLogSegNo(lsn, targetSegNo, xlog_seg_size);
|
||||
GetXLogSegNo(lsn, targetSegNo, instance_config.xlog_seg_size);
|
||||
if (wait_prev_segment)
|
||||
targetSegNo--;
|
||||
GetXLogFileName(wal_segment, tli, targetSegNo, xlog_seg_size);
|
||||
GetXLogFileName(wal_segment, tli, targetSegNo,
|
||||
instance_config.xlog_seg_size);
|
||||
|
||||
/*
|
||||
* In pg_start_backup we wait for 'lsn' in 'pg_wal' directory if it is
|
||||
@ -1556,26 +1580,22 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
|
||||
DATABASE_DIR, PG_XLOG_DIR);
|
||||
join_path_components(wal_segment_path, pg_wal_dir, wal_segment);
|
||||
wal_segment_dir = pg_wal_dir;
|
||||
|
||||
timeout = (uint32) checkpoint_timeout();
|
||||
timeout = timeout + timeout * 0.1;
|
||||
}
|
||||
else
|
||||
{
|
||||
join_path_components(wal_segment_path, arclog_path, wal_segment);
|
||||
wal_segment_dir = arclog_path;
|
||||
|
||||
if (archive_timeout > 0)
|
||||
timeout = archive_timeout;
|
||||
else
|
||||
timeout = ARCHIVE_TIMEOUT_DEFAULT;
|
||||
|
||||
}
|
||||
|
||||
if (instance_config.archive_timeout > 0)
|
||||
timeout = instance_config.archive_timeout;
|
||||
else
|
||||
timeout = ARCHIVE_TIMEOUT_DEFAULT;
|
||||
|
||||
if (wait_prev_segment)
|
||||
elog(LOG, "Looking for segment: %s", wal_segment);
|
||||
else
|
||||
elog(LOG, "Looking for LSN: %X/%X in segment: %s",
|
||||
elog(LOG, "Looking for LSN %X/%X in segment: %s",
|
||||
(uint32) (lsn >> 32), (uint32) lsn, wal_segment);
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
@ -1607,16 +1627,39 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)
|
||||
{
|
||||
/* Do not check LSN for previous WAL segment */
|
||||
if (wait_prev_segment)
|
||||
return;
|
||||
return InvalidXLogRecPtr;
|
||||
|
||||
/*
|
||||
* A WAL segment found. Check LSN on it.
|
||||
*/
|
||||
if (wal_contains_lsn(wal_segment_dir, lsn, tli, xlog_seg_size))
|
||||
if (wal_contains_lsn(wal_segment_dir, lsn, tli,
|
||||
instance_config.xlog_seg_size))
|
||||
/* Target LSN was found */
|
||||
{
|
||||
elog(LOG, "Found LSN: %X/%X", (uint32) (lsn >> 32), (uint32) lsn);
|
||||
return;
|
||||
return lsn;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we failed to get LSN of valid record in a reasonable time, try
|
||||
* to get LSN of last valid record prior to the target LSN. But only
|
||||
* in case of a backup from a replica.
|
||||
*/
|
||||
if (!exclusive_backup && current.from_replica &&
|
||||
(try_count > timeout / 4))
|
||||
{
|
||||
XLogRecPtr res;
|
||||
|
||||
res = get_last_wal_lsn(wal_segment_dir, current.start_lsn,
|
||||
lsn, tli, false,
|
||||
instance_config.xlog_seg_size);
|
||||
if (!XLogRecPtrIsInvalid(res))
|
||||
{
|
||||
/* LSN of the prior record was found */
|
||||
elog(LOG, "Found prior LSN: %X/%X, it is used as stop LSN",
|
||||
(uint32) (res >> 32), (uint32) res);
|
||||
return res;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1710,11 +1753,12 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
|
||||
elog(INFO, "Wait for target LSN %X/%X to be received by replica",
|
||||
(uint32) (lsn >> 32), (uint32) lsn);
|
||||
|
||||
if (replica_timeout > 0 && try_count > replica_timeout)
|
||||
if (instance_config.replica_timeout > 0 &&
|
||||
try_count > instance_config.replica_timeout)
|
||||
elog(ERROR, "Target LSN %X/%X could not be recevied by replica "
|
||||
"in %d seconds",
|
||||
(uint32) (lsn >> 32), (uint32) lsn,
|
||||
replica_timeout);
|
||||
instance_config.replica_timeout);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1738,6 +1782,7 @@ pg_stop_backup(pgBackup *backup)
|
||||
size_t len;
|
||||
char *val = NULL;
|
||||
char *stop_backup_query = NULL;
|
||||
bool stop_lsn_exists = false;
|
||||
|
||||
/*
|
||||
* We will use this values if there are no transactions between start_lsn
|
||||
@ -1816,7 +1861,11 @@ pg_stop_backup(pgBackup *backup)
|
||||
#endif
|
||||
" labelfile,"
|
||||
" spcmapfile"
|
||||
#if PG_VERSION_NUM >= 100000
|
||||
" FROM pg_catalog.pg_stop_backup(false, false)";
|
||||
#else
|
||||
" FROM pg_catalog.pg_stop_backup(false)";
|
||||
#endif
|
||||
else
|
||||
stop_backup_query = "SELECT"
|
||||
" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
|
||||
@ -1824,7 +1873,11 @@ pg_stop_backup(pgBackup *backup)
|
||||
" lsn,"
|
||||
" labelfile,"
|
||||
" spcmapfile"
|
||||
#if PG_VERSION_NUM >= 100000
|
||||
" FROM pg_catalog.pg_stop_backup(false, false)";
|
||||
#else
|
||||
" FROM pg_catalog.pg_stop_backup(false)";
|
||||
#endif
|
||||
|
||||
}
|
||||
else
|
||||
@ -1842,8 +1895,8 @@ pg_stop_backup(pgBackup *backup)
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for the result of pg_stop_backup(),
|
||||
* but no longer than PG_STOP_BACKUP_TIMEOUT seconds
|
||||
* Wait for the result of pg_stop_backup(), but no longer than
|
||||
* archive_timeout seconds
|
||||
*/
|
||||
if (pg_stop_backup_is_sent && !in_cleanup)
|
||||
{
|
||||
@ -1866,14 +1919,14 @@ pg_stop_backup(pgBackup *backup)
|
||||
elog(INFO, "wait for pg_stop_backup()");
|
||||
|
||||
/*
|
||||
* If postgres haven't answered in PG_STOP_BACKUP_TIMEOUT seconds,
|
||||
* If postgres haven't answered in archive_timeout seconds,
|
||||
* send an interrupt.
|
||||
*/
|
||||
if (pg_stop_backup_timeout > PG_STOP_BACKUP_TIMEOUT)
|
||||
if (pg_stop_backup_timeout > instance_config.archive_timeout)
|
||||
{
|
||||
pgut_cancel(conn);
|
||||
elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it",
|
||||
PG_STOP_BACKUP_TIMEOUT);
|
||||
instance_config.archive_timeout);
|
||||
}
|
||||
}
|
||||
else
|
||||
@ -1913,7 +1966,29 @@ pg_stop_backup(pgBackup *backup)
|
||||
if (!XRecOffIsValid(stop_backup_lsn))
|
||||
{
|
||||
if (XRecOffIsNull(stop_backup_lsn))
|
||||
stop_backup_lsn = stop_backup_lsn + SizeOfXLogLongPHD;
|
||||
{
|
||||
char *xlog_path,
|
||||
stream_xlog_path[MAXPGPATH];
|
||||
|
||||
if (stream_wal)
|
||||
{
|
||||
pgBackupGetPath2(backup, stream_xlog_path,
|
||||
lengthof(stream_xlog_path),
|
||||
DATABASE_DIR, PG_XLOG_DIR);
|
||||
xlog_path = stream_xlog_path;
|
||||
}
|
||||
else
|
||||
xlog_path = arclog_path;
|
||||
|
||||
stop_backup_lsn = get_last_wal_lsn(xlog_path, backup->start_lsn,
|
||||
stop_backup_lsn, backup->tli,
|
||||
true, instance_config.xlog_seg_size);
|
||||
/*
|
||||
* Do not check existance of LSN again below using
|
||||
* wait_wal_lsn().
|
||||
*/
|
||||
stop_lsn_exists = true;
|
||||
}
|
||||
else
|
||||
elog(ERROR, "Invalid stop_backup_lsn value %X/%X",
|
||||
(uint32) (stop_backup_lsn >> 32), (uint32) (stop_backup_lsn));
|
||||
@ -2017,13 +2092,15 @@ pg_stop_backup(pgBackup *backup)
|
||||
stream_xlog_path[MAXPGPATH];
|
||||
|
||||
/* Wait for stop_lsn to be received by replica */
|
||||
if (current.from_replica)
|
||||
wait_replica_wal_lsn(stop_backup_lsn, false);
|
||||
/* XXX Do we need this? */
|
||||
// if (current.from_replica)
|
||||
// wait_replica_wal_lsn(stop_backup_lsn, false);
|
||||
/*
|
||||
* Wait for stop_lsn to be archived or streamed.
|
||||
* We wait for stop_lsn in stream mode just in case.
|
||||
*/
|
||||
wait_wal_lsn(stop_backup_lsn, false, false);
|
||||
if (!stop_lsn_exists)
|
||||
stop_backup_lsn = wait_wal_lsn(stop_backup_lsn, false, false);
|
||||
|
||||
if (stream_wal)
|
||||
{
|
||||
@ -2041,7 +2118,8 @@ pg_stop_backup(pgBackup *backup)
|
||||
elog(LOG, "Getting the Recovery Time from WAL");
|
||||
|
||||
/* iterate over WAL from stop_backup lsn to start_backup lsn */
|
||||
if (!read_recovery_info(xlog_path, backup->tli, xlog_seg_size,
|
||||
if (!read_recovery_info(xlog_path, backup->tli,
|
||||
instance_config.xlog_seg_size,
|
||||
backup->start_lsn, backup->stop_lsn,
|
||||
&backup->recovery_time, &backup->recovery_xid))
|
||||
{
|
||||
@ -2214,16 +2292,20 @@ backup_files(void *arg)
|
||||
if (!backup_data_file(arguments, to_path, file,
|
||||
arguments->prev_start_lsn,
|
||||
current.backup_mode,
|
||||
compress_alg, compress_level))
|
||||
instance_config.compress_alg,
|
||||
instance_config.compress_level))
|
||||
{
|
||||
file->write_size = BYTES_INVALID;
|
||||
elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
else if (strcmp(file->name, "pg_control") == 0)
|
||||
copy_pgcontrol_file(arguments->from_root, arguments->to_root,
|
||||
file, FIO_BACKUP_HOST);
|
||||
else
|
||||
{
|
||||
bool skip = false;
|
||||
bool skip = false;
|
||||
|
||||
/* If non-data file has not changed since last backup... */
|
||||
if (prev_file && file->exists_in_prev &&
|
||||
@ -2416,9 +2498,9 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
|
||||
|
||||
rel_path = relpathperm(rnode, forknum);
|
||||
if (segno > 0)
|
||||
path = psprintf("%s/%s.%u", pgdata, rel_path, segno);
|
||||
path = psprintf("%s/%s.%u", instance_config.pgdata, rel_path, segno);
|
||||
else
|
||||
path = psprintf("%s/%s", pgdata, rel_path);
|
||||
path = psprintf("%s/%s", instance_config.pgdata, rel_path);
|
||||
|
||||
pg_free(rel_path);
|
||||
|
||||
@ -2609,7 +2691,7 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
|
||||
|
||||
if (!XLogRecPtrIsInvalid(stop_backup_lsn))
|
||||
{
|
||||
if (xlogpos > stop_backup_lsn)
|
||||
if (xlogpos >= stop_backup_lsn)
|
||||
{
|
||||
stop_stream_lsn = xlogpos;
|
||||
return true;
|
||||
@ -2658,7 +2740,7 @@ StreamLog(void *arg)
|
||||
/*
|
||||
* Always start streaming at the beginning of a segment
|
||||
*/
|
||||
startpos -= startpos % xlog_seg_size;
|
||||
startpos -= startpos % instance_config.xlog_seg_size;
|
||||
|
||||
/* Initialize timeout */
|
||||
stream_stop_timeout = 0;
|
||||
@ -2772,7 +2854,10 @@ pg_ptrack_get_block(backup_files_arg *arguments,
|
||||
|
||||
if (arguments->backup_conn == NULL)
|
||||
{
|
||||
arguments->backup_conn = pgut_connect(pgut_dbname);
|
||||
arguments->backup_conn = pgut_connect(instance_config.pghost,
|
||||
instance_config.pgport,
|
||||
instance_config.pgdatabase,
|
||||
instance_config.pguser);
|
||||
}
|
||||
|
||||
if (arguments->cancel_conn == NULL)
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
#include "utils/file.h"
|
||||
#include "utils/configuration.h"
|
||||
|
||||
static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"};
|
||||
static pgBackup *readBackupControlFile(const char *path);
|
||||
@ -560,7 +561,7 @@ readBackupControlFile(const char *path)
|
||||
char *compress_alg = NULL;
|
||||
int parsed_options;
|
||||
|
||||
pgut_option options[] =
|
||||
ConfigOption options[] =
|
||||
{
|
||||
{'s', 0, "backup-mode", &backup_mode, SOURCE_FILE_STRICT},
|
||||
{'u', 0, "timelineid", &backup->tli, SOURCE_FILE_STRICT},
|
||||
@ -595,7 +596,7 @@ readBackupControlFile(const char *path)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
parsed_options = pgut_readopt(path, options, WARNING, true);
|
||||
parsed_options = config_read_opt(path, options, WARNING, true);
|
||||
|
||||
if (parsed_options == 0)
|
||||
{
|
||||
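
readBackupControlFile() above now fills a ConfigOption table and hands it to config_read_opt(path, options, WARNING, true) in place of pgut_readopt(); the call shape is unchanged and the return value is still the number of options parsed. The sketch below shows the kind of loop such a reader might run: match each "name = value" line against the table's long names and convert by type character ('s' string, 'u' unsigned). It is hypothetical and makes no claim that the real configuration.c is implemented this way.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical slimmed-down option descriptor (the real one is ConfigOption). */
typedef struct
{
	char		type;	/* 's' = string, 'u' = unsigned */
	const char *lname;	/* key as it appears in the file */
	void	   *var;	/* target variable */
} OptSketch;

/* Parse "key = value" lines, return how many options were recognised. */
static int
read_options_sketch(const char *path, OptSketch *opts, size_t nopts)
{
	FILE   *fp = fopen(path, "r");
	char	line[1024];
	int		parsed = 0;

	if (fp == NULL)
		return 0;

	while (fgets(line, sizeof(line), fp))
	{
		char	key[256], value[512];
		size_t	i;

		if (line[0] == '#' || sscanf(line, " %255[^ =] = %511[^\n]", key, value) != 2)
			continue;			/* skip comments and malformed lines */

		for (i = 0; i < nopts; i++)
		{
			if (strcmp(key, opts[i].lname) != 0)
				continue;
			if (opts[i].type == 's')
				*(char **) opts[i].var = strdup(value);
			else if (opts[i].type == 'u')
				*(unsigned *) opts[i].var = (unsigned) strtoul(value, NULL, 10);
			parsed++;
			break;
		}
	}
	fclose(fp);
	return parsed;
}
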
src/configure.c (657 changed lines)
@ -9,124 +9,268 @@
|
||||
|
||||
#include "pg_probackup.h"
|
||||
|
||||
#include "utils/configuration.h"
|
||||
#include "utils/json.h"
|
||||
|
||||
|
||||
static void opt_log_level_console(pgut_option *opt, const char *arg);
|
||||
static void opt_log_level_file(pgut_option *opt, const char *arg);
|
||||
static void opt_compress_alg(pgut_option *opt, const char *arg);
|
||||
static void assign_log_level_console(ConfigOption *opt, const char *arg);
|
||||
static void assign_log_level_file(ConfigOption *opt, const char *arg);
|
||||
static void assign_compress_alg(ConfigOption *opt, const char *arg);
|
||||
|
||||
static char *get_log_level_console(ConfigOption *opt);
|
||||
static char *get_log_level_file(ConfigOption *opt);
|
||||
static char *get_compress_alg(ConfigOption *opt);
|
||||
|
||||
static void show_configure_start(void);
|
||||
static void show_configure_end(void);
|
||||
static void show_configure(pgBackupConfig *config);
|
||||
|
||||
static void show_configure_json(pgBackupConfig *config);
|
||||
static void show_configure_plain(ConfigOption *opt);
|
||||
static void show_configure_json(ConfigOption *opt);
|
||||
|
||||
static pgBackupConfig *cur_config = NULL;
|
||||
#define RETENTION_REDUNDANCY_DEFAULT 0
|
||||
#define RETENTION_WINDOW_DEFAULT 0
|
||||
|
||||
#define OPTION_INSTANCE_GROUP "Backup instance information"
|
||||
#define OPTION_CONN_GROUP "Connection parameters"
|
||||
#define OPTION_REPLICA_GROUP "Replica parameters"
|
||||
#define OPTION_ARCHIVE_GROUP "Archive parameters"
|
||||
#define OPTION_LOG_GROUP "Logging parameters"
|
||||
#define OPTION_RETENTION_GROUP "Retention parameters"
|
||||
#define OPTION_COMPRESS_GROUP "Compression parameters"
|
||||
|
||||
/*
|
||||
* Short name should be non-printable ASCII character.
|
||||
*/
|
||||
ConfigOption instance_options[] =
|
||||
{
|
||||
/* Instance options */
|
||||
{
|
||||
's', 'D', "pgdata",
|
||||
&instance_config.pgdata, SOURCE_CMD, 0,
|
||||
OPTION_INSTANCE_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
'U', 200, "system-identifier",
|
||||
&instance_config.system_identifier, SOURCE_FILE_STRICT, 0,
|
||||
OPTION_INSTANCE_GROUP, 0, option_get_value
|
||||
},
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
{
|
||||
'u', 201, "xlog-seg-size",
|
||||
&instance_config.xlog_seg_size, SOURCE_FILE_STRICT, 0,
|
||||
OPTION_INSTANCE_GROUP, 0, option_get_value
|
||||
},
|
||||
#endif
|
||||
/* Connection options */
|
||||
{
|
||||
's', 'd', "pgdatabase",
|
||||
&instance_config.pgdatabase, SOURCE_CMD, 0,
|
||||
OPTION_CONN_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 'h', "pghost",
|
||||
&instance_config.pghost, SOURCE_CMD, 0,
|
||||
OPTION_CONN_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 'p', "pgport",
|
||||
&instance_config.pgport, SOURCE_CMD, 0,
|
||||
OPTION_CONN_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 'U', "pguser",
|
||||
&instance_config.pguser, SOURCE_CMD, 0,
|
||||
OPTION_CONN_GROUP, 0, option_get_value
|
||||
},
|
||||
/* Replica options */
|
||||
{
|
||||
's', 202, "master-db",
|
||||
&instance_config.master_db, SOURCE_CMD, 0,
|
||||
OPTION_REPLICA_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 203, "master-host",
|
||||
&instance_config.master_host, SOURCE_CMD, 0,
|
||||
OPTION_REPLICA_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 204, "master-port",
|
||||
&instance_config.master_port, SOURCE_CMD, 0,
|
||||
OPTION_REPLICA_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 205, "master-user",
|
||||
&instance_config.master_user, SOURCE_CMD, 0,
|
||||
OPTION_REPLICA_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
'u', 206, "replica-timeout",
|
||||
&instance_config.replica_timeout, SOURCE_CMD, SOURCE_DEFAULT,
|
||||
OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value
|
||||
},
|
||||
/* Archive options */
|
||||
{
|
||||
'u', 207, "archive-timeout",
|
||||
&instance_config.archive_timeout, SOURCE_CMD, SOURCE_DEFAULT,
|
||||
OPTION_ARCHIVE_GROUP, OPTION_UNIT_S, option_get_value
|
||||
},
|
||||
/* Logging options */
|
||||
{
|
||||
'f', 208, "log-level-console",
|
||||
assign_log_level_console, SOURCE_CMD, 0,
|
||||
OPTION_LOG_GROUP, 0, get_log_level_console
|
||||
},
|
||||
{
|
||||
'f', 209, "log-level-file",
|
||||
assign_log_level_file, SOURCE_CMD, 0,
|
||||
OPTION_LOG_GROUP, 0, get_log_level_file
|
||||
},
|
||||
{
|
||||
's', 210, "log-filename",
|
||||
&instance_config.logger.log_filename, SOURCE_CMD, 0,
|
||||
OPTION_LOG_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 211, "error-log-filename",
|
||||
&instance_config.logger.error_log_filename, SOURCE_CMD, 0,
|
||||
OPTION_LOG_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
's', 212, "log-directory",
|
||||
&instance_config.logger.log_directory, SOURCE_CMD, 0,
|
||||
OPTION_LOG_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
'U', 213, "log-rotation-size",
|
||||
&instance_config.logger.log_rotation_size, SOURCE_CMD, SOURCE_DEFAULT,
|
||||
OPTION_LOG_GROUP, OPTION_UNIT_KB, option_get_value
|
||||
},
|
||||
{
|
||||
'U', 214, "log-rotation-age",
|
||||
&instance_config.logger.log_rotation_age, SOURCE_CMD, SOURCE_DEFAULT,
|
||||
OPTION_LOG_GROUP, OPTION_UNIT_MS, option_get_value
|
||||
},
|
||||
/* Retention options */
|
||||
{
|
||||
'u', 215, "retention-redundancy",
|
||||
&instance_config.retention_redundancy, SOURCE_CMD, 0,
|
||||
OPTION_RETENTION_GROUP, 0, option_get_value
|
||||
},
|
||||
{
|
||||
'u', 216, "retention-window",
|
||||
&instance_config.retention_window, SOURCE_CMD, 0,
|
||||
OPTION_RETENTION_GROUP, 0, option_get_value
|
||||
},
|
||||
/* Compression options */
|
||||
{
|
||||
'f', 217, "compress-algorithm",
|
||||
assign_compress_alg, SOURCE_CMD, 0,
|
||||
OPTION_COMPRESS_GROUP, 0, get_compress_alg
|
||||
},
|
||||
{
|
||||
'u', 218, "compress-level",
|
||||
&instance_config.compress_level, SOURCE_CMD, 0,
|
||||
OPTION_COMPRESS_GROUP, 0, option_get_value
|
||||
},
|
||||
{ 0 }
|
||||
};
|
||||
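
The instance_options[] table above is what keeps do_show_config() and do_set_config() below so short: each entry carries a type character, short and long option names, a pointer into instance_config (or an assign callback for 'f' entries), source bookkeeping, the group label that is printed as a "# ..." comment, an optional unit flag, and a getter that renders the current value as text. The miniature below illustrates the same data-driven idea; the struct layout and sample values are hypothetical, and the authoritative declaration lives in src/utils/configuration.h.

#include <stdio.h>
#include <string.h>

/* Hypothetical miniature of a data-driven option table with per-option getters. */
typedef struct OptRowSketch OptRowSketch;
struct OptRowSketch
{
	const char *lname;								/* long option / config key */
	const char *group;								/* section comment in the file */
	const char *(*get_value)(const OptRowSketch *);	/* render the current value */
};

/* Hard-coded sample values; real getters would read instance_config. */
static const char *get_pgdata_sketch(const OptRowSketch *o)   { (void) o; return "/var/lib/pgdata"; }
static const char *get_compress_sketch(const OptRowSketch *o) { (void) o; return "none"; }

static OptRowSketch rows_sketch[] =
{
	{"pgdata",             "Backup instance information", get_pgdata_sketch},
	{"compress-algorithm", "Compression parameters",      get_compress_sketch},
	{NULL, NULL, NULL}
};

/* Same loop shape as do_show_config()/do_set_config(): group header, then key = value. */
static void
show_config_sketch(void)
{
	const char *current_group = NULL;
	int			i;

	for (i = 0; rows_sketch[i].lname != NULL; i++)
	{
		if (current_group == NULL || strcmp(rows_sketch[i].group, current_group) != 0)
		{
			current_group = rows_sketch[i].group;
			printf("# %s\n", current_group);
		}
		printf("%s = %s\n", rows_sketch[i].lname, rows_sketch[i].get_value(&rows_sketch[i]));
	}
}
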
|
||||
/* An instance configuration with default options */
|
||||
InstanceConfig instance_config;
|
||||
|
||||
static PQExpBufferData show_buf;
|
||||
static int32 json_level = 0;
|
||||
static const char *current_group = NULL;
|
||||
|
||||
/*
|
||||
* All this code needs refactoring.
|
||||
* Show configure options including default values.
|
||||
*/
|
||||
|
||||
/* Set configure options */
|
||||
int
|
||||
do_configure(bool show_only)
|
||||
void
|
||||
do_show_config(void)
|
||||
{
|
||||
pgBackupConfig *config = readBackupCatalogConfigFile();
|
||||
if (pgdata)
|
||||
config->pgdata = pgdata;
|
||||
if (pgut_dbname)
|
||||
config->pgdatabase = pgut_dbname;
|
||||
if (host)
|
||||
config->pghost = host;
|
||||
if (port)
|
||||
config->pgport = port;
|
||||
if (username)
|
||||
config->pguser = username;
|
||||
int i;
|
||||
|
||||
if (master_host)
|
||||
config->master_host = master_host;
|
||||
if (master_port)
|
||||
config->master_port = master_port;
|
||||
if (master_db)
|
||||
config->master_db = master_db;
|
||||
if (master_user)
|
||||
config->master_user = master_user;
|
||||
show_configure_start();
|
||||
|
||||
if (replica_timeout)
|
||||
config->replica_timeout = replica_timeout;
|
||||
for (i = 0; instance_options[i].type; i++)
|
||||
{
|
||||
if (show_format == SHOW_PLAIN)
|
||||
show_configure_plain(&instance_options[i]);
|
||||
else
|
||||
show_configure_json(&instance_options[i]);
|
||||
}
|
||||
|
||||
if (archive_timeout)
|
||||
config->archive_timeout = archive_timeout;
|
||||
show_configure_end();
|
||||
}
|
||||
|
||||
if (log_level_console)
|
||||
config->log_level_console = log_level_console;
|
||||
if (log_level_file)
|
||||
config->log_level_file = log_level_file;
|
||||
if (log_filename)
|
||||
config->log_filename = log_filename;
|
||||
if (error_log_filename)
|
||||
config->error_log_filename = error_log_filename;
|
||||
if (log_directory)
|
||||
config->log_directory = log_directory;
|
||||
if (log_rotation_size)
|
||||
config->log_rotation_size = log_rotation_size;
|
||||
if (log_rotation_age)
|
||||
config->log_rotation_age = log_rotation_age;
|
||||
/*
|
||||
* Save configure options into BACKUP_CATALOG_CONF_FILE. Do not save default
|
||||
* values into the file.
|
||||
*/
|
||||
void
|
||||
do_set_config(void)
|
||||
{
|
||||
char path[MAXPGPATH];
|
||||
FILE *fp;
|
||||
int i;
|
||||
|
||||
if (retention_redundancy)
|
||||
config->retention_redundancy = retention_redundancy;
|
||||
if (retention_window)
|
||||
config->retention_window = retention_window;
|
||||
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
|
||||
fp = fopen(path, "wt");
|
||||
if (fp == NULL)
|
||||
elog(ERROR, "cannot create %s: %s",
|
||||
BACKUP_CATALOG_CONF_FILE, strerror(errno));
|
||||
|
||||
if (compress_alg)
|
||||
config->compress_alg = compress_alg;
|
||||
if (compress_level)
|
||||
config->compress_level = compress_level;
|
||||
current_group = NULL;
|
||||
|
||||
if (show_only)
|
||||
show_configure(config);
|
||||
else
|
||||
writeBackupCatalogConfigFile(config);
|
||||
for (i = 0; instance_options[i].type; i++)
|
||||
{
|
||||
ConfigOption *opt = &instance_options[i];
|
||||
char *value;
|
||||
|
||||
return 0;
|
||||
/* Save only options from command line */
|
||||
if (opt->source != SOURCE_CMD &&
|
||||
/* ...or options from the previous configure file */
|
||||
opt->source != SOURCE_FILE && opt->source != SOURCE_FILE_STRICT)
|
||||
continue;
|
||||
|
||||
value = opt->get_value(opt);
|
||||
if (value == NULL)
|
||||
continue;
|
||||
|
||||
if (current_group == NULL || strcmp(opt->group, current_group) != 0)
|
||||
{
|
||||
current_group = opt->group;
|
||||
fprintf(fp, "# %s\n", current_group);
|
||||
}
|
||||
|
||||
fprintf(fp, "%s = %s\n", opt->lname, value);
|
||||
pfree(value);
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
void
|
||||
pgBackupConfigInit(pgBackupConfig *config)
|
||||
init_config(InstanceConfig *config)
|
||||
{
|
||||
config->system_identifier = 0;
|
||||
MemSet(config, 0, sizeof(InstanceConfig));
|
||||
|
||||
/*
|
||||
* Starting from PostgreSQL 11 WAL segment size may vary. Prior to
|
||||
* PostgreSQL 10 xlog_seg_size is equal to XLOG_SEG_SIZE.
|
||||
*/
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
config->xlog_seg_size = 0;
|
||||
#else
|
||||
config->xlog_seg_size = XLOG_SEG_SIZE;
|
||||
#endif
|
||||
|
||||
config->pgdata = NULL;
|
||||
config->pgdatabase = NULL;
|
||||
config->pghost = NULL;
|
||||
config->pgport = NULL;
|
||||
config->pguser = NULL;
|
||||
|
||||
config->master_host = NULL;
|
||||
config->master_port = NULL;
|
||||
config->master_db = NULL;
|
||||
config->master_user = NULL;
|
||||
config->replica_timeout = REPLICA_TIMEOUT_DEFAULT;
|
||||
|
||||
config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT;
|
||||
|
||||
config->log_level_console = LOG_LEVEL_CONSOLE_DEFAULT;
|
||||
config->log_level_file = LOG_LEVEL_FILE_DEFAULT;
|
||||
config->log_filename = LOG_FILENAME_DEFAULT;
|
||||
config->error_log_filename = NULL;
|
||||
config->log_directory = LOG_DIRECTORY_DEFAULT;
|
||||
config->log_rotation_size = LOG_ROTATION_SIZE_DEFAULT;
|
||||
config->log_rotation_age = LOG_ROTATION_AGE_DEFAULT;
|
||||
/* Copy logger defaults */
|
||||
config->logger = logger_config;
|
||||
|
||||
config->retention_redundancy = RETENTION_REDUNDANCY_DEFAULT;
|
||||
config->retention_window = RETENTION_WINDOW_DEFAULT;
|
||||
@ -135,198 +279,40 @@ pgBackupConfigInit(pgBackupConfig *config)
|
||||
config->compress_level = COMPRESS_LEVEL_DEFAULT;
|
||||
}
|
||||
|
||||
void
|
||||
writeBackupCatalogConfig(FILE *out, pgBackupConfig *config)
|
||||
static void
|
||||
assign_log_level_console(ConfigOption *opt, const char *arg)
|
||||
{
|
||||
uint64 res;
|
||||
const char *unit;
|
||||
|
||||
fprintf(out, "#Backup instance info\n");
|
||||
fprintf(out, "PGDATA = %s\n", config->pgdata);
|
||||
fprintf(out, "system-identifier = " UINT64_FORMAT "\n", config->system_identifier);
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
fprintf(out, "xlog-seg-size = %u\n", config->xlog_seg_size);
|
||||
#endif
|
||||
|
||||
fprintf(out, "#Connection parameters:\n");
|
||||
if (config->pgdatabase)
|
||||
fprintf(out, "PGDATABASE = %s\n", config->pgdatabase);
|
||||
if (config->pghost)
|
||||
fprintf(out, "PGHOST = %s\n", config->pghost);
|
||||
if (config->pgport)
|
||||
fprintf(out, "PGPORT = %s\n", config->pgport);
|
||||
if (config->pguser)
|
||||
fprintf(out, "PGUSER = %s\n", config->pguser);
|
||||
|
||||
fprintf(out, "#Replica parameters:\n");
|
||||
if (config->master_host)
|
||||
fprintf(out, "master-host = %s\n", config->master_host);
|
||||
if (config->master_port)
|
||||
fprintf(out, "master-port = %s\n", config->master_port);
|
||||
if (config->master_db)
|
||||
fprintf(out, "master-db = %s\n", config->master_db);
|
||||
if (config->master_user)
|
||||
fprintf(out, "master-user = %s\n", config->master_user);
|
||||
|
||||
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
fprintf(out, "replica-timeout = " UINT64_FORMAT "%s\n", res, unit);
|
||||
|
||||
fprintf(out, "#Archive parameters:\n");
|
||||
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
fprintf(out, "archive-timeout = " UINT64_FORMAT "%s\n", res, unit);
|
||||
|
||||
fprintf(out, "#Logging parameters:\n");
|
||||
fprintf(out, "log-level-console = %s\n", deparse_log_level(config->log_level_console));
|
||||
fprintf(out, "log-level-file = %s\n", deparse_log_level(config->log_level_file));
|
||||
fprintf(out, "log-filename = %s\n", config->log_filename);
|
||||
if (config->error_log_filename)
|
||||
fprintf(out, "error-log-filename = %s\n", config->error_log_filename);
|
||||
|
||||
if (strcmp(config->log_directory, LOG_DIRECTORY_DEFAULT) == 0)
|
||||
fprintf(out, "log-directory = %s/%s\n", backup_path, config->log_directory);
|
||||
else
|
||||
fprintf(out, "log-directory = %s\n", config->log_directory);
|
||||
/* Convert values from base unit */
|
||||
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
|
||||
&res, &unit);
|
||||
fprintf(out, "log-rotation-size = " UINT64_FORMAT "%s\n", res, (res)?unit:"KB");
|
||||
|
||||
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
fprintf(out, "log-rotation-age = " UINT64_FORMAT "%s\n", res, (res)?unit:"min");
|
||||
|
||||
fprintf(out, "#Retention parameters:\n");
|
||||
fprintf(out, "retention-redundancy = %u\n", config->retention_redundancy);
|
||||
fprintf(out, "retention-window = %u\n", config->retention_window);
|
||||
|
||||
fprintf(out, "#Compression parameters:\n");
|
||||
|
||||
fprintf(out, "compress-algorithm = %s\n", deparse_compress_alg(config->compress_alg));
|
||||
fprintf(out, "compress-level = %d\n", config->compress_level);
|
||||
}
|
||||
|
||||
void
|
||||
writeBackupCatalogConfigFile(pgBackupConfig *config)
|
||||
{
|
||||
char path[MAXPGPATH];
|
||||
FILE *fp;
|
||||
|
||||
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
|
||||
fp = fopen(path, "wt");
|
||||
if (fp == NULL)
|
||||
elog(ERROR, "cannot create %s: %s",
|
||||
BACKUP_CATALOG_CONF_FILE, strerror(errno));
|
||||
|
||||
writeBackupCatalogConfig(fp, config);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
|
||||
pgBackupConfig*
|
||||
readBackupCatalogConfigFile(void)
|
||||
{
|
||||
pgBackupConfig *config = pgut_new(pgBackupConfig);
|
||||
char path[MAXPGPATH];
|
||||
|
||||
pgut_option options[] =
|
||||
{
|
||||
/* retention options */
|
||||
{ 'u', 0, "retention-redundancy", &(config->retention_redundancy),SOURCE_FILE_STRICT },
|
||||
{ 'u', 0, "retention-window", &(config->retention_window), SOURCE_FILE_STRICT },
|
||||
/* compression options */
|
||||
{ 'f', 0, "compress-algorithm", opt_compress_alg, SOURCE_CMDLINE },
|
||||
{ 'u', 0, "compress-level", &(config->compress_level), SOURCE_CMDLINE },
|
||||
/* logging options */
|
||||
{ 'f', 0, "log-level-console", opt_log_level_console, SOURCE_CMDLINE },
|
||||
{ 'f', 0, "log-level-file", opt_log_level_file, SOURCE_CMDLINE },
|
||||
{ 's', 0, "log-filename", &(config->log_filename), SOURCE_CMDLINE },
|
||||
{ 's', 0, "error-log-filename", &(config->error_log_filename), SOURCE_CMDLINE },
|
||||
{ 's', 0, "log-directory", &(config->log_directory), SOURCE_CMDLINE },
|
||||
{ 'U', 0, "log-rotation-size", &(config->log_rotation_size), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
|
||||
{ 'U', 0, "log-rotation-age", &(config->log_rotation_age), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
|
||||
/* connection options */
|
||||
{ 's', 0, "pgdata", &(config->pgdata), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "pgdatabase", &(config->pgdatabase), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "pghost", &(config->pghost), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "pgport", &(config->pgport), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "pguser", &(config->pguser), SOURCE_FILE_STRICT },
|
||||
/* replica options */
|
||||
{ 's', 0, "master-host", &(config->master_host), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "master-port", &(config->master_port), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "master-db", &(config->master_db), SOURCE_FILE_STRICT },
|
||||
{ 's', 0, "master-user", &(config->master_user), SOURCE_FILE_STRICT },
|
||||
{ 'u', 0, "replica-timeout", &(config->replica_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
/* other options */
|
||||
{ 'U', 0, "system-identifier", &(config->system_identifier), SOURCE_FILE_STRICT },
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
{'u', 0, "xlog-seg-size", &config->xlog_seg_size, SOURCE_FILE_STRICT},
|
||||
#endif
|
||||
/* archive options */
|
||||
{ 'u', 0, "archive-timeout", &(config->archive_timeout), SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
{0}
|
||||
};
|
||||
|
||||
cur_config = config;
|
||||
|
||||
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
|
||||
|
||||
pgBackupConfigInit(config);
|
||||
pgut_readopt(path, options, ERROR, true);
|
||||
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
if (!IsValidWalSegSize(config->xlog_seg_size))
|
||||
elog(ERROR, "Invalid WAL segment size %u", config->xlog_seg_size);
|
||||
#endif
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read xlog-seg-size from BACKUP_CATALOG_CONF_FILE.
|
||||
*/
|
||||
uint32
|
||||
get_config_xlog_seg_size(void)
|
||||
{
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
char path[MAXPGPATH];
|
||||
uint32 seg_size;
|
||||
pgut_option options[] =
|
||||
{
|
||||
{'u', 0, "xlog-seg-size", &seg_size, SOURCE_FILE_STRICT},
|
||||
{0}
|
||||
};
|
||||
|
||||
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
|
||||
pgut_readopt(path, options, ERROR, false);
|
||||
|
||||
if (!IsValidWalSegSize(seg_size))
|
||||
elog(ERROR, "Invalid WAL segment size %u", seg_size);
|
||||
|
||||
return seg_size;
|
||||
|
||||
#else
|
||||
return (uint32) XLOG_SEG_SIZE;
|
||||
#endif
|
||||
instance_config.logger.log_level_console = parse_log_level(arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_log_level_console(pgut_option *opt, const char *arg)
|
||||
assign_log_level_file(ConfigOption *opt, const char *arg)
|
||||
{
|
||||
cur_config->log_level_console = parse_log_level(arg);
|
||||
instance_config.logger.log_level_file = parse_log_level(arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_log_level_file(pgut_option *opt, const char *arg)
|
||||
assign_compress_alg(ConfigOption *opt, const char *arg)
|
||||
{
|
||||
cur_config->log_level_file = parse_log_level(arg);
|
||||
instance_config.compress_alg = parse_compress_alg(arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_compress_alg(pgut_option *opt, const char *arg)
|
||||
static char *
|
||||
get_log_level_console(ConfigOption *opt)
|
||||
{
|
||||
cur_config->compress_alg = parse_compress_alg(arg);
|
||||
return pstrdup(deparse_log_level(instance_config.logger.log_level_console));
|
||||
}
|
||||
|
||||
static char *
|
||||
get_log_level_file(ConfigOption *opt)
|
||||
{
|
||||
return pstrdup(deparse_log_level(instance_config.logger.log_level_file));
|
||||
}
|
||||
|
||||
static char *
|
||||
get_compress_alg(ConfigOption *opt)
|
||||
{
|
||||
return pstrdup(deparse_compress_alg(instance_config.compress_alg));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -335,12 +321,15 @@ opt_compress_alg(pgut_option *opt, const char *arg)
|
||||
static void
|
||||
show_configure_start(void)
|
||||
{
|
||||
if (show_format == SHOW_PLAIN)
|
||||
return;
|
||||
|
||||
/* For now we need buffer only for JSON format */
|
||||
json_level = 0;
|
||||
initPQExpBuffer(&show_buf);
|
||||
|
||||
if (show_format == SHOW_PLAIN)
|
||||
current_group = NULL;
|
||||
else
|
||||
{
|
||||
json_level = 0;
|
||||
json_add(&show_buf, JT_BEGIN_OBJECT, &json_level);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -350,28 +339,38 @@ static void
|
||||
show_configure_end(void)
|
||||
{
|
||||
if (show_format == SHOW_PLAIN)
|
||||
return;
|
||||
current_group = NULL;
|
||||
else
|
||||
{
|
||||
json_add(&show_buf, JT_END_OBJECT, &json_level);
|
||||
appendPQExpBufferChar(&show_buf, '\n');
|
||||
}
|
||||
|
||||
fputs(show_buf.data, stdout);
|
||||
termPQExpBuffer(&show_buf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Show configure information of pg_probackup.
|
||||
* Plain output.
|
||||
*/
|
||||
|
||||
static void
|
||||
show_configure(pgBackupConfig *config)
|
||||
show_configure_plain(ConfigOption *opt)
|
||||
{
|
||||
show_configure_start();
|
||||
char *value;
|
||||
|
||||
if (show_format == SHOW_PLAIN)
|
||||
writeBackupCatalogConfig(stdout, config);
|
||||
else
|
||||
show_configure_json(config);
|
||||
value = opt->get_value(opt);
|
||||
if (value == NULL)
|
||||
return;
|
||||
|
||||
show_configure_end();
|
||||
if (current_group == NULL || strcmp(opt->group, current_group) != 0)
|
||||
{
|
||||
current_group = opt->group;
|
||||
appendPQExpBuffer(&show_buf, "# %s\n", current_group);
|
||||
}
|
||||
|
||||
appendPQExpBuffer(&show_buf, "%s = %s\n", opt->lname, value);
|
||||
pfree(value);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -379,109 +378,15 @@ show_configure(pgBackupConfig *config)
|
||||
*/
|
||||
|
||||
static void
|
||||
show_configure_json(pgBackupConfig *config)
|
||||
show_configure_json(ConfigOption *opt)
|
||||
{
|
||||
PQExpBuffer buf = &show_buf;
|
||||
uint64 res;
|
||||
const char *unit;
|
||||
char *value;
|
||||
|
||||
json_add(buf, JT_BEGIN_OBJECT, &json_level);
|
||||
value = opt->get_value(opt);
|
||||
if (value == NULL)
|
||||
return;
|
||||
|
||||
json_add_value(buf, "pgdata", config->pgdata, json_level, false);
|
||||
|
||||
json_add_key(buf, "system-identifier", json_level, true);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier);
|
||||
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
json_add_key(buf, "xlog-seg-size", json_level, true);
|
||||
appendPQExpBuffer(buf, "%u", config->xlog_seg_size);
|
||||
#endif
|
||||
|
||||
/* Connection parameters */
|
||||
if (config->pgdatabase)
|
||||
json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true);
|
||||
if (config->pghost)
|
||||
json_add_value(buf, "pghost", config->pghost, json_level, true);
|
||||
if (config->pgport)
|
||||
json_add_value(buf, "pgport", config->pgport, json_level, true);
|
||||
if (config->pguser)
|
||||
json_add_value(buf, "pguser", config->pguser, json_level, true);
|
||||
|
||||
/* Replica parameters */
|
||||
if (config->master_host)
|
||||
json_add_value(buf, "master-host", config->master_host, json_level,
|
||||
true);
|
||||
if (config->master_port)
|
||||
json_add_value(buf, "master-port", config->master_port, json_level,
|
||||
true);
|
||||
if (config->master_db)
|
||||
json_add_value(buf, "master-db", config->master_db, json_level, true);
|
||||
if (config->master_user)
|
||||
json_add_value(buf, "master-user", config->master_user, json_level,
|
||||
true);
|
||||
|
||||
json_add_key(buf, "replica-timeout", json_level, true);
|
||||
convert_from_base_unit_u(config->replica_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
|
||||
|
||||
/* Archive parameters */
|
||||
json_add_key(buf, "archive-timeout", json_level, true);
|
||||
convert_from_base_unit_u(config->archive_timeout, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, unit);
|
||||
|
||||
/* Logging parameters */
|
||||
json_add_value(buf, "log-level-console",
|
||||
deparse_log_level(config->log_level_console), json_level,
|
||||
true);
|
||||
json_add_value(buf, "log-level-file",
|
||||
deparse_log_level(config->log_level_file), json_level,
|
||||
true);
|
||||
json_add_value(buf, "log-filename", config->log_filename, json_level,
|
||||
true);
|
||||
if (config->error_log_filename)
|
||||
json_add_value(buf, "error-log-filename", config->error_log_filename,
|
||||
json_level, true);
|
||||
|
||||
if (strcmp(config->log_directory, LOG_DIRECTORY_DEFAULT) == 0)
|
||||
{
|
||||
char log_directory_fullpath[MAXPGPATH];
|
||||
|
||||
sprintf(log_directory_fullpath, "%s/%s",
|
||||
backup_path, config->log_directory);
|
||||
|
||||
json_add_value(buf, "log-directory", log_directory_fullpath,
|
||||
json_level, true);
|
||||
}
|
||||
else
|
||||
json_add_value(buf, "log-directory", config->log_directory,
|
||||
json_level, true);
|
||||
|
||||
json_add_key(buf, "log-rotation-size", json_level, true);
|
||||
convert_from_base_unit_u(config->log_rotation_size, OPTION_UNIT_KB,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"KB");
|
||||
|
||||
json_add_key(buf, "log-rotation-age", json_level, true);
|
||||
convert_from_base_unit_u(config->log_rotation_age, OPTION_UNIT_MS,
|
||||
&res, &unit);
|
||||
appendPQExpBuffer(buf, UINT64_FORMAT "%s", res, (res)?unit:"min");
|
||||
|
||||
/* Retention parameters */
|
||||
json_add_key(buf, "retention-redundancy", json_level, true);
|
||||
appendPQExpBuffer(buf, "%u", config->retention_redundancy);
|
||||
|
||||
json_add_key(buf, "retention-window", json_level, true);
|
||||
appendPQExpBuffer(buf, "%u", config->retention_window);
|
||||
|
||||
/* Compression parameters */
|
||||
json_add_value(buf, "compress-algorithm",
|
||||
deparse_compress_alg(config->compress_alg), json_level,
|
||||
true);
|
||||
|
||||
json_add_key(buf, "compress-level", json_level, true);
|
||||
appendPQExpBuffer(buf, "%d", config->compress_level);
|
||||
|
||||
json_add(buf, JT_END_OBJECT, &json_level);
|
||||
json_add_value(&show_buf, opt->lname, value, json_level,
|
||||
opt->type == 's' || opt->flags & OPTION_UNIT);
|
||||
pfree(value);
|
||||
}
|
||||
|
src/data.c (179 changed lines)
@ -29,6 +29,9 @@ typedef union DataPage
|
||||
char data[BLCKSZ];
|
||||
} DataPage;
|
||||
|
||||
static bool
|
||||
fileEqualCRC(const char *path1, const char *path2, bool path2_is_compressed);
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
/* Implementation of zlib compression method */
|
||||
static int32
|
||||
@ -484,7 +487,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
|
||||
blknum, header.compressed_size, write_buffer_size); */
|
||||
|
||||
/* Update CRC */
|
||||
COMP_TRADITIONAL_CRC32(*crc, write_buffer, write_buffer_size);
|
||||
COMP_FILE_CRC32(true, *crc, write_buffer, write_buffer_size);
|
||||
|
||||
/* write data page */
|
||||
if (fio_fwrite(out, write_buffer, write_buffer_size) != write_buffer_size)
|
||||
@ -544,13 +547,13 @@ backup_data_file(backup_files_arg* arguments,
|
||||
/* reset size summary */
|
||||
file->read_size = 0;
|
||||
file->write_size = 0;
|
||||
INIT_TRADITIONAL_CRC32(file->crc);
|
||||
INIT_FILE_CRC32(true, file->crc);
|
||||
|
||||
/* open backup mode file for read */
|
||||
in = fopen(file->path, PG_BINARY_R);
|
||||
if (in == NULL)
|
||||
{
|
||||
FIN_TRADITIONAL_CRC32(file->crc);
|
||||
FIN_FILE_CRC32(true, file->crc);
|
||||
|
||||
/*
|
||||
* If file is not found, this is not en error.
|
||||
@ -654,7 +657,7 @@ backup_data_file(backup_files_arg* arguments,
|
||||
to_path, strerror(errno));
|
||||
fclose(in);
|
||||
|
||||
FIN_TRADITIONAL_CRC32(file->crc);
|
||||
FIN_FILE_CRC32(true, file->crc);
|
||||
|
||||
/*
|
||||
* If we have pagemap then file in the backup can't be a zero size.
|
||||
@ -915,7 +918,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file, fio_location
|
||||
struct stat st;
|
||||
pg_crc32 crc;
|
||||
|
||||
INIT_TRADITIONAL_CRC32(crc);
|
||||
INIT_FILE_CRC32(true, crc);
|
||||
|
||||
/* reset size summary */
|
||||
file->read_size = 0;
|
||||
@ -925,7 +928,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file, fio_location
|
||||
in = fopen(file->path, PG_BINARY_R);
|
||||
if (in == NULL)
|
||||
{
|
||||
FIN_TRADITIONAL_CRC32(crc);
|
||||
FIN_FILE_CRC32(true, crc);
|
||||
file->crc = crc;
|
||||
|
||||
/* maybe deleted, it's not error */
|
||||
@ -974,7 +977,7 @@ copy_file(const char *from_root, const char *to_root, pgFile *file, fio_location
|
||||
strerror(errno_tmp));
|
||||
}
|
||||
/* update CRC */
|
||||
COMP_TRADITIONAL_CRC32(crc, buf, read_len);
|
||||
COMP_FILE_CRC32(true, crc, buf, read_len);
|
||||
|
||||
file->read_size += read_len;
|
||||
}
|
||||
@ -1001,14 +1004,14 @@ copy_file(const char *from_root, const char *to_root, pgFile *file, fio_location
|
||||
strerror(errno_tmp));
|
||||
}
|
||||
/* update CRC */
|
||||
COMP_TRADITIONAL_CRC32(crc, buf, read_len);
|
||||
COMP_FILE_CRC32(true, crc, buf, read_len);
|
||||
|
||||
file->read_size += read_len;
|
||||
}
|
||||
|
||||
file->write_size = (int64) file->read_size;
|
||||
/* finish CRC calculation and store into pgFile */
|
||||
FIN_TRADITIONAL_CRC32(crc);
|
||||
FIN_FILE_CRC32(true, crc);
|
||||
file->crc = crc;
|
||||
|
||||
/* update file permission */
|
||||
@ -1082,7 +1085,7 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
|
||||
FILE *in = NULL;
|
||||
FILE *out=NULL;
|
||||
char buf[XLOG_BLCKSZ];
|
||||
const char *to_path_p = to_path;
|
||||
const char *to_path_p;
|
||||
char to_path_temp[MAXPGPATH];
|
||||
int errno_temp;
|
||||
|
||||
@ -1090,7 +1093,15 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
|
||||
char gz_to_path[MAXPGPATH];
|
||||
gzFile gz_out = NULL;
|
||||
int gz_tmp = -1;
|
||||
|
||||
if (is_compress)
|
||||
{
|
||||
snprintf(gz_to_path, sizeof(gz_to_path), "%s.gz", to_path);
|
||||
to_path_p = gz_to_path;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
to_path_p = to_path;
|
||||
|
||||
/* open file for read */
|
||||
in = fopen(from_path, PG_BINARY_R);
|
||||
@ -1098,30 +1109,31 @@ push_wal_file(const char *from_path, const char *to_path, bool is_compress,
|
||||
elog(ERROR, "Cannot open source WAL file \"%s\": %s", from_path,
|
||||
strerror(errno));
|
||||
|
||||
/* Check if possible to skip copying */
|
||||
if (fileExists(to_path_p, FIO_BACKUP_HOST))
|
||||
{
|
||||
if (fileEqualCRC(from_path, to_path_p, is_compress))
|
||||
return;
|
||||
/* Do not copy and do not rise error. Just quit as normal. */
|
||||
else if (!overwrite)
|
||||
elog(ERROR, "WAL segment \"%s\" already exists.", to_path_p);
|
||||
}
|
||||
|
||||
/* open backup file for write */
|
||||
#ifdef HAVE_LIBZ
|
||||
if (is_compress)
|
||||
{
|
||||
snprintf(gz_to_path, sizeof(gz_to_path), "%s.gz", to_path);
|
||||
|
||||
if (!overwrite && fileExists(gz_to_path, FIO_BACKUP_HOST))
|
||||
elog(ERROR, "WAL segment \"%s\" already exists.", gz_to_path);
|
||||
|
||||
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", gz_to_path);
|
||||
|
||||
gz_out = fio_gzopen(to_path_temp, PG_BINARY_W, &gz_tmp, FIO_BACKUP_HOST);
|
||||
if (gzsetparams(gz_out, compress_level, Z_DEFAULT_STRATEGY) != Z_OK)
|
||||
if (gzsetparams(gz_out, instance_config.compress_level, Z_DEFAULT_STRATEGY) != Z_OK)
|
||||
elog(ERROR, "Cannot set compression level %d to file \"%s\": %s",
|
||||
compress_level, to_path_temp, get_gz_error(gz_out, errno));
|
||||
|
||||
to_path_p = gz_to_path;
|
||||
instance_config.compress_level, to_path_temp,
|
||||
get_gz_error(gz_out, errno));
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (!overwrite && fileExists(to_path, FIO_BACKUP_HOST))
|
||||
elog(ERROR, "WAL segment \"%s\" already exists.", to_path);
|
||||
|
||||
snprintf(to_path_temp, sizeof(to_path_temp), "%s.partial", to_path);
|
||||
|
||||
out = fio_fopen(to_path_temp, PG_BINARY_W, FIO_BACKUP_HOST);
|
||||
@ -1397,70 +1409,13 @@ get_wal_file(const char *from_path, const char *to_path)
|
||||
* but created in process of backup, such as stream XLOG files,
|
||||
* PG_TABLESPACE_MAP_FILE and PG_BACKUP_LABEL_FILE.
|
||||
*/
|
||||
bool
|
||||
void
|
||||
calc_file_checksum(pgFile *file)
|
||||
{
|
||||
int in;
|
||||
ssize_t read_len = 0;
|
||||
int errno_tmp;
|
||||
char buf[BLCKSZ];
|
||||
struct stat st;
|
||||
pg_crc32 crc;
|
||||
|
||||
Assert(S_ISREG(file->mode));
|
||||
INIT_TRADITIONAL_CRC32(crc);
|
||||
|
||||
/* reset size summary */
|
||||
file->read_size = 0;
|
||||
file->write_size = 0;
|
||||
|
||||
/* open backup mode file for read */
|
||||
in = fio_open(file->path, O_RDONLY|PG_BINARY, FIO_BACKUP_HOST);
|
||||
if (in < 0)
|
||||
{
|
||||
FIN_TRADITIONAL_CRC32(crc);
|
||||
file->crc = crc;
|
||||
|
||||
/* maybe deleted, it's not error */
|
||||
if (errno == ENOENT)
|
||||
return false;
|
||||
|
||||
elog(ERROR, "cannot open source file \"%s\": %s", file->path,
|
||||
strerror(errno));
|
||||
}
|
||||
|
||||
/* stat source file to change mode of destination file */
|
||||
if (fio_fstat(in, &st) == -1)
|
||||
{
|
||||
fio_close(in);
|
||||
elog(ERROR, "cannot stat \"%s\": %s", file->path,
|
||||
strerror(errno));
|
||||
}
|
||||
|
||||
while ((read_len = fio_read(in, buf, sizeof(buf))) > 0)
|
||||
{
|
||||
/* update CRC */
|
||||
COMP_TRADITIONAL_CRC32(crc, buf, read_len);
|
||||
|
||||
file->write_size += read_len;
|
||||
file->read_size += read_len;
|
||||
}
|
||||
|
||||
errno_tmp = errno;
|
||||
if (read_len < 0)
|
||||
{
|
||||
fio_close(in);
|
||||
elog(ERROR, "cannot read backup mode file \"%s\": %s",
|
||||
file->path, strerror(errno_tmp));
|
||||
}
|
||||
|
||||
/* finish CRC calculation and store into pgFile */
|
||||
FIN_TRADITIONAL_CRC32(crc);
|
||||
file->crc = crc;
|
||||
|
||||
fio_close(in);
|
||||
|
||||
return true;
|
||||
file->crc = pgFileGetCRC(file->path, true, false, &file->read_size, FIO_BACKUP_HOST);
|
||||
file->write_size = file->read_size;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1582,14 +1537,14 @@ validate_one_page(Page page, pgFile *file,

/* Validate pages of datafile in backup one by one */
bool
check_file_pages(pgFile *file, XLogRecPtr stop_lsn,
uint32 checksum_version, uint32 backup_version)
check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
uint32 backup_version)
{
size_t read_len = 0;
bool is_valid = true;
FILE *in;
pg_crc32 crc;
bool use_crc32c = (backup_version <= 20021);
bool use_crc32c = backup_version <= 20021 || backup_version >= 20025;

elog(VERBOSE, "validate relation blocks for file %s", file->name);

@ -1708,3 +1663,57 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn,

return is_valid;
}

static bool
fileEqualCRC(const char *path1, const char *path2, bool path2_is_compressed)
{
pg_crc32 crc1;
pg_crc32 crc2;

/* Get checksum of backup file */
#ifdef HAVE_LIBZ
if (path2_is_compressed)
{
char buf [1024];
gzFile gz_in = NULL;
int gz_tmp = -1;

INIT_FILE_CRC32(true, crc2);
gz_in = fio_gzopen(path2, PG_BINARY_R, &gz_tmp, FIO_BACKUP_HOST);
if (gz_in == NULL)
/* File cannot be read */
elog(ERROR,
"Cannot compare WAL file \"%s\" with compressed \"%s\"",
path1, path2);

for (;;)
{
size_t read_len = 0;
read_len = gzread(gz_in, buf, sizeof(buf));
if (read_len != sizeof(buf) && !gzeof(gz_in))
/* An error occurred while reading the file */
elog(ERROR,
"Cannot compare WAL file \"%s\" with compressed \"%s\"",
path1, path2);

COMP_FILE_CRC32(true, crc2, buf, read_len);
if (gzeof(gz_in) || read_len == 0)
break;
}
FIN_FILE_CRC32(true, crc2);

if (fio_gzclose(gz_in, path2, gz_tmp) != 0)
elog(ERROR, "Cannot close compressed WAL file \"%s\": %s",
path2, get_gz_error(gz_in, errno));
}
else
#endif
{
crc2 = pgFileGetCRC(path2, true, true, NULL, FIO_BACKUP_HOST);
}

/* Get checksum of original file */
crc1 = pgFileGetCRC(path1, true, true, NULL, FIO_LOCAL_HOST);

return EQ_CRC32C(crc1, crc2);
}
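fileEqualCRC() is what lets archive-push treat an already-archived, bit-identical WAL segment as success rather than an error, and it relies on the widened pgFileGetCRC() signature (use_crc32c, raise_on_deleted, bytes_read, location) introduced in the dir.c part of this diff. A hedged usage sketch of that signature; the call shapes match the ones visible in this commit, while the wrapper name and the paths are illustrative only:

/* Sketch: comparing a local WAL segment against its archived copy. */
static bool
sketch_segment_already_archived(const char *local_path, const char *archived_path)
{
	size_t		read_size = 0;
	pg_crc32	local_crc;
	pg_crc32	archived_crc;

	/* CRC-32C, error out if the file vanished, and also report the byte count */
	local_crc = pgFileGetCRC(local_path, true, true, &read_size, FIO_LOCAL_HOST);

	/* archived copy lives on the backup host; its size is not interesting here */
	archived_crc = pgFileGetCRC(archived_path, true, true, NULL, FIO_BACKUP_HOST);

	return EQ_CRC32C(local_crc, archived_crc);
}

calc_file_checksum() above was reduced to exactly this kind of call: a single pgFileGetCRC() invocation that fills both file->crc and file->read_size.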
35 src/delete.c
@ -108,7 +108,7 @@ do_delete(time_t backup_id)
}
}

delete_walfiles(oldest_lsn, oldest_tli, xlog_seg_size);
delete_walfiles(oldest_lsn, oldest_tli, instance_config.xlog_seg_size);
}

/* cleanup */
@ -124,9 +124,7 @@ int
do_retention_purge(void)
{
parray *backup_list;
uint32 backup_num;
size_t i;
time_t days_threshold = time(NULL) - (retention_window * 60 * 60 * 24);
XLogRecPtr oldest_lsn = InvalidXLogRecPtr;
TimeLineID oldest_tli = 0;
bool keep_next_backup = true; /* Do not delete first full backup */
@ -134,13 +132,13 @@ do_retention_purge(void)

if (delete_expired)
{
if (retention_redundancy > 0)
elog(LOG, "REDUNDANCY=%u", retention_redundancy);
if (retention_window > 0)
elog(LOG, "WINDOW=%u", retention_window);
if (instance_config.retention_redundancy > 0)
elog(LOG, "REDUNDANCY=%u", instance_config.retention_redundancy);
if (instance_config.retention_window > 0)
elog(LOG, "WINDOW=%u", instance_config.retention_window);

if (retention_redundancy == 0
&& retention_window == 0)
if (instance_config.retention_redundancy == 0
&& instance_config.retention_window == 0)
{
elog(WARNING, "Retention policy is not set");
if (!delete_wal)
@ -161,9 +159,15 @@ do_retention_purge(void)

/* Find target backups to be deleted */
if (delete_expired &&
(retention_redundancy > 0 || retention_window > 0))
(instance_config.retention_redundancy > 0 ||
instance_config.retention_window > 0))
{
backup_num = 0;
time_t days_threshold;
uint32 backup_num = 0;

days_threshold = time(NULL) -
(instance_config.retention_window * 60 * 60 * 24);

for (i = 0; i < parray_num(backup_list); i++)
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
@ -181,8 +185,9 @@ do_retention_purge(void)

/* Evaluate retention_redundancy if this backup is eligible for removal */
if (keep_next_backup ||
retention_redundancy >= backup_num_evaluate + 1 ||
(retention_window > 0 && backup->recovery_time >= days_threshold))
instance_config.retention_redundancy >= backup_num_evaluate + 1 ||
(instance_config.retention_window > 0 &&
backup->recovery_time >= days_threshold))
{
/* Save LSN and Timeline to remove unnecessary WAL segments */
oldest_lsn = backup->start_lsn;
@ -225,7 +230,7 @@ do_retention_purge(void)
/* Purge WAL files */
if (delete_wal)
{
delete_walfiles(oldest_lsn, oldest_tli, xlog_seg_size);
delete_walfiles(oldest_lsn, oldest_tli, instance_config.xlog_seg_size);
}

/* Cleanup */
@ -442,7 +447,7 @@ do_delete_instance(void)
parray_free(backup_list);

/* Delete all wal files. */
delete_walfiles(InvalidXLogRecPtr, 0, xlog_seg_size);
delete_walfiles(InvalidXLogRecPtr, 0, instance_config.xlog_seg_size);

/* Delete backup instance config file */
join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
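The window test above is plain date arithmetic: a backup passes it when its recovery_time is no older than retention-window days before the moment the purge runs. A small self-contained sketch of that predicate (the helper name is hypothetical; the condition mirrors the one in do_retention_purge()):

#include <time.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch: does the retention window alone keep this backup? */
static bool
kept_by_window(time_t recovery_time, uint32_t retention_window_days)
{
	/* e.g. --retention-window=7  =>  cutoff is 604800 seconds before "now" */
	time_t	days_threshold = time(NULL) -
		((time_t) retention_window_days * 60 * 60 * 24);

	return retention_window_days > 0 && recovery_time >= days_threshold;
}

In do_retention_purge() this is only one arm of the keep condition: a backup also survives while it is still within --retention-redundancy, or while the keep_next_backup flag is set (it starts out true so the first full backup is never deleted).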
61
src/dir.c
61
src/dir.c
@ -21,6 +21,8 @@
|
||||
#include <sys/stat.h>
|
||||
#include <dirent.h>
|
||||
|
||||
#include "utils/configuration.h"
|
||||
|
||||
/*
|
||||
* The contents of these directories are removed or recreated during server
|
||||
* start so they are not included in backups. The directories themselves are
|
||||
@ -261,37 +263,56 @@ delete_file:
|
||||
}
|
||||
|
||||
pg_crc32
|
||||
pgFileGetCRC(const char *file_path, bool use_crc32c)
|
||||
pgFileGetCRC(const char *file_path, bool use_crc32c, bool raise_on_deleted,
|
||||
size_t *bytes_read, fio_location location)
|
||||
{
|
||||
FILE *fp;
|
||||
pg_crc32 crc = 0;
|
||||
char buf[1024];
|
||||
size_t len;
|
||||
size_t len = 0;
|
||||
size_t total = 0;
|
||||
int errno_tmp;
|
||||
|
||||
/* open file in binary read mode */
|
||||
fp = fopen(file_path, PG_BINARY_R);
|
||||
if (fp == NULL)
|
||||
elog(ERROR, "cannot open file \"%s\": %s",
|
||||
file_path, strerror(errno));
|
||||
|
||||
/* calc CRC of backup file */
|
||||
INIT_FILE_CRC32(use_crc32c, crc);
|
||||
while ((len = fread(buf, 1, sizeof(buf), fp)) == sizeof(buf))
|
||||
|
||||
/* open file in binary read mode */
|
||||
fp = fio_fopen(file_path, PG_BINARY_R, location);
|
||||
if (fp == NULL)
|
||||
{
|
||||
if (!raise_on_deleted && errno == ENOENT)
|
||||
{
|
||||
FIN_FILE_CRC32(use_crc32c, crc);
|
||||
return crc;
|
||||
}
|
||||
else
|
||||
elog(ERROR, "cannot open file \"%s\": %s",
|
||||
file_path, strerror(errno));
|
||||
}
|
||||
|
||||
/* calc CRC of file */
|
||||
for (;;)
|
||||
{
|
||||
if (interrupted)
|
||||
elog(ERROR, "interrupted during CRC calculation");
|
||||
|
||||
len = fio_fread(fp, buf, sizeof(buf));
|
||||
if(len == 0)
|
||||
break;
|
||||
/* update CRC */
|
||||
COMP_FILE_CRC32(use_crc32c, crc, buf, len);
|
||||
total += len;
|
||||
}
|
||||
|
||||
if (bytes_read)
|
||||
*bytes_read = total;
|
||||
|
||||
errno_tmp = errno;
|
||||
if (!feof(fp))
|
||||
if (len < 0)
|
||||
elog(WARNING, "cannot read \"%s\": %s", file_path,
|
||||
strerror(errno_tmp));
|
||||
if (len > 0)
|
||||
COMP_FILE_CRC32(use_crc32c, crc, buf, len);
|
||||
FIN_FILE_CRC32(use_crc32c, crc);
|
||||
|
||||
fclose(fp);
|
||||
FIN_FILE_CRC32(use_crc32c, crc);
|
||||
fio_fclose(fp);
|
||||
|
||||
return crc;
|
||||
}
|
||||
@ -381,7 +402,8 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
|
||||
|
||||
join_path_components(path, backup_instance_path, PG_BLACK_LIST);
|
||||
/* List files with black list */
|
||||
if (root && pgdata && strcmp(root, pgdata) == 0 && fileExists(path, FIO_LOCAL_HOST))
|
||||
if (root && instance_config.pgdata &&
|
||||
strcmp(root, instance_config.pgdata) == 0 && fileExists(path, FIO_LOCAL_HOST))
|
||||
{
|
||||
FILE *black_list_file = NULL;
|
||||
char buf[MAXPGPATH * 2];
|
||||
@ -395,7 +417,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
|
||||
|
||||
while (fgets(buf, lengthof(buf), black_list_file) != NULL)
|
||||
{
|
||||
join_path_components(black_item, pgdata, buf);
|
||||
join_path_components(black_item, instance_config.pgdata, buf);
|
||||
|
||||
if (black_item[strlen(black_item) - 1] == '\n')
|
||||
black_item[strlen(black_item) - 1] = '\0';
|
||||
@ -846,7 +868,7 @@ get_tablespace_created(const char *link)
|
||||
* Copy of function tablespace_list_append() from pg_basebackup.c.
|
||||
*/
|
||||
void
|
||||
opt_tablespace_map(pgut_option *opt, const char *arg)
|
||||
opt_tablespace_map(ConfigOption *opt, const char *arg)
|
||||
{
|
||||
TablespaceListCell *cell = pgut_new(TablespaceListCell);
|
||||
char *dst;
|
||||
@ -1122,7 +1144,8 @@ check_tablespace_mapping(pgBackup *backup)
|
||||
/* Sort links by the path of a linked file*/
|
||||
parray_qsort(links, pgFileCompareLinked);
|
||||
|
||||
if (log_level_console <= LOG || log_level_file <= LOG)
|
||||
if (logger_config.log_level_console <= LOG ||
|
||||
logger_config.log_level_file <= LOG)
|
||||
elog(LOG, "check tablespace directories of backup %s",
|
||||
base36enc(backup->start_time));
|
||||
|
||||
|
28 src/init.c
@ -21,7 +21,7 @@ do_init(void)
{
char path[MAXPGPATH];
char arclog_path_dir[MAXPGPATH];
int results;
int results;

results = pg_check_dir(backup_path);
if (results == 4) /* exists and not empty*/
@ -54,17 +54,16 @@ do_add_instance(void)
char path[MAXPGPATH];
char arclog_path_dir[MAXPGPATH];
struct stat st;
pgBackupConfig *config = pgut_new(pgBackupConfig);

/* PGDATA is always required */
if (pgdata == NULL)
if (instance_config.pgdata == NULL)
elog(ERROR, "Required parameter not specified: PGDATA "
"(-D, --pgdata)");

/* Read system_identifier from PGDATA */
system_identifier = get_system_identifier(pgdata);
instance_config.system_identifier = get_system_identifier(instance_config.pgdata);
/* Starting from PostgreSQL 11 read WAL segment size from PGDATA */
xlog_seg_size = get_xlog_seg_size(pgdata);
instance_config.xlog_seg_size = get_xlog_seg_size(instance_config.pgdata);

/* Ensure that all root directories already exist */
if (access(backup_path, F_OK) != 0)
@ -93,14 +92,19 @@ do_add_instance(void)
dir_create_dir(arclog_path, DIR_PERMISSION);

/*
* Write initial config. system-identifier and pgdata are set in
* init subcommand and will never be updated.
* Write initial configuration file.
* system-identifier, xlog-seg-size and pgdata are set in init subcommand
* and will never be updated.
*
* We need to manually set options source to save them to the configuration
* file.
*/
pgBackupConfigInit(config);
config->system_identifier = system_identifier;
config->xlog_seg_size = xlog_seg_size;
config->pgdata = pgdata;
writeBackupCatalogConfigFile(config);
config_set_opt(instance_options, &instance_config.system_identifier,
SOURCE_FILE);
config_set_opt(instance_options, &instance_config.xlog_seg_size,
SOURCE_FILE);
/* pgdata was set through command line */
do_set_config();

elog(INFO, "Instance '%s' successfully inited", instance_name);
return 0;
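With pgBackupConfig and writeBackupCatalogConfigFile() gone, add-instance now works entirely through the shared instance_config plus option sources: only values whose source is promoted to SOURCE_FILE end up in pg_probackup.conf when do_set_config() runs. A condensed sketch of the flow in this hunk, using only calls that appear in the diff (ordering and comments are illustrative; pgdata is assumed to have been set from the command line already, as the hunk's own comment notes):

/* Sketch: how 'add-instance' seeds and persists the instance configuration. */
init_config(&instance_config);          /* start from built-in defaults */

/* Values fixed at init time are read straight from PGDATA ... */
instance_config.system_identifier = get_system_identifier(instance_config.pgdata);
instance_config.xlog_seg_size = get_xlog_seg_size(instance_config.pgdata);

/* ... and their source is raised to SOURCE_FILE so the writer includes them. */
config_set_opt(instance_options, &instance_config.system_identifier, SOURCE_FILE);
config_set_opt(instance_options, &instance_config.xlog_seg_size, SOURCE_FILE);

do_set_config();                        /* writes the instance's pg_probackup.conf */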
10 src/merge.c
@ -295,9 +295,9 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
}
/* compute size of wal files of this backup stored in the archive */
if (!to_backup->stream)
to_backup->wal_bytes = xlog_seg_size *
(to_backup->stop_lsn / xlog_seg_size -
to_backup->start_lsn / xlog_seg_size + 1);
to_backup->wal_bytes = instance_config.xlog_seg_size *
(to_backup->stop_lsn / instance_config.xlog_seg_size -
to_backup->start_lsn / instance_config.xlog_seg_size + 1);
else
to_backup->wal_bytes = BYTES_INVALID;

@ -524,9 +524,11 @@ merge_files(void *arg)
* do that.
*/
file->write_size = pgFileSize(to_path_tmp);
file->crc = pgFileGetCRC(to_path_tmp, false);
file->crc = pgFileGetCRC(to_path_tmp, true, true, NULL, FIO_LOCAL_HOST);
}
}
else if (strcmp(file->name, "pg_control") == 0)
copy_pgcontrol_file(argument->from_root, argument->to_root, file, FIO_LOCAL_HOST);
else
copy_file(argument->from_root, argument->to_root, file, FIO_LOCAL_HOST);
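The wal_bytes expression above counts whole WAL segments spanned by the merged backup, inclusive of the segments containing both start_lsn and stop_lsn. A worked example with made-up LSNs and the default 16 MB segment size:

/* Sketch: wal_bytes for a non-stream backup (illustrative values). */
uint32		seg_size  = 16 * 1024 * 1024;			/* 16777216 */
XLogRecPtr	start_lsn = (XLogRecPtr) 0x2000028;		/* falls in segment 2 */
XLogRecPtr	stop_lsn  = (XLogRecPtr) 0x4800130;		/* falls in segment 4 */

int64 wal_bytes = (int64) seg_size *
	(stop_lsn / seg_size - start_lsn / seg_size + 1);	/* (4 - 2 + 1) segments */
/* wal_bytes == 50331648: three 16 MB segments are charged to this backup */

Stream backups skip the calculation and keep BYTES_INVALID, since their WAL is stored inside the backup itself rather than in the shared archive.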
146
src/parsexlog.c
146
src/parsexlog.c
@ -88,7 +88,7 @@ static bool getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime
|
||||
|
||||
typedef struct XLogPageReadPrivate
|
||||
{
|
||||
int thread_num;
|
||||
int thread_num;
|
||||
const char *archivedir;
|
||||
TimeLineID tli;
|
||||
uint32 xlog_seg_size;
|
||||
@ -101,6 +101,7 @@ typedef struct XLogPageReadPrivate
|
||||
char xlogpath[MAXPGPATH];
|
||||
bool xlogexists;
|
||||
fio_location location;
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
gzFile gz_xlogfile;
|
||||
char gz_xlogpath[MAXPGPATH];
|
||||
@ -133,8 +134,7 @@ static XLogReaderState *InitXLogPageRead(XLogPageReadPrivate *private_data,
|
||||
TimeLineID tli, uint32 xlog_seg_size,
|
||||
bool allocate_reader);
|
||||
static void CleanupXLogPageRead(XLogReaderState *xlogreader);
|
||||
static void PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data,
|
||||
int elevel);
|
||||
static void PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data, int elevel);
|
||||
|
||||
static XLogSegNo nextSegNoToRead = 0;
|
||||
static pthread_mutex_t wal_segment_mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
@ -230,7 +230,7 @@ doExtractPageMap(void *arg)
|
||||
#endif
|
||||
if (xlogreader == NULL)
|
||||
elog(ERROR, "Thread [%d]: out of memory", private_data->thread_num);
|
||||
xlogreader->system_identifier = system_identifier;
|
||||
xlogreader->system_identifier = instance_config.system_identifier;
|
||||
|
||||
found = XLogFindNextRecord(xlogreader, extract_arg->startpoint);
|
||||
|
||||
@ -240,11 +240,17 @@ doExtractPageMap(void *arg)
|
||||
*/
|
||||
if (XLogRecPtrIsInvalid(found))
|
||||
{
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X. %s",
|
||||
private_data->thread_num,
|
||||
(uint32) (extract_arg->startpoint >> 32),
|
||||
(uint32) (extract_arg->startpoint),
|
||||
(xlogreader->errormsg_buf[0] != '\0')?xlogreader->errormsg_buf:"");
|
||||
if (xlogreader->errormsg_buf[0] != '\0')
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X: %s",
|
||||
private_data->thread_num,
|
||||
(uint32) (extract_arg->startpoint >> 32),
|
||||
(uint32) (extract_arg->startpoint),
|
||||
xlogreader->errormsg_buf);
|
||||
else
|
||||
elog(WARNING, "Thread [%d]: could not read WAL record at %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (extract_arg->startpoint >> 32),
|
||||
(uint32) (extract_arg->startpoint));
|
||||
PrintXLogCorruptionMsg(private_data, ERROR);
|
||||
}
|
||||
extract_arg->startpoint = found;
|
||||
@ -767,6 +773,116 @@ wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
* Get LSN of last or prior record within the WAL segment with number 'segno'.
* If 'start_lsn' is in the segment with number 'segno' then start from
* 'start_lsn', otherwise start from offset 0 within the segment.
*
* Returns LSN which points to end+1 of the last WAL record if
* seek_prev_segment is true. Otherwise returns LSN of the record prior to
* stop_lsn.
*/
XLogRecPtr
get_last_wal_lsn(const char *archivedir, XLogRecPtr start_lsn,
XLogRecPtr stop_lsn, TimeLineID tli, bool seek_prev_segment,
uint32 seg_size)
{
|
||||
XLogReaderState *xlogreader;
|
||||
XLogPageReadPrivate private;
|
||||
XLogRecPtr startpoint;
|
||||
XLogSegNo start_segno;
|
||||
XLogSegNo segno;
|
||||
XLogRecPtr res = InvalidXLogRecPtr;
|
||||
|
||||
GetXLogSegNo(stop_lsn, segno, seg_size);
|
||||
|
||||
if (segno <= 1)
|
||||
elog(ERROR, "Invalid WAL segment number " UINT64_FORMAT, segno);
|
||||
|
||||
if (seek_prev_segment)
|
||||
segno = segno - 1;
|
||||
|
||||
xlogreader = InitXLogPageRead(&private, archivedir, tli, seg_size, true);
|
||||
|
||||
/*
|
||||
* Calculate startpoint. Decide: we should use 'start_lsn' or offset 0.
|
||||
*/
|
||||
GetXLogSegNo(start_lsn, start_segno, seg_size);
|
||||
if (start_segno == segno)
|
||||
startpoint = start_lsn;
|
||||
else
|
||||
{
|
||||
XLogRecPtr found;
|
||||
|
||||
GetXLogRecPtr(segno, 0, seg_size, startpoint);
|
||||
found = XLogFindNextRecord(xlogreader, startpoint);
|
||||
|
||||
if (XLogRecPtrIsInvalid(found))
|
||||
{
|
||||
if (xlogreader->errormsg_buf[0] != '\0')
|
||||
elog(WARNING, "Could not read WAL record at %X/%X: %s",
|
||||
(uint32) (startpoint >> 32), (uint32) (startpoint),
|
||||
xlogreader->errormsg_buf);
|
||||
else
|
||||
elog(WARNING, "Could not read WAL record at %X/%X",
|
||||
(uint32) (startpoint >> 32), (uint32) (startpoint));
|
||||
PrintXLogCorruptionMsg(&private, ERROR);
|
||||
}
|
||||
startpoint = found;
|
||||
}
|
||||
|
||||
while (true)
|
||||
{
|
||||
XLogRecord *record;
|
||||
char *errormsg;
|
||||
XLogSegNo next_segno = 0;
|
||||
|
||||
if (interrupted)
|
||||
elog(ERROR, "Interrupted during WAL reading");
|
||||
|
||||
record = XLogReadRecord(xlogreader, startpoint, &errormsg);
|
||||
if (record == NULL)
|
||||
{
|
||||
XLogRecPtr errptr;
|
||||
|
||||
errptr = XLogRecPtrIsInvalid(startpoint) ? xlogreader->EndRecPtr :
|
||||
startpoint;
|
||||
|
||||
if (errormsg)
|
||||
elog(WARNING, "Could not read WAL record at %X/%X: %s",
|
||||
(uint32) (errptr >> 32), (uint32) (errptr),
|
||||
errormsg);
|
||||
else
|
||||
elog(WARNING, "Could not read WAL record at %X/%X",
|
||||
(uint32) (errptr >> 32), (uint32) (errptr));
|
||||
PrintXLogCorruptionMsg(&private, ERROR);
|
||||
}
|
||||
|
||||
/* continue reading at next record */
|
||||
startpoint = InvalidXLogRecPtr;
|
||||
|
||||
GetXLogSegNo(xlogreader->EndRecPtr, next_segno, seg_size);
|
||||
if (next_segno > segno)
|
||||
break;
|
||||
|
||||
if (seek_prev_segment)
|
||||
{
|
||||
/* end+1 of last record read */
|
||||
res = xlogreader->EndRecPtr;
|
||||
}
|
||||
else
|
||||
res = xlogreader->ReadRecPtr;
|
||||
|
||||
if (xlogreader->EndRecPtr >= stop_lsn)
|
||||
break;
|
||||
}
|
||||
|
||||
CleanupXLogPageRead(xlogreader);
|
||||
XLogReaderFree(xlogreader);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
#ifdef HAVE_LIBZ
|
||||
/*
|
||||
* Show error during work with compressed file
|
||||
@ -808,14 +924,14 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
|
||||
if (!IsInXLogSeg(targetPagePtr, private_data->xlogsegno,
|
||||
private_data->xlog_seg_size))
|
||||
{
|
||||
elog(VERBOSE, "Thread [%d]: Need to switch to segno next to %X/%X, current LSN %X/%X",
|
||||
elog(VERBOSE, "Thread [%d]: Need to switch to the next WAL segment, page LSN %X/%X, record being read LSN %X/%X",
|
||||
private_data->thread_num,
|
||||
(uint32) (targetPagePtr >> 32), (uint32) (targetPagePtr),
|
||||
(uint32) (xlogreader->currRecPtr >> 32),
|
||||
(uint32) (xlogreader->currRecPtr ));
|
||||
|
||||
/*
|
||||
* if the last record on the page is not complete,
|
||||
* If the last record on the page is not complete,
|
||||
* we must continue reading pages in the same thread
|
||||
*/
|
||||
if (!XLogRecPtrIsInvalid(xlogreader->currRecPtr) &&
|
||||
@ -981,7 +1097,7 @@ InitXLogPageRead(XLogPageReadPrivate *private_data, const char *archivedir,
|
||||
#endif
|
||||
if (xlogreader == NULL)
|
||||
elog(ERROR, "out of memory");
|
||||
xlogreader->system_identifier = system_identifier;
|
||||
xlogreader->system_identifier = instance_config.system_identifier;
|
||||
}
|
||||
|
||||
return xlogreader;
|
||||
@ -1037,6 +1153,12 @@ PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data, int elevel)
private_data->gz_xlogpath);
#endif
}
else
{
/* Cannot tell what happened specifically */
elog(elevel, "Thread [%d]: An error occurred during WAL reading",
private_data->thread_num);
}
}

/*
src/pg_probackup.c
@ -16,10 +16,11 @@

#include <sys/stat.h>

#include "utils/configuration.h"
#include "utils/thread.h"
#include <time.h>

const char *PROGRAM_VERSION = "2.0.24";
const char *PROGRAM_VERSION = "2.0.25";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
@ -43,7 +44,6 @@ typedef enum ProbackupSubcmd
|
||||
|
||||
/* directory options */
|
||||
char *backup_path = NULL;
|
||||
char *pgdata = NULL;
|
||||
/*
|
||||
* path or to the data files in the backup catalog
|
||||
* $BACKUP_PATH/backups/instance_name
|
||||
@ -67,13 +67,6 @@ char *replication_slot = NULL;
|
||||
/* backup options */
|
||||
bool backup_logs = false;
|
||||
bool smooth_checkpoint;
|
||||
/* Wait timeout for WAL segment archiving */
|
||||
uint32 archive_timeout = ARCHIVE_TIMEOUT_DEFAULT;
|
||||
const char *master_db = NULL;
|
||||
const char *master_host = NULL;
|
||||
const char *master_port= NULL;
|
||||
const char *master_user = NULL;
|
||||
uint32 replica_timeout = REPLICA_TIMEOUT_DEFAULT;
|
||||
char *remote_host;
|
||||
char *remote_port;
|
||||
char *remote_proto = (char*)"ssh";
|
||||
@ -102,32 +95,13 @@ bool skip_block_validation = false;
|
||||
/* delete options */
|
||||
bool delete_wal = false;
|
||||
bool delete_expired = false;
|
||||
bool apply_to_all = false;
|
||||
bool force_delete = false;
|
||||
|
||||
/* retention options */
|
||||
uint32 retention_redundancy = 0;
|
||||
uint32 retention_window = 0;
|
||||
|
||||
/* compression options */
|
||||
CompressAlg compress_alg = COMPRESS_ALG_DEFAULT;
|
||||
int compress_level = COMPRESS_LEVEL_DEFAULT;
|
||||
bool compress_shortcut = false;
|
||||
|
||||
|
||||
/* other options */
|
||||
char *instance_name;
|
||||
uint64 system_identifier = 0;
|
||||
|
||||
/*
|
||||
* Starting from PostgreSQL 11 WAL segment size may vary. Prior to
|
||||
* PostgreSQL 10 xlog_seg_size is equal to XLOG_SEG_SIZE.
|
||||
*/
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
uint32 xlog_seg_size = 0;
|
||||
#else
|
||||
uint32 xlog_seg_size = XLOG_SEG_SIZE;
|
||||
#endif
|
||||
|
||||
/* archive push options */
|
||||
static char *wal_file_path;
|
||||
@ -143,98 +117,69 @@ static ProbackupSubcmd backup_subcmd = NO_CMD;
|
||||
|
||||
static bool help_opt = false;
|
||||
|
||||
static void opt_backup_mode(pgut_option *opt, const char *arg);
|
||||
static void opt_log_level_console(pgut_option *opt, const char *arg);
|
||||
static void opt_log_level_file(pgut_option *opt, const char *arg);
|
||||
static void opt_compress_alg(pgut_option *opt, const char *arg);
|
||||
static void opt_show_format(pgut_option *opt, const char *arg);
|
||||
static void opt_backup_mode(ConfigOption *opt, const char *arg);
|
||||
static void opt_show_format(ConfigOption *opt, const char *arg);
|
||||
|
||||
static void compress_init(void);
|
||||
|
||||
static pgut_option options[] =
|
||||
/*
|
||||
* Short name should be non-printable ASCII character.
|
||||
*/
|
||||
static ConfigOption cmd_options[] =
|
||||
{
|
||||
/* directory options */
|
||||
{ 'b', 1, "help", &help_opt, SOURCE_CMDLINE },
|
||||
{ 's', 'D', "pgdata", &pgdata, SOURCE_CMDLINE },
|
||||
{ 's', 'B', "backup-path", &backup_path, SOURCE_CMDLINE },
|
||||
{ 'b', 130, "help", &help_opt, SOURCE_CMD_STRICT },
|
||||
{ 's', 'B', "backup-path", &backup_path, SOURCE_CMD_STRICT },
|
||||
/* common options */
|
||||
{ 'u', 'j', "threads", &num_threads, SOURCE_CMDLINE },
|
||||
{ 'b', 2, "stream", &stream_wal, SOURCE_CMDLINE },
|
||||
{ 'b', 3, "progress", &progress, SOURCE_CMDLINE },
|
||||
{ 's', 'i', "backup-id", &backup_id_string, SOURCE_CMDLINE },
|
||||
{ 'u', 'j', "threads", &num_threads, SOURCE_CMD_STRICT },
|
||||
{ 'b', 131, "stream", &stream_wal, SOURCE_CMD_STRICT },
|
||||
{ 'b', 132, "progress", &progress, SOURCE_CMD_STRICT },
|
||||
{ 's', 'i', "backup-id", &backup_id_string, SOURCE_CMD_STRICT },
|
||||
/* backup options */
|
||||
{ 'b', 10, "backup-pg-log", &backup_logs, SOURCE_CMDLINE },
|
||||
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMDLINE },
|
||||
{ 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMDLINE },
|
||||
{ 's', 'S', "slot", &replication_slot, SOURCE_CMDLINE },
|
||||
{ 'u', 11, "archive-timeout", &archive_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
{ 'b', 12, "delete-wal", &delete_wal, SOURCE_CMDLINE },
|
||||
{ 'b', 13, "delete-expired", &delete_expired, SOURCE_CMDLINE },
|
||||
{ 's', 14, "master-db", &master_db, SOURCE_CMDLINE, },
|
||||
{ 's', 15, "master-host", &master_host, SOURCE_CMDLINE, },
|
||||
{ 's', 16, "master-port", &master_port, SOURCE_CMDLINE, },
|
||||
{ 's', 17, "master-user", &master_user, SOURCE_CMDLINE, },
|
||||
{ 'u', 18, "replica-timeout", &replica_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
|
||||
{ 's', 19, "remote-host", &remote_host, SOURCE_CMDLINE, },
|
||||
{ 's', 20, "remote-port", &remote_port, SOURCE_CMDLINE, },
|
||||
{ 's', 21, "remote-proto", &remote_proto, SOURCE_CMDLINE, },
|
||||
{ 's', 22, "ssh-config", &ssh_config, SOURCE_CMDLINE, },
|
||||
{ 's', 23, "ssh-options", &ssh_options, SOURCE_CMDLINE, },
|
||||
{ 'b', 24, "agent", &is_remote_agent, SOURCE_CMDLINE, },
|
||||
{ 'b', 25, "remote", &is_remote_backup, SOURCE_CMDLINE, },
|
||||
{ 'b', 133, "backup-pg-log", &backup_logs, SOURCE_CMD_STRICT },
|
||||
{ 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT },
|
||||
{ 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT },
|
||||
{ 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT },
|
||||
{ 'b', 134, "delete-wal", &delete_wal, SOURCE_CMD_STRICT },
|
||||
{ 'b', 135, "delete-expired", &delete_expired, SOURCE_CMD_STRICT },
|
||||
{ 's', 19, "remote-host", &remote_host, SOURCE_CMD_STRICT, },
|
||||
{ 's', 20, "remote-port", &remote_port, SOURCE_CMD_STRICT, },
|
||||
{ 's', 21, "remote-proto", &remote_proto, SOURCE_CMD_STRICT, },
|
||||
{ 's', 22, "ssh-config", &ssh_config, SOURCE_CMD_STRICT, },
|
||||
{ 's', 23, "ssh-options", &ssh_options, SOURCE_CMD_STRICT, },
|
||||
{ 'b', 24, "agent", &is_remote_agent, SOURCE_CMD_STRICT, },
|
||||
{ 'b', 25, "remote", &is_remote_backup, SOURCE_CMD_STRICT, },
|
||||
/* restore options */
|
||||
{ 's', 30, "time", &target_time, SOURCE_CMDLINE },
|
||||
{ 's', 31, "xid", &target_xid, SOURCE_CMDLINE },
|
||||
{ 's', 32, "inclusive", &target_inclusive, SOURCE_CMDLINE },
|
||||
{ 'u', 33, "timeline", &target_tli, SOURCE_CMDLINE },
|
||||
{ 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMDLINE },
|
||||
{ 'b', 34, "immediate", &target_immediate, SOURCE_CMDLINE },
|
||||
{ 's', 35, "recovery-target-name", &target_name, SOURCE_CMDLINE },
|
||||
{ 's', 36, "recovery-target-action", &target_action, SOURCE_CMDLINE },
|
||||
{ 'b', 'R', "restore-as-replica", &restore_as_replica, SOURCE_CMDLINE },
|
||||
{ 'b', 27, "no-validate", &restore_no_validate, SOURCE_CMDLINE },
|
||||
{ 's', 28, "lsn", &target_lsn, SOURCE_CMDLINE },
|
||||
{ 'b', 29, "skip-block-validation", &skip_block_validation, SOURCE_CMDLINE },
|
||||
{ 's', 136, "time", &target_time, SOURCE_CMD_STRICT },
|
||||
{ 's', 137, "xid", &target_xid, SOURCE_CMD_STRICT },
|
||||
{ 's', 138, "inclusive", &target_inclusive, SOURCE_CMD_STRICT },
|
||||
{ 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT },
|
||||
{ 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMD_STRICT },
|
||||
{ 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT },
|
||||
{ 's', 141, "recovery-target-name", &target_name, SOURCE_CMD_STRICT },
|
||||
{ 's', 142, "recovery-target-action", &target_action, SOURCE_CMD_STRICT },
|
||||
{ 'b', 'R', "restore-as-replica", &restore_as_replica, SOURCE_CMD_STRICT },
|
||||
{ 'b', 143, "no-validate", &restore_no_validate, SOURCE_CMD_STRICT },
|
||||
{ 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT },
|
||||
{ 'b', 154, "skip-block-validation", &skip_block_validation, SOURCE_CMD_STRICT },
|
||||
/* delete options */
|
||||
{ 'b', 130, "wal", &delete_wal, SOURCE_CMDLINE },
|
||||
{ 'b', 131, "expired", &delete_expired, SOURCE_CMDLINE },
|
||||
{ 'b', 132, "all", &apply_to_all, SOURCE_CMDLINE },
|
||||
{ 'b', 145, "wal", &delete_wal, SOURCE_CMD_STRICT },
|
||||
{ 'b', 146, "expired", &delete_expired, SOURCE_CMD_STRICT },
|
||||
/* TODO not implemented yet */
|
||||
{ 'b', 133, "force", &force_delete, SOURCE_CMDLINE },
|
||||
/* retention options */
|
||||
{ 'u', 134, "retention-redundancy", &retention_redundancy, SOURCE_CMDLINE },
|
||||
{ 'u', 135, "retention-window", &retention_window, SOURCE_CMDLINE },
|
||||
{ 'b', 147, "force", &force_delete, SOURCE_CMD_STRICT },
|
||||
/* compression options */
|
||||
{ 'f', 136, "compress-algorithm", opt_compress_alg, SOURCE_CMDLINE },
|
||||
{ 'u', 137, "compress-level", &compress_level, SOURCE_CMDLINE },
|
||||
{ 'b', 138, "compress", &compress_shortcut, SOURCE_CMDLINE },
|
||||
/* logging options */
|
||||
{ 'f', 140, "log-level-console", opt_log_level_console, SOURCE_CMDLINE },
|
||||
{ 'f', 141, "log-level-file", opt_log_level_file, SOURCE_CMDLINE },
|
||||
{ 's', 142, "log-filename", &log_filename, SOURCE_CMDLINE },
|
||||
{ 's', 143, "error-log-filename", &error_log_filename, SOURCE_CMDLINE },
|
||||
{ 's', 144, "log-directory", &log_directory, SOURCE_CMDLINE },
|
||||
{ 'U', 145, "log-rotation-size", &log_rotation_size, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_KB },
|
||||
{ 'U', 146, "log-rotation-age", &log_rotation_age, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
|
||||
{ 'b', 148, "compress", &compress_shortcut, SOURCE_CMD_STRICT },
|
||||
/* connection options */
|
||||
{ 's', 'd', "pgdatabase", &pgut_dbname, SOURCE_CMDLINE },
|
||||
{ 's', 'h', "pghost", &host, SOURCE_CMDLINE },
|
||||
{ 's', 'p', "pgport", &port, SOURCE_CMDLINE },
|
||||
{ 's', 'U', "pguser", &username, SOURCE_CMDLINE },
|
||||
{ 'B', 'w', "no-password", &prompt_password, SOURCE_CMDLINE },
|
||||
{ 'b', 'W', "password", &force_password, SOURCE_CMDLINE },
|
||||
{ 'B', 'w', "no-password", &prompt_password, SOURCE_CMD_STRICT },
|
||||
{ 'b', 'W', "password", &force_password, SOURCE_CMD_STRICT },
|
||||
/* other options */
|
||||
{ 'U', 150, "system-identifier", &system_identifier, SOURCE_FILE_STRICT },
|
||||
{ 's', 151, "instance", &instance_name, SOURCE_CMDLINE },
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
{ 'u', 152, "xlog-seg-size", &xlog_seg_size, SOURCE_FILE_STRICT},
|
||||
#endif
|
||||
{ 's', 149, "instance", &instance_name, SOURCE_CMD_STRICT },
|
||||
/* archive-push options */
|
||||
{ 's', 160, "wal-file-path", &wal_file_path, SOURCE_CMDLINE },
|
||||
{ 's', 161, "wal-file-name", &wal_file_name, SOURCE_CMDLINE },
|
||||
{ 'b', 162, "overwrite", &file_overwrite, SOURCE_CMDLINE },
|
||||
{ 's', 150, "wal-file-path", &wal_file_path, SOURCE_CMD_STRICT },
|
||||
{ 's', 151, "wal-file-name", &wal_file_name, SOURCE_CMD_STRICT },
|
||||
{ 'b', 152, "overwrite", &file_overwrite, SOURCE_CMD_STRICT },
|
||||
/* show options */
|
||||
{ 'f', 170, "format", opt_show_format, SOURCE_CMDLINE },
|
||||
{ 'f', 153, "format", opt_show_format, SOURCE_CMD_STRICT },
|
||||
{ 0 }
|
||||
};
|
||||
|
||||
@ -250,9 +195,12 @@ main(int argc, char *argv[])
|
||||
struct stat stat_buf;
|
||||
int rc;
|
||||
|
||||
/* initialize configuration */
|
||||
/* Initialize current backup */
|
||||
pgBackupInit(¤t);
|
||||
|
||||
/* Initialize current instance configuration */
|
||||
init_config(&instance_config);
|
||||
|
||||
PROGRAM_NAME = get_progname(argv[0]);
|
||||
set_pglocale_pgservice(argv[0], "pgscripts");
|
||||
|
||||
@ -365,8 +313,10 @@ main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
optind += 1;
|
||||
/* Parse command line arguments */
|
||||
pgut_getopt(argc, argv, options);
|
||||
/* Parse command line only arguments */
|
||||
config_get_opt(argc, argv, cmd_options, instance_options);
|
||||
|
||||
pgut_init();
|
||||
|
||||
if (help_opt)
|
||||
help_command(command_name);
|
||||
@ -459,34 +409,36 @@ main(int argc, char *argv[])
|
||||
* Read options from env variables or from config file,
|
||||
* unless we're going to set them via set-config.
|
||||
*/
|
||||
if (instance_name && backup_subcmd != SET_CONFIG_CMD)
|
||||
if (instance_name)
|
||||
{
|
||||
char path[MAXPGPATH];
|
||||
|
||||
/* Read environment variables */
|
||||
pgut_getopt_env(options);
|
||||
config_get_opt_env(instance_options);
|
||||
|
||||
/* Read options from configuration file */
|
||||
join_path_components(path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
|
||||
pgut_readopt(path, options, ERROR, true);
|
||||
config_read_opt(path, instance_options, ERROR, true);
|
||||
}
|
||||
|
||||
/* Initialize logger */
|
||||
init_logger(backup_path);
|
||||
init_logger(backup_path, &instance_config.logger);
|
||||
|
||||
/*
|
||||
* We have read pgdata path from command line or from configuration file.
|
||||
* Ensure that pgdata is an absolute path.
|
||||
*/
|
||||
if (pgdata != NULL && !is_absolute_path(pgdata))
|
||||
if (instance_config.pgdata != NULL &&
|
||||
!is_absolute_path(instance_config.pgdata))
|
||||
elog(ERROR, "-D, --pgdata must be an absolute path");
|
||||
|
||||
#if PG_VERSION_NUM >= 110000
|
||||
/* Check xlog-seg-size option */
|
||||
if (instance_name &&
|
||||
backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
|
||||
backup_subcmd != ADD_INSTANCE_CMD && !IsValidWalSegSize(xlog_seg_size))
|
||||
elog(ERROR, "Invalid WAL segment size %u", xlog_seg_size);
|
||||
backup_subcmd != ADD_INSTANCE_CMD && backup_subcmd != SET_CONFIG_CMD &&
|
||||
!IsValidWalSegSize(instance_config.xlog_seg_size))
|
||||
elog(ERROR, "Invalid WAL segment size %u", instance_config.xlog_seg_size);
|
||||
#endif
|
||||
|
||||
/* Sanity check of --backup-id option */
|
||||
@ -506,12 +458,12 @@ main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
/* Setup stream options. They are used in streamutil.c. */
|
||||
if (host != NULL)
|
||||
dbhost = pstrdup(host);
|
||||
if (port != NULL)
|
||||
dbport = pstrdup(port);
|
||||
if (username != NULL)
|
||||
dbuser = pstrdup(username);
|
||||
if (instance_config.pghost != NULL)
|
||||
dbhost = pstrdup(instance_config.pghost);
|
||||
if (instance_config.pgport != NULL)
|
||||
dbport = pstrdup(instance_config.pgport);
|
||||
if (instance_config.pguser != NULL)
|
||||
dbuser = pstrdup(instance_config.pguser);
|
||||
|
||||
/* setup exclusion list for file search */
|
||||
if (!backup_logs)
|
||||
@ -603,9 +555,11 @@ main(int argc, char *argv[])
|
||||
do_merge(current.backup_id);
|
||||
break;
|
||||
case SHOW_CONFIG_CMD:
|
||||
return do_configure(true);
|
||||
do_show_config();
|
||||
break;
|
||||
case SET_CONFIG_CMD:
|
||||
return do_configure(false);
|
||||
do_set_config();
|
||||
break;
|
||||
case NO_CMD:
|
||||
/* Should not happen */
|
||||
elog(ERROR, "Unknown subcommand");
|
||||
@ -615,25 +569,13 @@ main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
static void
|
||||
opt_backup_mode(pgut_option *opt, const char *arg)
|
||||
opt_backup_mode(ConfigOption *opt, const char *arg)
|
||||
{
|
||||
current.backup_mode = parse_backup_mode(arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_log_level_console(pgut_option *opt, const char *arg)
|
||||
{
|
||||
log_level_console = parse_log_level(arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_log_level_file(pgut_option *opt, const char *arg)
|
||||
{
|
||||
log_level_file = parse_log_level(arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_show_format(pgut_option *opt, const char *arg)
|
||||
opt_show_format(ConfigOption *opt, const char *arg)
|
||||
{
|
||||
const char *v = arg;
|
||||
size_t len;
|
||||
@ -656,12 +598,6 @@ opt_show_format(pgut_option *opt, const char *arg)
|
||||
elog(ERROR, "Invalid show format \"%s\"", arg);
|
||||
}
|
||||
|
||||
static void
|
||||
opt_compress_alg(pgut_option *opt, const char *arg)
|
||||
{
|
||||
compress_alg = parse_compress_alg(arg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize compress and sanity checks for compress.
|
||||
*/
|
||||
@ -670,20 +606,20 @@ compress_init(void)
|
||||
{
|
||||
/* Default algorithm is zlib */
|
||||
if (compress_shortcut)
|
||||
compress_alg = ZLIB_COMPRESS;
|
||||
instance_config.compress_alg = ZLIB_COMPRESS;
|
||||
|
||||
if (backup_subcmd != SET_CONFIG_CMD)
|
||||
{
|
||||
if (compress_level != COMPRESS_LEVEL_DEFAULT
|
||||
&& compress_alg == NOT_DEFINED_COMPRESS)
|
||||
if (instance_config.compress_level != COMPRESS_LEVEL_DEFAULT
|
||||
&& instance_config.compress_alg == NOT_DEFINED_COMPRESS)
|
||||
elog(ERROR, "Cannot specify compress-level option without compress-alg option");
|
||||
}
|
||||
|
||||
if (compress_level < 0 || compress_level > 9)
|
||||
if (instance_config.compress_level < 0 || instance_config.compress_level > 9)
|
||||
elog(ERROR, "--compress-level value must be in the range from 0 to 9");
|
||||
|
||||
if (compress_level == 0)
|
||||
compress_alg = NOT_DEFINED_COMPRESS;
|
||||
if (instance_config.compress_level == 0)
|
||||
instance_config.compress_alg = NOT_DEFINED_COMPRESS;
|
||||
|
||||
if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
|
||||
{
|
||||
@ -692,7 +628,7 @@ compress_init(void)
|
||||
elog(ERROR, "This build does not support zlib compression");
|
||||
else
|
||||
#endif
|
||||
if (compress_alg == PGLZ_COMPRESS && num_threads > 1)
|
||||
if (instance_config.compress_alg == PGLZ_COMPRESS && num_threads > 1)
|
||||
elog(ERROR, "Multithread backup does not support pglz compression");
|
||||
}
|
||||
}
|
||||
|
src/pg_probackup.h
@ -23,6 +23,7 @@
#include <port/atomics.h>
#endif

#include "utils/configuration.h"
#include "utils/logger.h"
#include "utils/parray.h"
#include "utils/pgut.h"
@ -48,6 +49,10 @@
#define PG_BLACK_LIST "black_list"
#define PG_TABLESPACE_MAP_FILE "tablespace_map"

/* Timeout defaults */
#define ARCHIVE_TIMEOUT_DEFAULT 300
#define REPLICA_TIMEOUT_DEFAULT 300

/* Directory/File permission */
#define DIR_PERMISSION (0700)
#define FILE_PERMISSION (0600)
@ -91,6 +96,7 @@ do { \
FIN_TRADITIONAL_CRC32(crc); \
} while (0)

/* Information about single file (or dir) in backup */
typedef struct pgFile
{
@ -162,7 +168,11 @@ typedef enum ShowFormat
#define BYTES_INVALID (-1)
#define BLOCKNUM_INVALID (-1)

typedef struct pgBackupConfig
/*
* An instance configuration. It can be stored in a configuration file or passed
* from command line.
*/
typedef struct InstanceConfig
{
uint64 system_identifier;
uint32 xlog_seg_size;
@ -177,24 +187,24 @@ typedef struct pgBackupConfig
const char *master_port;
const char *master_db;
const char *master_user;
int replica_timeout;
uint32 replica_timeout;

int archive_timeout;
/* Wait timeout for WAL segment archiving */
uint32 archive_timeout;

int log_level_console;
int log_level_file;
char *log_filename;
char *error_log_filename;
char *log_directory;
uint64 log_rotation_size;
uint64 log_rotation_age;
/* Logger parameters */
LoggerConfig logger;

/* Retention options. 0 disables the option. */
uint32 retention_redundancy;
uint32 retention_window;

CompressAlg compress_alg;
int compress_level;
} pgBackupConfig;
} InstanceConfig;

extern ConfigOption instance_options[];
extern InstanceConfig instance_config;

typedef struct pgBackup pgBackup;
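InstanceConfig gathers what used to be a dozen file-scope globals behind the single instance_config object declared above. For orientation, a comment-only map from the old names (all of which are deleted further down in this header) to their new homes, followed by the access pattern used throughout this diff:

/*
 * Old global                     ->  new home
 * pgdata                         ->  instance_config.pgdata
 * system_identifier              ->  instance_config.system_identifier
 * xlog_seg_size                  ->  instance_config.xlog_seg_size
 * archive_timeout                ->  instance_config.archive_timeout
 * replica_timeout                ->  instance_config.replica_timeout
 * master_host/port/db/user       ->  instance_config.master_*
 * retention_redundancy/_window   ->  instance_config.retention_redundancy / .retention_window
 * compress_alg / compress_level  ->  instance_config.compress_alg / .compress_level
 * log_level_* and log rotation   ->  instance_config.logger (a LoggerConfig, passed to init_logger())
 */
elog(LOG, "PGDATA = %s, WAL segment size = %u",
	 instance_config.pgdata, instance_config.xlog_seg_size);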
@ -337,7 +347,6 @@ typedef struct
|
||||
/* directory options */
|
||||
extern char *backup_path;
|
||||
extern char backup_instance_path[MAXPGPATH];
|
||||
extern char *pgdata;
|
||||
extern char arclog_path[MAXPGPATH];
|
||||
|
||||
/* common options */
|
||||
@ -351,21 +360,13 @@ extern char *replication_slot;
|
||||
|
||||
/* backup options */
|
||||
extern bool smooth_checkpoint;
|
||||
#define ARCHIVE_TIMEOUT_DEFAULT 300
|
||||
extern uint32 archive_timeout;
|
||||
extern char *remote_port;
|
||||
extern char *remote_host;
|
||||
extern char *remote_proto;
|
||||
extern char *ssh_config;
|
||||
extern char *ssh_options;
|
||||
extern const char *master_db;
|
||||
extern const char *master_host;
|
||||
extern const char *master_port;
|
||||
extern const char *master_user;
|
||||
extern bool is_remote_backup;
|
||||
extern bool is_remote_agent;
|
||||
#define REPLICA_TIMEOUT_DEFAULT 300
|
||||
extern uint32 replica_timeout;
|
||||
|
||||
extern bool is_ptrack_support;
|
||||
extern bool is_checksum_enabled;
|
||||
@ -378,25 +379,13 @@ extern bool skip_block_validation;
|
||||
/* delete options */
|
||||
extern bool delete_wal;
|
||||
extern bool delete_expired;
|
||||
extern bool apply_to_all;
|
||||
extern bool force_delete;
|
||||
|
||||
/* retention options. 0 disables the option */
|
||||
#define RETENTION_REDUNDANCY_DEFAULT 0
|
||||
#define RETENTION_WINDOW_DEFAULT 0
|
||||
|
||||
extern uint32 retention_redundancy;
|
||||
extern uint32 retention_window;
|
||||
|
||||
/* compression options */
|
||||
extern CompressAlg compress_alg;
|
||||
extern int compress_level;
|
||||
extern bool compress_shortcut;
|
||||
|
||||
/* other options */
|
||||
extern char *instance_name;
|
||||
extern uint64 system_identifier;
|
||||
extern uint32 xlog_seg_size;
|
||||
|
||||
/* show options */
|
||||
extern ShowFormat show_format;
|
||||
@ -447,13 +436,9 @@ extern int do_archive_get(char *wal_file_path, char *wal_file_name);
|
||||
|
||||
|
||||
/* in configure.c */
|
||||
extern int do_configure(bool show_only);
|
||||
extern void pgBackupConfigInit(pgBackupConfig *config);
|
||||
extern void writeBackupCatalogConfig(FILE *out, pgBackupConfig *config);
|
||||
extern void writeBackupCatalogConfigFile(pgBackupConfig *config);
|
||||
extern pgBackupConfig* readBackupCatalogConfigFile(void);
|
||||
|
||||
extern uint32 get_config_xlog_seg_size(void);
|
||||
extern void do_show_config(void);
|
||||
extern void do_set_config(void);
|
||||
extern void init_config(InstanceConfig *config);
|
||||
|
||||
/* in show.c */
|
||||
extern int do_show(time_t requested_backup_id);
|
||||
@ -526,7 +511,7 @@ extern void create_data_directories(const char *data_dir,
|
||||
fio_location location);
|
||||
|
||||
extern void read_tablespace_map(parray *files, const char *backup_dir);
|
||||
extern void opt_tablespace_map(pgut_option *opt, const char *arg);
|
||||
extern void opt_tablespace_map(ConfigOption *opt, const char *arg);
|
||||
extern void check_tablespace_mapping(pgBackup *backup);
|
||||
|
||||
extern void print_file_list(FILE *out, const parray *files, const char *root);
|
||||
@ -542,7 +527,8 @@ extern pgFile *pgFileNew(const char *path, bool omit_symlink, fio_location locat
|
||||
extern pgFile *pgFileInit(const char *path);
|
||||
extern void pgFileDelete(pgFile *file);
|
||||
extern void pgFileFree(void *file);
|
||||
extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c);
|
||||
extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c,
|
||||
bool raise_on_deleted, size_t *bytes_read, fio_location location);
|
||||
extern int pgFileComparePath(const void *f1, const void *f2);
|
||||
extern int pgFileComparePathDesc(const void *f1, const void *f2);
|
||||
extern int pgFileCompareLinked(const void *f1, const void *f2);
|
||||
@ -564,10 +550,9 @@ extern void push_wal_file(const char *from_path, const char *to_path,
|
||||
bool is_compress, bool overwrite);
|
||||
extern void get_wal_file(const char *from_path, const char *to_path);
|
||||
|
||||
extern bool calc_file_checksum(pgFile *file);
|
||||
extern void calc_file_checksum(pgFile *file);
|
||||
|
||||
extern bool check_file_pages(pgFile* file,
|
||||
XLogRecPtr stop_lsn,
|
||||
extern bool check_file_pages(pgFile *file, XLogRecPtr stop_lsn,
|
||||
uint32 checksum_version, uint32 backup_version);
|
||||
/* parsexlog.c */
|
||||
extern void extractPageMap(const char *archivedir,
|
||||
@ -587,15 +572,22 @@ extern bool read_recovery_info(const char *archivedir, TimeLineID tli,
|
||||
TransactionId *recovery_xid);
|
||||
extern bool wal_contains_lsn(const char *archivedir, XLogRecPtr target_lsn,
|
||||
TimeLineID target_tli, uint32 seg_size);
|
||||
extern XLogRecPtr get_last_wal_lsn(const char *archivedir, XLogRecPtr start_lsn,
|
||||
XLogRecPtr stop_lsn, TimeLineID tli,
|
||||
bool seek_prev_segment, uint32 seg_size);
|
||||
|
||||
/* in util.c */
|
||||
extern TimeLineID get_current_timeline(bool safe);
|
||||
extern XLogRecPtr get_checkpoint_location(PGconn *conn);
|
||||
extern uint64 get_system_identifier(char *pgdata);
|
||||
extern uint64 get_system_identifier(const char *pgdata_path);
|
||||
extern uint64 get_remote_system_identifier(PGconn *conn);
|
||||
extern uint32 get_data_checksum_version(bool safe);
|
||||
extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path);
|
||||
extern uint32 get_xlog_seg_size(char *pgdata_path);
|
||||
extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn);
|
||||
extern void set_min_recovery_point(pgFile *file, const char *backup_path,
|
||||
XLogRecPtr stop_backup_lsn);
|
||||
extern void copy_pgcontrol_file(const char *from_root, const char *to_root,
|
||||
pgFile *file, fio_location location);
|
||||
|
||||
extern void sanityChecks(void);
|
||||
extern void time2iso(char *buf, size_t len, time_t time);
|
||||
|
src/restore.c
@ -60,12 +60,13 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,

if (is_restore)
{
if (pgdata == NULL)
if (instance_config.pgdata == NULL)
elog(ERROR,
"required parameter not specified: PGDATA (-D, --pgdata)");
/* Check if restore destination empty */
if (!dir_is_empty(pgdata))
elog(ERROR, "restore destination is not empty: \"%s\"", pgdata);
if (!dir_is_empty(instance_config.pgdata))
elog(ERROR, "restore destination is not empty: \"%s\"",
instance_config.pgdata);
}

if (instance_name == NULL)
@ -324,7 +325,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
*/
validate_wal(dest_backup, arclog_path, rt->recovery_target_time,
rt->recovery_target_xid, rt->recovery_target_lsn,
base_full_backup->tli, xlog_seg_size);
base_full_backup->tli, instance_config.xlog_seg_size);
}
/* Orphinize every OK descendant of corrupted backup */
else
@ -449,7 +450,7 @@ restore_backup(pgBackup *backup)
|
||||
* this_backup_path = $BACKUP_PATH/backups/instance_name/backup_id
|
||||
*/
|
||||
pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL);
|
||||
create_data_directories(pgdata, this_backup_path, true, FIO_DB_HOST);
|
||||
create_data_directories(instance_config.pgdata, this_backup_path, true, FIO_DB_HOST);
|
||||
|
||||
/*
|
||||
* Get list of files which need to be restored.
|
||||
@ -501,7 +502,8 @@ restore_backup(pgBackup *backup)
|
||||
parray_walk(files, pgFileFree);
|
||||
parray_free(files);
|
||||
|
||||
if (log_level_console <= LOG || log_level_file <= LOG)
|
||||
if (logger_config.log_level_console <= LOG ||
|
||||
logger_config.log_level_file <= LOG)
|
||||
elog(LOG, "restore %s backup completed", base36enc(backup->start_time));
|
||||
}
|
||||
|
||||
@ -521,12 +523,12 @@ remove_deleted_files(pgBackup *backup)
|
||||
|
||||
pgBackupGetPath(backup, filelist_path, lengthof(filelist_path), DATABASE_FILE_LIST);
|
||||
/* Read backup's filelist using target database path as base path */
|
||||
files = dir_read_file_list(pgdata, filelist_path);
|
||||
files = dir_read_file_list(instance_config.pgdata, filelist_path);
|
||||
parray_qsort(files, pgFileComparePathDesc);
|
||||
|
||||
/* Get list of files actually existing in target database */
|
||||
files_restored = parray_new();
|
||||
dir_list_file(files_restored, pgdata, true, true, false);
|
||||
dir_list_file(files_restored, instance_config.pgdata, true, true, false);
|
||||
/* To delete from leaf, sort in reversed order */
|
||||
parray_qsort(files_restored, pgFileComparePathDesc);
|
||||
|
||||
@ -538,8 +540,10 @@ remove_deleted_files(pgBackup *backup)
|
||||
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
|
||||
{
|
||||
pgFileDelete(file);
|
||||
if (log_level_console <= LOG || log_level_file <= LOG)
|
||||
elog(LOG, "deleted %s", GetRelativePath(file->path, pgdata));
|
||||
if (logger_config.log_level_console <= LOG ||
|
||||
logger_config.log_level_file <= LOG)
|
||||
elog(LOG, "deleted %s", GetRelativePath(file->path,
|
||||
instance_config.pgdata));
|
||||
}
|
||||
}
|
||||
|
||||
@ -621,15 +625,17 @@ restore_files(void *arg)
|
||||
{
|
||||
char to_path[MAXPGPATH];
|
||||
|
||||
join_path_components(to_path, pgdata,
|
||||
join_path_components(to_path, instance_config.pgdata,
|
||||
file->path + strlen(from_root) + 1);
|
||||
restore_data_file(to_path, file,
|
||||
arguments->backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
|
||||
false,
|
||||
parse_program_version(arguments->backup->program_version));
|
||||
}
|
||||
else if (strcmp(file->name, "pg_control") == 0)
|
||||
copy_pgcontrol_file(from_root, instance_config.pgdata, file, FIO_DB_HOST);
|
||||
else
|
||||
copy_file(from_root, pgdata, file, FIO_DB_HOST);
|
||||
copy_file(from_root, instance_config.pgdata, file, FIO_DB_HOST);
|
||||
|
||||
/* print size of restored file */
|
||||
if (file->write_size != BYTES_INVALID)
|
||||
@ -664,7 +670,7 @@ create_recovery_conf(time_t backup_id,
|
||||
elog(LOG, "----------------------------------------");
|
||||
elog(LOG, "creating recovery.conf");
|
||||
|
||||
snprintf(path, lengthof(path), "%s/recovery.conf", pgdata);
|
||||
snprintf(path, lengthof(path), "%s/recovery.conf", instance_config.pgdata);
|
||||
fp = fopen(path, "wt");
|
||||
if (fp == NULL)
|
||||
elog(ERROR, "cannot open recovery.conf \"%s\": %s", path,
|
||||
|
30
src/show.c
@ -363,7 +363,7 @@ show_instance_plain(parray *backup_list, bool show_name)
time2iso(row->recovery_time, lengthof(row->recovery_time),
backup->recovery_time);
else
StrNCpy(row->recovery_time, "----", 4);
StrNCpy(row->recovery_time, "----", sizeof(row->recovery_time));
widths[cur] = Max(widths[cur], strlen(row->recovery_time));
cur++;

@ -388,7 +388,7 @@ show_instance_plain(parray *backup_list, bool show_name)
snprintf(row->duration, lengthof(row->duration), "%.*lfs", 0,
difftime(backup->end_time, backup->start_time));
else
StrNCpy(row->duration, "----", 4);
StrNCpy(row->duration, "----", sizeof(row->duration));
widths[cur] = Max(widths[cur], strlen(row->duration));
cur++;

@ -521,8 +521,8 @@ show_instance_json(parray *backup_list)
/* Begin of instance object */
json_add(buf, JT_BEGIN_OBJECT, &json_level);

json_add_value(buf, "instance", instance_name, json_level, false);
json_add_key(buf, "backups", json_level, true);
json_add_value(buf, "instance", instance_name, json_level, true);
json_add_key(buf, "backups", json_level);

/*
 * List backups.

@ -542,7 +542,7 @@ show_instance_json(parray *backup_list)
json_add(buf, JT_BEGIN_OBJECT, &json_level);

json_add_value(buf, "id", base36enc(backup->start_time), json_level,
false);
true);

if (backup->parent_backup != 0)
json_add_value(buf, "parent-backup-id",

@ -558,20 +558,20 @@ show_instance_json(parray *backup_list)
deparse_compress_alg(backup->compress_alg), json_level,
true);

json_add_key(buf, "compress-level", json_level, true);
json_add_key(buf, "compress-level", json_level);
appendPQExpBuffer(buf, "%d", backup->compress_level);

json_add_value(buf, "from-replica",
backup->from_replica ? "true" : "false", json_level,
true);
false);

json_add_key(buf, "block-size", json_level, true);
json_add_key(buf, "block-size", json_level);
appendPQExpBuffer(buf, "%u", backup->block_size);

json_add_key(buf, "xlog-block-size", json_level, true);
json_add_key(buf, "xlog-block-size", json_level);
appendPQExpBuffer(buf, "%u", backup->wal_block_size);

json_add_key(buf, "checksum-version", json_level, true);
json_add_key(buf, "checksum-version", json_level);
appendPQExpBuffer(buf, "%u", backup->checksum_version);

json_add_value(buf, "program-version", backup->program_version,

@ -579,10 +579,10 @@ show_instance_json(parray *backup_list)
json_add_value(buf, "server-version", backup->server_version,
json_level, true);

json_add_key(buf, "current-tli", json_level, true);
json_add_key(buf, "current-tli", json_level);
appendPQExpBuffer(buf, "%d", backup->tli);

json_add_key(buf, "parent-tli", json_level, true);
json_add_key(buf, "parent-tli", json_level);
parent_tli = get_parent_tli(backup->tli);
appendPQExpBuffer(buf, "%u", parent_tli);

@ -603,7 +603,7 @@ show_instance_json(parray *backup_list)
json_add_value(buf, "end-time", timestamp, json_level, true);
}

json_add_key(buf, "recovery-xid", json_level, true);
json_add_key(buf, "recovery-xid", json_level);
appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid);

if (backup->recovery_time > 0)

@ -614,13 +614,13 @@ show_instance_json(parray *backup_list)

if (backup->data_bytes != BYTES_INVALID)
{
json_add_key(buf, "data-bytes", json_level, true);
json_add_key(buf, "data-bytes", json_level);
appendPQExpBuffer(buf, INT64_FORMAT, backup->data_bytes);
}

if (backup->wal_bytes != BYTES_INVALID)
{
json_add_key(buf, "wal-bytes", json_level, true);
json_add_key(buf, "wal-bytes", json_level);
appendPQExpBuffer(buf, INT64_FORMAT, backup->wal_bytes);
}
94
src/util.c
@ -109,7 +109,7 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size)
 * Write ControlFile to pg_control
 */
static void
writeControlFile(ControlFileData *ControlFile, char *path)
writeControlFile(ControlFileData *ControlFile, char *path, fio_location location)
{
int fd;
char *buffer = NULL;

@ -125,21 +125,19 @@ writeControlFile(ControlFileData *ControlFile, char *path)
memcpy(buffer, ControlFile, sizeof(ControlFileData));

/* Write pg_control */
unlink(path);
fd = open(path,
O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
fd = fio_open(path,
O_RDWR | O_CREAT | O_TRUNC | PG_BINARY, location);

if (fd < 0)
elog(ERROR, "Failed to open file: %s", path);

if (write(fd, buffer, ControlFileSize) != ControlFileSize)
if (fio_write(fd, buffer, ControlFileSize) != ControlFileSize)
elog(ERROR, "Failed to overwrite file: %s", path);

if (fsync(fd) != 0)
if (fio_flush(fd) != 0)
elog(ERROR, "Failed to fsync file: %s", path);

close(fd);
fio_close(fd);
pg_free(buffer);
}

@ -155,7 +153,8 @@ get_current_timeline(bool safe)
size_t size;

/* First fetch file... */
buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
buffer = slurpFile(instance_config.pgdata, "global/pg_control", &size,
safe);
if (safe && buffer == NULL)
return 0;

@ -206,7 +205,7 @@ get_checkpoint_location(PGconn *conn)
}

uint64
get_system_identifier(char *pgdata_path)
get_system_identifier(const char *pgdata_path)
{
ControlFileData ControlFile;
char *buffer;

@ -284,7 +283,8 @@ get_data_checksum_version(bool safe)
size_t size;

/* First fetch file... */
buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
buffer = slurpFile(instance_config.pgdata, "global/pg_control", &size,
safe);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);

@ -293,9 +293,30 @@ get_data_checksum_version(bool safe)
return ControlFile.data_checksum_version;
}

/* MinRecoveryPoint 'as-is' is not to be trusted */
pg_crc32c
get_pgcontrol_checksum(const char *pgdata_path)
{
ControlFileData ControlFile;
char *buffer;
size_t size;

/* First fetch file... */
buffer = slurpFile(pgdata_path, "global/pg_control", &size, false);
if (buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);

return ControlFile.crc;
}

/*
 * Rewrite minRecoveryPoint of pg_control in backup directory. minRecoveryPoint
 * 'as-is' is not to be trusted.
 */
void
set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn)
set_min_recovery_point(pgFile *file, const char *backup_path,
XLogRecPtr stop_backup_lsn)
{
ControlFileData ControlFile;
char *buffer;

@ -303,7 +324,7 @@ set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_ba
char fullpath[MAXPGPATH];

/* First fetch file content */
buffer = slurpFile(pgdata, XLOG_CONTROL_FILE, &size, false);
buffer = slurpFile(instance_config.pgdata, XLOG_CONTROL_FILE, &size, false);
if (buffer == NULL)
elog(ERROR, "ERROR");

@ -321,52 +342,43 @@ set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_ba

/* Update checksum in pg_control header */
INIT_CRC32C(ControlFile.crc);
COMP_CRC32C(ControlFile.crc,
(char *) &ControlFile,
COMP_CRC32C(ControlFile.crc, (char *) &ControlFile,
offsetof(ControlFileData, crc));
FIN_CRC32C(ControlFile.crc);

/* paranoia */
checkControlFile(&ControlFile);

/* overwrite pg_control */
snprintf(fullpath, sizeof(fullpath), "%s/%s", backup_path, XLOG_CONTROL_FILE);
writeControlFile(&ControlFile, fullpath);
writeControlFile(&ControlFile, fullpath, FIO_LOCAL_HOST);

/* Update pg_control checksum in backup_list */
file->crc = pgFileGetCRC(fullpath, false);
file->crc = ControlFile.crc;

pg_free(buffer);
}

/*
 * Convert time_t value to ISO-8601 format string. Always set timezone offset.
 * Copy pg_control file to backup. We do not apply compression to this file.
 */
void
time2iso(char *buf, size_t len, time_t time)
copy_pgcontrol_file(const char *from_root, const char *to_root, pgFile *file, fio_location location)
{
struct tm *ptm = gmtime(&time);
time_t gmt = mktime(ptm);
time_t offset;
char *ptr = buf;
ControlFileData ControlFile;
char *buffer;
size_t size;
char to_path[MAXPGPATH];

ptm = localtime(&time);
offset = time - gmt + (ptm->tm_isdst ? 3600 : 0);
buffer = slurpFile(from_root, XLOG_CONTROL_FILE, &size, false);

strftime(ptr, len, "%Y-%m-%d %H:%M:%S", ptm);
digestControlFile(&ControlFile, buffer, size);

ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), "%c%02d",
(offset >= 0) ? '+' : '-',
abs((int) offset) / SECS_PER_HOUR);
file->crc = ControlFile.crc;
file->read_size = size;
file->write_size = size;

if (abs((int) offset) % SECS_PER_HOUR != 0)
{
ptr += strlen(ptr);
snprintf(ptr, len - (ptr - buf), ":%02d",
abs((int) offset % SECS_PER_HOUR) / SECS_PER_MINUTE);
}
join_path_components(to_path, to_root, file->path + strlen(from_root) + 1);
writeControlFile(&ControlFile, to_path, location);

pg_free(buffer);
}

/*
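For orientation, the util.c hunks above all revolve around recomputing the CRC stored in the pg_control header after a field (minRecoveryPoint) is edited. A minimal sketch of that step, assuming a PostgreSQL build that provides port/pg_crc32c.h; this restates the INIT/COMP/FIN sequence from the diff and is not a drop-in replacement for set_min_recovery_point():

#include <stddef.h>
#include "catalog/pg_control.h"
#include "port/pg_crc32c.h"

/* Recompute the pg_control header checksum over everything before the crc field. */
static void
recompute_control_crc(ControlFileData *control)
{
	INIT_CRC32C(control->crc);
	COMP_CRC32C(control->crc, (char *) control,
				offsetof(ControlFileData, crc));
	FIN_CRC32C(control->crc);
}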
1499
src/utils/configuration.c
Normal file
File diff suppressed because it is too large

106
src/utils/configuration.h
Normal file
@ -0,0 +1,106 @@
/*-------------------------------------------------------------------------
 *
 * configuration.h: - prototypes of functions and structures for
 * configuration.
 *
 * Copyright (c) 2018, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */

#ifndef CONFIGURATION_H
#define CONFIGURATION_H

#include "postgres_fe.h"
#include "access/xlogdefs.h"

#define INFINITE_STR "INFINITE"

typedef enum OptionSource
{
SOURCE_DEFAULT,
SOURCE_FILE_STRICT,
SOURCE_CMD_STRICT,
SOURCE_ENV,
SOURCE_FILE,
SOURCE_CMD,
SOURCE_CONST
} OptionSource;

typedef struct ConfigOption ConfigOption;

typedef void (*option_assign_fn) (ConfigOption *opt, const char *arg);
/* Returns allocated string value */
typedef char *(*option_get_fn) (ConfigOption *opt);

/*
 * type:
 * b: bool (true)
 * B: bool (false)
 * f: option_fn
 * i: 32bit signed integer
 * u: 32bit unsigned integer
 * I: 64bit signed integer
 * U: 64bit unsigned integer
 * s: string
 * t: time_t
 */
struct ConfigOption
{
char type;
uint8 sname; /* short name */
const char *lname; /* long name */
void *var; /* pointer to variable */
OptionSource allowed; /* allowed source */
OptionSource source; /* actual source */
const char *group; /* option group name */
int flags; /* option unit */
option_get_fn get_value; /* function to get the value as a string,
should return allocated string */
};

/*
 * bit values in "flags" of an option
 */
#define OPTION_UNIT_KB 0x1000 /* value is in kilobytes */
#define OPTION_UNIT_BLOCKS 0x2000 /* value is in blocks */
#define OPTION_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
#define OPTION_UNIT_XSEGS 0x4000 /* value is in xlog segments */
#define OPTION_UNIT_MEMORY 0xF000 /* mask for size-related units */

#define OPTION_UNIT_MS 0x10000 /* value is in milliseconds */
#define OPTION_UNIT_S 0x20000 /* value is in seconds */
#define OPTION_UNIT_MIN 0x30000 /* value is in minutes */
#define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */

#define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME)

extern int config_get_opt(int argc, char **argv, ConfigOption cmd_options[],
ConfigOption options[]);
extern int config_read_opt(const char *path, ConfigOption options[], int elevel,
bool strict);
extern void config_get_opt_env(ConfigOption options[]);
extern void config_set_opt(ConfigOption options[], void *var,
OptionSource source);

extern char *option_get_value(ConfigOption *opt);

extern bool parse_bool(const char *value, bool *result);
extern bool parse_bool_with_len(const char *value, size_t len, bool *result);
extern bool parse_int32(const char *value, int32 *result, int flags);
extern bool parse_uint32(const char *value, uint32 *result, int flags);
extern bool parse_int64(const char *value, int64 *result, int flags);
extern bool parse_uint64(const char *value, uint64 *result, int flags);
extern bool parse_time(const char *value, time_t *result, bool utc_default);
extern bool parse_int(const char *value, int *result, int flags,
const char **hintmsg);
extern bool parse_lsn(const char *value, XLogRecPtr *result);

extern void time2iso(char *buf, size_t len, time_t time);

extern void convert_from_base_unit(int64 base_value, int base_unit,
int64 *value, const char **unit);
extern void convert_from_base_unit_u(uint64 base_value, int base_unit,
uint64 *value, const char **unit);

#endif /* CONFIGURATION_H */
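A minimal sketch of how this new API is typically consumed, based only on the declarations above. The option names, the backing variables, and the group label here are illustrative placeholders, not taken from the real pg_probackup option tables:

#include "postgres_fe.h"
#include "utils/configuration.h"

static char *pgdata_path = NULL;      /* hypothetical backing variable */
static int   archive_timeout = 300;   /* hypothetical, stored in seconds */

static ConfigOption sample_options[] =
{
	/* type, sname, lname, var, allowed, source, group, flags, get_value */
	{'s', 'D', "pgdata", &pgdata_path, SOURCE_CMD, 0, "sample", 0, option_get_value},
	{'i', 0, "archive-timeout", &archive_timeout, SOURCE_CMD, 0, "sample", OPTION_UNIT_S, option_get_value},
	{0}
};

/* Values may then come from a config file, the environment, or argv: */
/*   config_read_opt(conf_path, sample_options, ERROR, false);  */
/*   config_get_opt_env(sample_options);                        */
/*   config_get_opt(argc, argv, cmd_options, sample_options);   */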
@ -591,7 +591,26 @@ gzFile fio_gzopen(char const* path, char const* mode, int* tmp_fd, fio_location
int fd = mkstemp("gz.XXXXXX");
if (fd < 0)
return NULL;
*tmp_fd = fd;
if (strcmp(mode, PG_BINARY_W) == 0)
{
*tmp_fd = fd;
}
else
{
int rd = fio_open(path, O_RDONLY|PG_BINARY, location);
struct stat st;
void* buf;
if (rd < 0) {
return NULL;
}
SYS_CHECK(fio_fstat(rd, &st));
buf = malloc(st.st_size);
IO_CHECK(fio_read(rd, buf, st.st_size), st.st_size);
IO_CHECK(write(fd, buf, st.st_size), st.st_size);
SYS_CHECK(fio_close(rd));
free(buf);
*tmp_fd = -1;
}
file = gzdopen(fd, mode);
}
else
@ -2,6 +2,7 @@
#define __FILE__H__

#include <stdio.h>
#include <sys/stat.h>

#ifdef HAVE_LIBZ
#include <zlib.h>
@ -12,6 +12,8 @@
static void json_add_indent(PQExpBuffer buf, int32 level);
static void json_add_escaped(PQExpBuffer buf, const char *str);

static bool add_comma = false;

/*
 * Start or end json token. Currently it is a json object or array.
 *

@ -25,6 +27,7 @@ json_add(PQExpBuffer buf, JsonToken type, int32 *level)
case JT_BEGIN_ARRAY:
appendPQExpBufferChar(buf, '[');
*level += 1;
add_comma = false;
break;
case JT_END_ARRAY:
*level -= 1;

@ -33,11 +36,13 @@ json_add(PQExpBuffer buf, JsonToken type, int32 *level)
else
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, ']');
add_comma = true;
break;
case JT_BEGIN_OBJECT:
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, '{');
*level += 1;
add_comma = false;
break;
case JT_END_OBJECT:
*level -= 1;

@ -46,6 +51,7 @@ json_add(PQExpBuffer buf, JsonToken type, int32 *level)
else
json_add_indent(buf, *level);
appendPQExpBufferChar(buf, '}');
add_comma = true;
break;
default:
break;

@ -56,7 +62,7 @@ json_add(PQExpBuffer buf, JsonToken type, int32 *level)
 * Add json object's key. If it isn't first key we need to add a comma.
 */
void
json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)
json_add_key(PQExpBuffer buf, const char *name, int32 level)
{
if (add_comma)
appendPQExpBufferChar(buf, ',');

@ -64,6 +70,8 @@ json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)

json_add_escaped(buf, name);
appendPQExpBufferStr(buf, ": ");

add_comma = true;
}

/*

@ -72,10 +80,14 @@ json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)
 */
void
json_add_value(PQExpBuffer buf, const char *name, const char *value,
int32 level, bool add_comma)
int32 level, bool escaped)
{
json_add_key(buf, name, level, add_comma);
json_add_escaped(buf, value);
json_add_key(buf, name, level);

if (escaped)
json_add_escaped(buf, value);
else
appendPQExpBufferStr(buf, value);
}

static void

@ -25,9 +25,8 @@ typedef enum
} JsonToken;

extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level);
extern void json_add_key(PQExpBuffer buf, const char *name, int32 level,
bool add_comma);
extern void json_add_key(PQExpBuffer buf, const char *name, int32 level);
extern void json_add_value(PQExpBuffer buf, const char *name, const char *value,
int32 level, bool add_comma);
int32 level, bool escaped);

#endif /* PROBACKUP_JSON_H */
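A short sketch of how the reworked JSON writer is meant to be driven after this change: commas are now tracked internally by the static add_comma flag, so callers only say whether a value needs escaping. The surrounding buffer setup below is illustrative and not copied from show.c:

#include "postgres_fe.h"
#include "pqexpbuffer.h"
#include "utils/json.h"

static void
emit_sample_object(void)
{
	PQExpBuffer buf = createPQExpBuffer();
	int32       level = 0;

	json_add(buf, JT_BEGIN_OBJECT, &level);                 /* emits '{', resets add_comma */
	json_add_value(buf, "instance", "node1", level, true);  /* escaped string value */
	json_add_key(buf, "block-size", level);                 /* comma added automatically */
	appendPQExpBuffer(buf, "%u", 8192);                     /* raw (unescaped) number */
	json_add(buf, JT_END_OBJECT, &level);                   /* emits '}' */

	/* buf->data now holds the JSON text */
	destroyPQExpBuffer(buf);
}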
@ -16,24 +16,17 @@
#include "thread.h"
#include <time.h>

#include "utils/configuration.h"

/* Logger parameters */

int log_level_console = LOG_LEVEL_CONSOLE_DEFAULT;
int log_level_file = LOG_LEVEL_FILE_DEFAULT;

char *log_filename = NULL;
char *error_log_filename = NULL;
char *log_directory = NULL;
/*
 * If log_path is empty logging is not initialized.
 * We will log only into stderr
 */
char log_path[MAXPGPATH] = "";

/* Maximum size of an individual log file in kilobytes */
uint64 log_rotation_size = 0;
/* Maximum lifetime of an individual log file in minutes */
uint64 log_rotation_age = 0;
LoggerConfig logger_config = {
LOG_LEVEL_CONSOLE_DEFAULT,
LOG_LEVEL_FILE_DEFAULT,
LOG_FILENAME_DEFAULT,
NULL,
LOG_ROTATION_SIZE_DEFAULT,
LOG_ROTATION_AGE_DEFAULT
};

/* Implementation for logging.h */

@ -69,17 +62,24 @@ static bool loggin_in_progress = false;

static pthread_mutex_t log_file_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Initialize logger.
 *
 * If log_directory wasn't set by a user we use full path:
 * backup_directory/log
 */
void
init_logger(const char *root_path)
init_logger(const char *root_path, LoggerConfig *config)
{
/* Set log path */
if (log_level_file != LOG_OFF || error_log_filename)
if (config->log_directory == NULL)
{
if (log_directory)
strcpy(log_path, log_directory);
else
join_path_components(log_path, root_path, LOG_DIRECTORY_DEFAULT);
config->log_directory = palloc(MAXPGPATH);
join_path_components(config->log_directory,
root_path, LOG_DIRECTORY_DEFAULT);
}

logger_config = *config;
}

static void

@ -158,10 +158,11 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
time_t log_time = (time_t) time(NULL);
char strfbuf[128];

write_to_file = elevel >= log_level_file && log_path[0] != '\0';
write_to_error_log = elevel >= ERROR && error_log_filename &&
log_path[0] != '\0';
write_to_stderr = elevel >= log_level_console && !file_only;
write_to_file = elevel >= logger_config.log_level_file &&
logger_config.log_directory && logger_config.log_directory[0] != '\0';
write_to_error_log = elevel >= ERROR && logger_config.error_log_filename &&
logger_config.log_directory && logger_config.log_directory[0] != '\0';
write_to_stderr = elevel >= logger_config.log_level_console && !file_only;

pthread_lock(&log_file_mutex);
#ifdef WIN32

@ -193,10 +194,10 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
{
if (log_file == NULL)
{
if (log_filename == NULL)
if (logger_config.log_filename == NULL)
open_logfile(&log_file, LOG_FILENAME_DEFAULT);
else
open_logfile(&log_file, log_filename);
open_logfile(&log_file, logger_config.log_filename);
}

fprintf(log_file, "%s: ", strfbuf);

@ -215,7 +216,7 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
if (write_to_error_log)
{
if (error_log_file == NULL)
open_logfile(&error_log_file, error_log_filename);
open_logfile(&error_log_file, logger_config.error_log_filename);

fprintf(error_log_file, "%s: ", strfbuf);
write_elevel(error_log_file, elevel);

@ -264,7 +265,7 @@ elog_stderr(int elevel, const char *fmt, ...)
 * Do not log message if severity level is less than log_level.
 * It is the little optimisation to put it here not in elog_internal().
 */
if (elevel < log_level_console && elevel < ERROR)
if (elevel < logger_config.log_level_console && elevel < ERROR)
return;

va_start(args, fmt);

@ -291,7 +292,8 @@ elog(int elevel, const char *fmt, ...)
 * Do not log message if severity level is less than log_level.
 * It is the little optimisation to put it here not in elog_internal().
 */
if (elevel < log_level_console && elevel < log_level_file && elevel < ERROR)
if (elevel < logger_config.log_level_console &&
elevel < logger_config.log_level_file && elevel < ERROR)
return;

va_start(args, fmt);

@ -311,7 +313,7 @@ elog_file(int elevel, const char *fmt, ...)
 * Do not log message if severity level is less than log_level.
 * It is the little optimisation to put it here not in elog_internal().
 */
if (elevel < log_level_file && elevel < ERROR)
if (elevel < logger_config.log_level_file && elevel < ERROR)
return;

va_start(args, fmt);

@ -352,7 +354,8 @@ pg_log(eLogType type, const char *fmt, ...)
 * Do not log message if severity level is less than log_level.
 * It is the little optimisation to put it here not in elog_internal().
 */
if (elevel < log_level_console && elevel < log_level_file && elevel < ERROR)
if (elevel < logger_config.log_level_console &&
elevel < logger_config.log_level_file && elevel < ERROR)
return;

va_start(args, fmt);

@ -438,12 +441,13 @@ logfile_getname(const char *format, time_t timestamp)
size_t len;
struct tm *tm = localtime(&timestamp);

if (log_path[0] == '\0')
if (logger_config.log_directory == NULL ||
logger_config.log_directory[0] == '\0')
elog_stderr(ERROR, "logging path is not set");

filename = (char *) palloc(MAXPGPATH);

snprintf(filename, MAXPGPATH, "%s/", log_path);
snprintf(filename, MAXPGPATH, "%s/", logger_config.log_directory);

len = strlen(filename);

@ -465,7 +469,7 @@ logfile_open(const char *filename, const char *mode)
/*
 * Create log directory if not present; ignore errors
 */
mkdir(log_path, S_IRWXU);
mkdir(logger_config.log_directory, S_IRWXU);

fh = fopen(filename, mode);

@ -499,7 +503,7 @@ open_logfile(FILE **file, const char *filename_format)

filename = logfile_getname(filename_format, cur_time);

/* "log_path" was checked in logfile_getname() */
/* "log_directory" was checked in logfile_getname() */
snprintf(control, MAXPGPATH, "%s.rotation", filename);

if (stat(filename, &st) == -1)

@ -517,10 +521,11 @@ open_logfile(FILE **file, const char *filename_format)
logfile_exists = true;

/* First check for rotation */
if (log_rotation_size > 0 || log_rotation_age > 0)
if (logger_config.log_rotation_size > 0 ||
logger_config.log_rotation_age > 0)
{
/* Check for rotation by age */
if (log_rotation_age > 0)
if (logger_config.log_rotation_age > 0)
{
struct stat control_st;

@ -551,7 +556,7 @@ open_logfile(FILE **file, const char *filename_format)

rotation_requested = (cur_time - creation_time) >
/* convert to seconds from milliseconds */
log_rotation_age / 1000;
logger_config.log_rotation_age / 1000;
}
else
elog_stderr(ERROR, "cannot read creation timestamp from "

@ -562,10 +567,10 @@ open_logfile(FILE **file, const char *filename_format)
}

/* Check for rotation by size */
if (!rotation_requested && log_rotation_size > 0)
if (!rotation_requested && logger_config.log_rotation_size > 0)
rotation_requested = st.st_size >=
/* convert to bytes */
log_rotation_size * 1024L;
logger_config.log_rotation_size * 1024L;
}

logfile_open:
@ -21,33 +21,36 @@
#define ERROR 1
#define LOG_OFF 10

typedef struct LoggerConfig
{
int log_level_console;
int log_level_file;
char *log_filename;
char *error_log_filename;
char *log_directory;
/* Maximum size of an individual log file in kilobytes */
uint64 log_rotation_size;
/* Maximum lifetime of an individual log file in minutes */
uint64 log_rotation_age;
} LoggerConfig;

/* Logger parameters */
extern LoggerConfig logger_config;

extern int log_to_file;
extern int log_level_console;
extern int log_level_file;
#define LOG_ROTATION_SIZE_DEFAULT 0
#define LOG_ROTATION_AGE_DEFAULT 0

extern char *log_filename;
extern char *error_log_filename;
extern char *log_directory;
extern char log_path[MAXPGPATH];
#define LOG_LEVEL_CONSOLE_DEFAULT INFO
#define LOG_LEVEL_FILE_DEFAULT LOG_OFF

#define LOG_ROTATION_SIZE_DEFAULT 0
#define LOG_ROTATION_AGE_DEFAULT 0
extern uint64 log_rotation_size;
extern uint64 log_rotation_age;

#define LOG_LEVEL_CONSOLE_DEFAULT INFO
#define LOG_LEVEL_FILE_DEFAULT LOG_OFF

#define LOG_FILENAME_DEFAULT "pg_probackup.log"
#define LOG_DIRECTORY_DEFAULT "log"
#define LOG_FILENAME_DEFAULT "pg_probackup.log"
#define LOG_DIRECTORY_DEFAULT "log"

#undef elog
extern void elog(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);
extern void elog_file(int elevel, const char *fmt, ...) pg_attribute_printf(2, 3);

extern void init_logger(const char *root_path);
extern void init_logger(const char *root_path, LoggerConfig *config);

extern int parse_log_level(const char *level);
extern const char *deparse_log_level(int level);
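A short usage sketch of the new logger interface, assuming a caller that has already resolved its backup catalog path (the path argument and the helper name are hypothetical). init_logger() copies the passed struct into the global logger_config and derives log_directory from root_path when none was supplied:

#include "postgres_fe.h"
#include "utils/logger.h"

static void
setup_logging_example(const char *backup_catalog_path)
{
	LoggerConfig cfg = {
		LOG_LEVEL_CONSOLE_DEFAULT,   /* log_level_console */
		LOG_LEVEL_FILE_DEFAULT,      /* log_level_file */
		LOG_FILENAME_DEFAULT,        /* log_filename */
		NULL,                        /* error_log_filename */
		NULL,                        /* log_directory: let init_logger pick root/log */
		LOG_ROTATION_SIZE_DEFAULT,   /* log_rotation_size, KB */
		LOG_ROTATION_AGE_DEFAULT     /* log_rotation_age, minutes */
	};

	init_logger(backup_catalog_path, &cfg);
	elog(INFO, "logging initialized in %s", logger_config.log_directory);
}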
1393
src/utils/pgut.c
File diff suppressed because it is too large

@ -12,63 +12,10 @@
#define PGUT_H

#include "postgres_fe.h"
#include "access/xlogdefs.h"
#include "libpq-fe.h"

#define INFINITE_STR "INFINITE"

typedef enum pgut_optsrc
{
SOURCE_DEFAULT,
SOURCE_FILE_STRICT,
SOURCE_ENV,
SOURCE_FILE,
SOURCE_CMDLINE,
SOURCE_CONST
} pgut_optsrc;

/*
 * type:
 * b: bool (true)
 * B: bool (false)
 * f: pgut_optfn
 * i: 32bit signed integer
 * u: 32bit unsigned integer
 * I: 64bit signed integer
 * U: 64bit unsigned integer
 * s: string
 * t: time_t
 */
typedef struct pgut_option
{
char type;
uint8 sname; /* short name */
const char *lname; /* long name */
void *var; /* pointer to variable */
pgut_optsrc allowed; /* allowed source */
pgut_optsrc source; /* actual source */
int flags; /* option unit */
} pgut_option;

typedef void (*pgut_optfn) (pgut_option *opt, const char *arg);
typedef void (*pgut_atexit_callback)(bool fatal, void *userdata);

/*
 * bit values in "flags" of an option
 */
#define OPTION_UNIT_KB 0x1000 /* value is in kilobytes */
#define OPTION_UNIT_BLOCKS 0x2000 /* value is in blocks */
#define OPTION_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
#define OPTION_UNIT_XSEGS 0x4000 /* value is in xlog segments */
#define OPTION_UNIT_MEMORY 0xF000 /* mask for size-related units */

#define OPTION_UNIT_MS 0x10000 /* value is in milliseconds */
#define OPTION_UNIT_S 0x20000 /* value is in seconds */
#define OPTION_UNIT_MIN 0x30000 /* value is in minutes */
#define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */

#define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME)

/*
 * pgut client variables and functions
 */

@ -82,10 +29,6 @@ extern void pgut_help(bool details);
/*
 * pgut framework variables and functions
 */
extern const char *pgut_dbname;
extern const char *host;
extern const char *port;
extern const char *username;
extern bool prompt_password;
extern bool force_password;

@ -93,23 +36,21 @@ extern bool interrupted;
extern bool in_cleanup;
extern bool in_password; /* User prompts password */

extern int pgut_getopt(int argc, char **argv, pgut_option options[]);
extern int pgut_readopt(const char *path, pgut_option options[], int elevel,
bool strict);
extern void pgut_getopt_env(pgut_option options[]);
extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);
extern void pgut_atexit_pop(pgut_atexit_callback callback, void *userdata);

extern void pgut_init(void);
extern void exit_or_abort(int exitcode);

/*
 * Database connections
 */
extern char *pgut_get_conninfo_string(PGconn *conn);
extern PGconn *pgut_connect(const char *dbname);
extern PGconn *pgut_connect_extended(const char *pghost, const char *pgport,
const char *dbname, const char *login);
extern PGconn *pgut_connect_replication(const char *dbname);
extern PGconn *pgut_connect_replication_extended(const char *pghost, const char *pgport,
const char *dbname, const char *pguser);
extern PGconn *pgut_connect(const char *host, const char *port,
const char *dbname, const char *username);
extern PGconn *pgut_connect_replication(const char *host, const char *port,
const char *dbname,
const char *username);
extern void pgut_disconnect(PGconn *conn);
extern PGresult *pgut_execute(PGconn* conn, const char *query, int nParams,
const char **params);

@ -132,6 +73,11 @@ extern char *pgut_strdup(const char *str);
#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
#define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n)))

/*
 * file operations
 */
extern FILE *pgut_fopen(const char *path, const char *mode, bool missing_ok);

/*
 * Assert
 */

@ -149,22 +95,6 @@ extern char *pgut_strdup(const char *str);
#define AssertMacro(x) ((void) 0)
#endif

extern bool parse_bool(const char *value, bool *result);
extern bool parse_bool_with_len(const char *value, size_t len, bool *result);
extern bool parse_int32(const char *value, int32 *result, int flags);
extern bool parse_uint32(const char *value, uint32 *result, int flags);
extern bool parse_int64(const char *value, int64 *result, int flags);
extern bool parse_uint64(const char *value, uint64 *result, int flags);
extern bool parse_time(const char *value, time_t *result, bool utc_default);
extern bool parse_int(const char *value, int *result, int flags,
const char **hintmsg);
extern bool parse_lsn(const char *value, XLogRecPtr *result);

extern void convert_from_base_unit(int64 base_value, int base_unit,
int64 *value, const char **unit);
extern void convert_from_base_unit_u(uint64 base_value, int base_unit,
uint64 *value, const char **unit);

#define IsSpace(c) (isspace((unsigned char)(c)))
#define IsAlpha(c) (isalpha((unsigned char)(c)))
#define IsAlnum(c) (isalnum((unsigned char)(c)))
@ -22,6 +22,7 @@ static bool corrupted_backup_found = false;

typedef struct
{
const char *base_path;
parray *files;
bool corrupted;
XLogRecPtr stop_lsn;

@ -101,6 +102,7 @@ pgBackupValidate(pgBackup *backup)
{
validate_files_arg *arg = &(threads_args[i]);

arg->base_path = base_path;
arg->files = files;
arg->corrupted = false;
arg->stop_lsn = backup->stop_lsn;

@ -223,8 +225,17 @@ pgBackupValidateFiles(void *arg)
 * CRC-32C algorithm.
 * To avoid this problem we need to use different algorithm, CRC-32 in
 * this case.
 *
 * Starting from 2.0.25 we calculate crc of pg_control differently.
 */
crc = pgFileGetCRC(file->path, arguments->backup_version <= 20021);
if (arguments->backup_version >= 20025 &&
strcmp(file->name, "pg_control") == 0)
crc = get_pgcontrol_checksum(arguments->base_path);
else
crc = pgFileGetCRC(file->path,
arguments->backup_version <= 20021 ||
arguments->backup_version >= 20025,
true, NULL, FIO_LOCAL_HOST);
if (crc != file->crc)
{
elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",

@ -275,6 +286,7 @@ do_validate_all(void)
errno = 0;
while ((dent = readdir(dir)))
{
char conf_path[MAXPGPATH];
char child[MAXPGPATH];
struct stat st;

@ -291,10 +303,16 @@ do_validate_all(void)
if (!S_ISDIR(st.st_mode))
continue;

/*
 * Initialize instance configuration.
 */
instance_name = dent->d_name;
sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
sprintf(backup_instance_path, "%s/%s/%s",
backup_path, BACKUPS_DIR, instance_name);
sprintf(arclog_path, "%s/%s/%s", backup_path, "wal", instance_name);
xlog_seg_size = get_config_xlog_seg_size();
join_path_components(conf_path, backup_instance_path,
BACKUP_CATALOG_CONF_FILE);
config_read_opt(conf_path, instance_options, ERROR, false);

do_validate_instance();
}

@ -418,7 +436,8 @@ do_validate_instance(void)
/* Validate corresponding WAL files */
if (current_backup->status == BACKUP_STATUS_OK)
validate_wal(current_backup, arclog_path, 0,
0, 0, base_full_backup->tli, xlog_seg_size);
0, 0, base_full_backup->tli,
instance_config.xlog_seg_size);

/*
 * Mark every descendant of corrupted backup as orphan

@ -506,7 +525,7 @@ do_validate_instance(void)
/* Revalidation successful, validate corresponding WAL files */
validate_wal(backup, arclog_path, 0,
0, 0, current_backup->tli,
xlog_seg_size);
instance_config.xlog_seg_size);
}
}
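To summarize the validation change above in one place: for backups written by 2.0.25 or newer, pg_control is checked against the CRC stored in its own header rather than a CRC computed over the raw file. A condensed, hedged restatement of that branch (variable names shortened from pgBackupValidateFiles(), not a drop-in replacement):

/* Sketch of the version gate used during file validation */
pg_crc32 crc;

if (backup_version >= 20025 && strcmp(file->name, "pg_control") == 0)
	crc = get_pgcontrol_checksum(base_path);   /* CRC taken from pg_control's own header */
else
	crc = pgFileGetCRC(file->path,
					   backup_version <= 20021 || backup_version >= 20025, /* use CRC-32 */
					   true, NULL, FIO_LOCAL_HOST);

if (crc != file->crc)
	elog(WARNING, "Invalid CRC of backup file \"%s\" : %X. Expected %X",
		 file->path, crc, file->crc);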
@ -16,8 +16,8 @@ def load_tests(loader, tests, pattern):
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup_test))
suite.addTests(loader.loadTestsFromModule(compatibility))
suite.addTests(loader.loadTestsFromModule(cfs_backup))
suite.addTests(loader.loadTestsFromModule(cfs_restore))
# suite.addTests(loader.loadTestsFromModule(cfs_backup))
# suite.addTests(loader.loadTestsFromModule(cfs_restore))
# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
# suite.addTests(loader.loadTestsFromModule(logging))
suite.addTests(loader.loadTestsFromModule(compression))
@ -1,11 +1,12 @@
import os
import shutil
import gzip
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, archive_script
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException
from datetime import datetime, timedelta
import subprocess
from sys import exit
from time import sleep
from shutil import copyfile

module_name = 'archive'

@ -220,7 +221,10 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):

# @unittest.skip("skip")
def test_pgpro434_3(self):
"""Check pg_stop_backup_timeout, needed backup_timeout"""
"""
Check pg_stop_backup_timeout, needed backup_timeout
Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(

@ -235,37 +239,32 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)

archive_script_path = os.path.join(backup_dir, 'archive_script.sh')
with open(archive_script_path, 'w+') as f:
f.write(
archive_script.format(
backup_dir=backup_dir, node_name='node', count_limit=2))

st = os.stat(archive_script_path)
os.chmod(archive_script_path, st.st_mode | 0o111)
node.append_conf(
'postgresql.auto.conf', "archive_command = '{0} %p %f'".format(
archive_script_path))
node.slow_start()
try:
self.backup_node(

gdb = self.backup_node(
backup_dir, 'node', node,
options=[
"--archive-timeout=60",
"--stream"]
)
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because pg_stop_backup failed to answer.\n "
"Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertTrue(
"ERROR: pg_stop_backup doesn't answer" in e.message and
"cancel it" in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
"--stream",
"--log-level-file=info"],
gdb=True)

gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()

node.append_conf(
'postgresql.auto.conf', "archive_command = 'exit 1'")
node.reload()

gdb.continue_execution_until_exit()

log_file = os.path.join(backup_dir, 'log/pg_probackup.log')
with open(log_file, 'r') as f:
log_content = f.read()
self.assertNotIn(
"ERROR: pg_stop_backup doesn't answer",
log_content,
"pg_stop_backup timeouted")

log_file = os.path.join(node.logs_dir, 'postgresql.log')
with open(log_file, 'r') as f:

@ -325,7 +324,16 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
)
self.assertFalse('pg_probackup archive-push completed successfully' in log_content)

os.remove(file)
wal_src = os.path.join(
node.data_dir, 'pg_wal', '000000010000000000000001')

if self.archive_compress:
with open(wal_src, 'rb') as f_in, gzip.open(
file, 'wb', compresslevel=1) as f_out:
shutil.copyfileobj(f_in, f_out)
else:
shutil.copyfile(wal_src, file)

self.switch_wal_segment(node)
sleep(5)

@ -400,7 +408,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)

# @unittest.expectedFailure
@unittest.skip("skip")
# @unittest.skip("skip")
def test_replica_archive(self):
"""
make node without archiving, take stream backup and

@ -490,7 +498,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,20680) i")
"from generate_series(512,80680) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")

@ -498,15 +506,13 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"postgres",
"CHECKPOINT")

# copyfile(
# os.path.join(backup_dir, 'wal/master/000000010000000000000002'),
# os.path.join(backup_dir, 'wal/replica/000000010000000000000002'))
self.wait_until_replica_catch_with_master(master, replica)

backup_id = self.backup_node(
backup_dir, 'replica',
replica, backup_type='page',
options=[
'--archive-timeout=30',
'--archive-timeout=60',
'--master-db=postgres',
'--master-host=localhost',
'--master-port={0}'.format(master.port),

@ -596,10 +602,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0, 60000) i")

# TAKE FULL ARCHIVE BACKUP FROM REPLICA
copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000001'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000001'))
master.psql(
"postgres",
"CHECKPOINT")

backup_id = self.backup_node(
backup_dir, 'replica', replica,
@ -1 +1 @@
pg_probackup 2.0.24
pg_probackup 2.0.25
@ -60,19 +60,6 @@ idx_ptrack = {
}
}

archive_script = """
#!/bin/bash
count=$(ls {backup_dir}/test00* | wc -l)
if [ $count -ge {count_limit} ]
then
exit 1
else
cp $1 {backup_dir}/wal/{node_name}/$2
count=$((count+1))
touch {backup_dir}/test00$count
exit 0
fi
"""
warning = """
Wrong splint in show_pb
Original Header:

@ -874,8 +861,8 @@ class ProbackupTest(object):
return out_dict

def set_archiving(
self, backup_dir, instance, node, replica=False, overwrite=False, compress=False,
old_binary=False):
self, backup_dir, instance, node, replica=False,
overwrite=False, compress=False, old_binary=False):

if replica:
archive_mode = 'always'
@ -127,10 +127,11 @@ class OptionTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir))
self.add_instance(backup_dir, 'node', node)

node.start()
node.slow_start()

# syntax error in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
conf_file = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf")
with open(conf_file, "a") as conf:
conf.write(" = INFINITE\n")
try:
self.backup_node(backup_dir, 'node', node)

@ -139,7 +140,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: syntax error in " = INFINITE"\n',
'ERROR: Syntax error in " = INFINITE"\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

self.clean_pb(backup_dir)

@ -147,7 +148,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)

# invalid value in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
with open(conf_file, "a") as conf:
conf.write("BACKUP_MODE=\n")

try:

@ -157,7 +158,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: invalid backup-mode ""\n',
'ERROR: Invalid option "BACKUP_MODE" in file "{0}"\n'.format(conf_file),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

self.clean_pb(backup_dir)

@ -165,7 +166,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)

# Command line parameters should override file values
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
with open(conf_file, "a") as conf:
conf.write("retention-redundancy=1\n")

self.assertEqual(self.show_config(backup_dir, 'node')['retention-redundancy'], '1')

@ -178,11 +179,11 @@ class OptionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: option system-identifier cannot be specified in command line\n',
'ERROR: Option system-identifier cannot be specified in command line\n',
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

# invalid value in pg_probackup.conf
with open(os.path.join(backup_dir, "backups", "node", "pg_probackup.conf"), "a") as conf:
with open(conf_file, "a") as conf:
conf.write("SMOOTH_CHECKPOINT=FOO\n")

try:

@ -192,7 +193,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
"ERROR: option -C, --smooth-checkpoint should be a boolean: 'FOO'\n",
'ERROR: Invalid option "SMOOTH_CHECKPOINT" in file "{0}"\n'.format(conf_file),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

self.clean_pb(backup_dir)

@ -200,8 +201,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)

# invalid option in pg_probackup.conf
pbconf_path = os.path.join(backup_dir, "backups", "node", "pg_probackup.conf")
with open(pbconf_path, "a") as conf:
with open(conf_file, "a") as conf:
conf.write("TIMELINEID=1\n")

try:

@ -211,7 +211,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd))
except ProbackupException as e:
self.assertEqual(e.message,
'ERROR: invalid option "TIMELINEID" in file "{0}"\n'.format(pbconf_path),
'ERROR: Invalid option "TIMELINEID" in file "{0}"\n'.format(conf_file),
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

# Clean after yourself
@ -3,6 +3,8 @@ import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from datetime import datetime, timedelta
import subprocess
import gzip
import shutil

module_name = 'page'

@ -781,7 +783,22 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
wals_dir, f)) and not f.endswith('.backup')]
wals = map(str, wals)
# file = os.path.join(wals_dir, max(wals))
file = os.path.join(wals_dir, '000000010000000000000004')

if self.archive_compress:
original_file = os.path.join(wals_dir, '000000010000000000000004.gz')
tmp_file = os.path.join(backup_dir, '000000010000000000000004')

with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)

# drop healthy file
os.remove(original_file)
file = tmp_file

else:
file = os.path.join(wals_dir, '000000010000000000000004')

# corrupt file
print(file)
with open(file, "rb+", 0) as f:
f.seek(42)

@ -790,7 +807,14 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
f.close

if self.archive_compress:
file = file[:-3]
# compress corrupted file and replace with it old file
with open(file, 'rb') as f_in, gzip.open(original_file, 'wb', compresslevel=1) as f_out:
shutil.copyfileobj(f_in, f_out)

file = os.path.join(wals_dir, '000000010000000000000004.gz')

#if self.archive_compress:
# file = file[:-3]

# Single-thread PAGE backup
try:

@ -915,9 +939,6 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
print(file_destination)
os.rename(file, file_destination)

if self.archive_compress:
file_destination = file_destination[:-3]

# Single-thread PAGE backup
try:
self.backup_node(
@ -130,7 +130,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})
'max_wal_senders': '2',
'archive_timeout': '30s'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)

@ -237,7 +239,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
options=[
'-j10', '--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)])
'--master-port={0}'.format(master.port),
'--stream'])
master.safe_psql('postgres', 'checkpoint')

for i in idx_ptrack:

@ -9,9 +9,9 @@ module_name = 'ptrack_clean'

class SimpleTest(ProbackupTest, unittest.TestCase):

# @unittest.skip("skip")
@unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
def test_ptrack_empty(self):
"""Take backups of every available types and check that PTRACK is clean"""
fname = self.id().split('.')[3]
node = self.make_simple_node(

@ -87,9 +87,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
@unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean_replica(self):
def test_ptrack_empty_replica(self):
"""Take backups of every available types from master and check that PTRACK on replica is clean"""
fname = self.id().split('.')[3]
master = self.make_simple_node(

@ -99,9 +99,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on', 'wal_level': 'replica',
'max_wal_senders': '2', 'autovacuum': 'off',
'checkpoint_timeout': '30s'}
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off',
'archive_timeout': '30s'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)

@ -150,7 +152,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)
'--master-port={0}'.format(master.port),
'--stream'
]
)
# TODO: check that all ptrack are nullified

@ -147,7 +147,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port)
'--master-port={0}'.format(master.port),
'--stream'
]
)
234
tests/replica.py
@ -5,7 +5,6 @@ from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
from shutil import copyfile

module_name = 'replica'

@ -27,8 +26,9 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s', 'ptrack_enable': 'on'}
'wal_level': 'replica',
'max_wal_senders': '2',
'ptrack_enable': 'on'}
)
master.start()
self.init_pb(backup_dir)

@ -144,7 +144,6 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'archive_timeout': '10s'}
)
self.init_pb(backup_dir)

@ -171,7 +170,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
self.restore_node(backup_dir, 'master', replica)

# Settings for Replica
self.set_replica(master, replica)
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(master, replica, synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)

replica.slow_start(replica=True)

@ -187,31 +187,23 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(256,5120) i")
"from generate_series(256,25120) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")
self.add_instance(backup_dir, 'replica', replica)

copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000003'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000003'))
master.psql(
"postgres",
"CHECKPOINT")

copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000004'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000004'))

copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000005'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000005'))
self.wait_until_replica_catch_with_master(master, replica)

backup_id = self.backup_node(
backup_dir, 'replica', replica,
options=[
'--archive-timeout=30',
'--archive-timeout=60',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port),
'--stream'])
'--master-port={0}'.format(master.port)])

self.validate_pb(backup_dir, 'replica')
self.assertEqual(

@ -222,8 +214,13 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
base_dir="{0}/{1}/node".format(module_name, fname))
node.cleanup()
self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)

node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))

node.append_conf(
'postgresql.auto.conf', 'archive_mode = off'.format(node.port))

node.slow_start()

# CHECK DATA CORRECTNESS

@ -234,23 +231,25 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# Change data on master, make PAGE backup from replica,
# restore taken backup and check that restored data equal
# to original data
master.psql(
"postgres",
"insert into t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(512,22680) i")
master.pgbench_init(scale=5)

before = master.safe_psql("postgres", "SELECT * FROM t_heap")
pgbench = master.pgbench(
options=['-T', '30', '-c', '2', '--no-vacuum'])

backup_id = self.backup_node(
backup_dir, 'replica',
replica, backup_type='page',
options=[
'--archive-timeout=30',
'--archive-timeout=60',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(master.port),
'--stream'])
'--master-port={0}'.format(master.port)])

pgbench.wait()

self.switch_wal_segment(master)

before = master.safe_psql("postgres", "SELECT * FROM pgbench_accounts")

self.validate_pb(backup_dir, 'replica')
self.assertEqual(

@ -258,17 +257,21 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):

# RESTORE PAGE BACKUP TAKEN FROM replica
self.restore_node(
backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
backup_dir, 'replica', data_dir=node.data_dir,
backup_id=backup_id)

node.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node.port))

node.append_conf(
'postgresql.auto.conf', 'archive_mode = off')

node.slow_start()

# CHECK DATA CORRECTNESS
after = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(before, after)
after = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts")
self.assertEqual(
before, after, 'Restored data is not equal to original')

self.add_instance(backup_dir, 'node', node)
self.backup_node(

@ -290,8 +293,9 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
'wal_level': 'replica',
'max_wal_senders': '2',
'archive_timeout': '10s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)

@ -310,7 +314,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
"from generate_series(0,8192) i")

before = master.safe_psql("postgres", "SELECT * FROM t_heap")

@ -320,6 +324,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
backup_dir, 'master', replica, options=['-R'])

# Settings for Replica
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(replica.port))

@ -328,13 +333,9 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):

replica.slow_start(replica=True)

self.add_instance(backup_dir, 'replica', replica)

copyfile(
os.path.join(backup_dir, 'wal/master/000000010000000000000003'),
os.path.join(backup_dir, 'wal/replica/000000010000000000000003'))

self.backup_node(backup_dir, 'replica', replica)
self.backup_node(
backup_dir, 'replica', replica,
options=['--archive-timeout=30s', '--stream'])

# Clean after yourself
self.del_test_dir(module_name, fname)

@ -353,14 +354,13 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'checkpoint_timeout': '30s'}
'wal_level': 'replica',
'max_wal_senders': '2',
'archive_timeout': '10s'}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
|
||||
# force more frequent wal switch
|
||||
#master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
|
||||
master.slow_start()
|
||||
|
||||
replica = self.make_simple_node(
|
||||
@ -369,6 +369,22 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
self.backup_node(backup_dir, 'master', master)
|
||||
|
||||
master.psql(
|
||||
"postgres",
|
||||
"create table t_heap as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,165000) i")
|
||||
|
||||
master.psql(
|
||||
"postgres",
|
||||
"CHECKPOINT")
|
||||
|
||||
master.psql(
|
||||
"postgres",
|
||||
"create table t_heap_1 as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,165000) i")
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'master', replica, options=['-R'])
|
||||
|
||||
@ -376,36 +392,35 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
|
||||
# stupid hack
|
||||
copyfile(
|
||||
os.path.join(backup_dir, 'wal/master/000000010000000000000001'),
|
||||
os.path.join(backup_dir, 'wal/replica/000000010000000000000001'))
|
||||
|
||||
replica.append_conf(
|
||||
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
|
||||
|
||||
replica.append_conf(
|
||||
'postgresql.auto.conf', 'hot_standby = on')
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
|
||||
replica.append_conf(
|
||||
'recovery.conf', "recovery_min_apply_delay = '300s'")
|
||||
|
||||
replica.slow_start(replica=True)
|
||||
replica.restart()
|
||||
|
||||
master.pgbench_init(scale=10)
|
||||
|
||||
pgbench = master.pgbench(
|
||||
options=['-T', '30', '-c', '2', '--no-vacuum'])
|
||||
options=['-T', '60', '-c', '2', '--no-vacuum'])
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica)
|
||||
backup_dir, 'replica',
|
||||
replica, options=['--archive-timeout=60s'])
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica,
|
||||
data_dir=replica.data_dir, backup_type='page')
|
||||
data_dir=replica.data_dir,
|
||||
backup_type='page', options=['--archive-timeout=60s'])
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'replica', replica, backup_type='delta')
|
||||
backup_dir, 'replica', replica,
|
||||
backup_type='delta', options=['--archive-timeout=60s'])
|
||||
|
||||
pgbench.wait()
|
||||
|
||||
@ -428,106 +443,3 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
@unittest.skip("skip")
|
||||
def test_make_block_from_future(self):
|
||||
"""
|
||||
make archive master, take full backups from master,
|
||||
restore full backup as replica, launch pgbench,
|
||||
"""
|
||||
fname = self.id().split('.')[3]
|
||||
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
|
||||
master = self.make_simple_node(
|
||||
base_dir="{0}/{1}/master".format(module_name, fname),
|
||||
set_replication=True,
|
||||
initdb_params=['--data-checksums'],
|
||||
pg_options={
|
||||
'wal_level': 'replica', 'max_wal_senders': '2',
|
||||
'checkpoint_timeout': '30s'}
|
||||
)
|
||||
self.init_pb(backup_dir)
|
||||
self.add_instance(backup_dir, 'master', master)
|
||||
self.set_archiving(backup_dir, 'master', master)
|
||||
# force more frequent wal switch
|
||||
#master.append_conf('postgresql.auto.conf', 'archive_timeout = 10')
|
||||
master.slow_start()
|
||||
|
||||
replica = self.make_simple_node(
|
||||
base_dir="{0}/{1}/replica".format(module_name, fname))
|
||||
replica.cleanup()
|
||||
|
||||
self.backup_node(backup_dir, 'master', master)
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'master', replica, options=['-R'])
|
||||
|
||||
# Settings for Replica
|
||||
self.set_archiving(backup_dir, 'replica', replica, replica=True)
|
||||
replica.append_conf(
|
||||
'postgresql.auto.conf', 'port = {0}'.format(replica.port))
|
||||
replica.append_conf(
|
||||
'postgresql.auto.conf', 'hot_standby = on')
|
||||
|
||||
replica.slow_start(replica=True)
|
||||
|
||||
self.add_instance(backup_dir, 'replica', replica)
|
||||
|
||||
replica.safe_psql(
|
||||
'postgres',
|
||||
'checkpoint')
|
||||
|
||||
master.pgbench_init(scale=10)
|
||||
|
||||
self.wait_until_replica_catch_with_master(master, replica)
|
||||
|
||||
|
||||
# print(replica.safe_psql(
|
||||
# 'postgres',
|
||||
# 'select * from pg_catalog.pg_last_xlog_receive_location()'))
|
||||
#
|
||||
# print(replica.safe_psql(
|
||||
# 'postgres',
|
||||
# 'select * from pg_catalog.pg_last_xlog_replay_location()'))
|
||||
#
|
||||
# print(replica.safe_psql(
|
||||
# 'postgres',
|
||||
# 'select * from pg_catalog.pg_control_checkpoint()'))
|
||||
#
|
||||
# replica.safe_psql(
|
||||
# 'postgres',
|
||||
# 'checkpoint')
|
||||
|
||||
pgbench = master.pgbench(options=['-T', '30', '-c', '2', '--no-vacuum'])
|
||||
|
||||
time.sleep(5)
|
||||
|
||||
#self.backup_node(backup_dir, 'replica', replica, options=['--stream'])
|
||||
exit(1)
|
||||
self.backup_node(backup_dir, 'replica', replica)
|
||||
pgbench.wait()
|
||||
|
||||
# pgbench
|
||||
master.safe_psql(
|
||||
"postgres",
|
||||
"create table t_heap as select i as id, md5(i::text) as text, "
|
||||
"md5(repeat(i::text,10))::tsvector as tsvector "
|
||||
"from generate_series(0,256000) i")
|
||||
|
||||
|
||||
master.safe_psql(
|
||||
'postgres',
|
||||
'checkpoint')
|
||||
|
||||
replica.safe_psql(
|
||||
'postgres',
|
||||
'checkpoint')
|
||||
|
||||
replica.safe_psql(
|
||||
'postgres',
|
||||
'select * from pg_')
|
||||
|
||||
self.backup_node(backup_dir, 'replica', replica)
|
||||
exit(1)
|
||||
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
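Note on the replica hunks above: the manual copyfile() WAL hacks are replaced by self.wait_until_replica_catch_with_master(master, replica), and the backups now rely on --archive-timeout instead of --stream. That helper is presumably provided by the ProbackupTest base class and is not part of this diff; the following is only a minimal sketch of the idea, assuming PostgreSQL 10+ LSN functions and the testgres-style safe_psql() used in these tests, in which the replica's replay LSN is polled until it reaches the master's current WAL position.

import time

def wait_until_replica_catch_with_master(master, replica, timeout=60):
    # Hypothetical re-implementation for illustration only; the real helper
    # in the test framework may differ.
    target_lsn = master.safe_psql(
        'postgres', 'SELECT pg_current_wal_lsn()').decode().strip()
    for _ in range(timeout):
        caught_up = replica.safe_psql(
            'postgres',
            "SELECT pg_wal_lsn_diff(pg_last_wal_replay_lsn(), '{0}') >= 0".format(
                target_lsn)).decode().strip()
        if caught_up == 't':
            return
        time.sleep(1)
    raise Exception('replica did not catch up with master within {0}s'.format(timeout))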
@@ -197,7 +197,9 @@ class OptionTest(ProbackupTest, unittest.TestCase):
fd.write("statuss = OK")
fd.close()

self.assertIn('invalid option "statuss" in file'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
self.assertIn(
'WARNING: Invalid option "statuss" in file'.format(file),
self.show_pb(backup_dir, 'node', as_text=True))

# Clean after yourself
self.del_test_dir(module_name, fname)
# self.del_test_dir(module_name, fname)

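The assertion above now expects pg_probackup to report 'WARNING: Invalid option ...' for an unknown key in pg_probackup.conf and continue, rather than treat it as a fatal error. As a rough illustration of that warn-and-skip style of config parsing (this is not pg_probackup's actual parser, and the key names below are examples only):

def parse_config(path):
    # Illustrative sketch only: unknown keys are reported and skipped
    # instead of aborting. The set of known keys here is made up.
    known = {'pgdata', 'system-identifier', 'compress-algorithm'}
    options = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = (part.strip() for part in line.split('=', 1))
            if key not in known:
                print('WARNING: Invalid option "{0}" in file {1}'.format(key, path))
                continue
            options[key] = value
    return options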
@@ -1519,10 +1519,17 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node1', node1,
backup_type='page', options=["--stream"])
self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)

node2.append_conf(
'postgresql.auto.conf', 'port = {0}'.format(node2.port))
node2.append_conf(
'postgresql.auto.conf', 'archive_mode = off')
node2.slow_start()

node2.append_conf(
'postgresql.auto.conf', 'archive_mode = on')
node2.restart()

timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
self.assertEqual(

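The hunk above starts node2 with archive_mode = off, then flips it to on and calls restart() (archive_mode cannot be changed by reload alone), and finally compares the two nodes' "Latest checkpoint's TimeLineID" values obtained from get_control_data(). That accessor belongs to the test framework rather than to this diff; a minimal sketch of the idea, assuming pg_controldata is on PATH and a node object exposing data_dir, is:

import subprocess

def get_control_data(node):
    # Hypothetical helper for illustration: parse `pg_controldata` output
    # into a dict so fields such as "Latest checkpoint's TimeLineID"
    # can be looked up by name. The real framework helper may differ.
    out = subprocess.check_output(
        ['pg_controldata', node.data_dir]).decode('utf-8')
    data = {}
    for line in out.splitlines():
        if ':' in line:
            key, value = line.split(':', 1)
            data[key.strip()] = value.strip()
    return data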