mirror of https://github.com/postgrespro/pg_probackup.git

commit 8f6fb524fd: Merge branch 'pgpro-533'

Makefile | 3
@@ -4,7 +4,8 @@ OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
     src/pg_probackup.o src/restore.o src/show.o src/status.o \
     src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
     src/xlogreader.o src/streamutil.o src/receivelog.o \
-    src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o
+    src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
+    src/utils/json.o
 
 EXTRA_CLEAN = src/datapagemap.c src/datapagemap.h src/xlogreader.c \
     src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h src/logging.h
src/backup.c | 24

@@ -633,7 +633,7 @@ do_backup_instance(void)
      * For backup from master wait for previous segment.
      * For backup from replica wait for current segment.
      */
-                        !from_replica, backup_files_list);
+                        !current.from_replica, backup_files_list);
     }
 
     if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
@@ -815,11 +815,15 @@ do_backup(time_t start_time)
     pgut_atexit_push(backup_disconnect, NULL);
 
+    current.primary_conninfo = pgut_get_conninfo_string(backup_conn);
+
+    current.compress_alg = compress_alg;
+    current.compress_level = compress_level;
+
     /* Confirm data block size and xlog block size are compatible */
     confirm_block_size("block_size", BLCKSZ);
     confirm_block_size("wal_block_size", XLOG_BLCKSZ);
 
-    from_replica = pg_is_in_recovery();
+    current.from_replica = pg_is_in_recovery();
 
     /* Confirm that this server version is supported */
     check_server_version();
@@ -859,7 +863,7 @@ do_backup(time_t start_time)
         }
     }
 
-    if (from_replica)
+    if (current.from_replica)
     {
         /* Check master connection options */
         if (master_host == NULL)
@@ -956,7 +960,7 @@ check_server_version(void)
              "server version is %s, must be %s or higher",
              server_version_str, "9.5");
 
-    if (from_replica && server_version < 90600)
+    if (current.from_replica && server_version < 90600)
         elog(ERROR,
              "server version is %s, must be %s or higher for backup from replica",
              server_version_str, "9.6");
@@ -1061,7 +1065,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
     params[0] = label;
 
     /* For replica we call pg_start_backup() on master */
-    conn = (from_replica) ? master_conn : backup_conn;
+    conn = (backup->from_replica) ? master_conn : backup_conn;
 
     /* 2nd argument is 'fast'*/
     params[1] = smooth ? "false" : "true";
@@ -1112,7 +1116,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup)
     }
 
     /* Wait for start_lsn to be replayed by replica */
-    if (from_replica)
+    if (backup->from_replica)
         wait_replica_wal_lsn(backup->start_lsn, true);
 }
 
@@ -1555,8 +1559,6 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
 {
     uint32      try_count = 0;
 
-    Assert(from_replica);
-
     while (true)
     {
         PGresult   *res;
@@ -1651,7 +1653,7 @@ pg_stop_backup(pgBackup *backup)
         elog(FATAL, "backup is not in progress");
 
     /* For replica we call pg_stop_backup() on master */
-    conn = (from_replica) ? master_conn : backup_conn;
+    conn = (current.from_replica) ? master_conn : backup_conn;
 
     /* Remove annoying NOTICE messages generated by backend */
     res = pgut_execute(conn, "SET client_min_messages = warning;",
@@ -1664,7 +1666,7 @@ pg_stop_backup(pgBackup *backup)
         const char *params[1];
         char        name[1024];
 
-        if (!from_replica)
+        if (!current.from_replica)
             snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
                      base36enc(backup->start_time));
         else
@@ -1892,7 +1894,7 @@ pg_stop_backup(pgBackup *backup)
                 stream_xlog_path[MAXPGPATH];
 
         /* Wait for stop_lsn to be received by replica */
-        if (from_replica)
+        if (backup->from_replica)
             wait_replica_wal_lsn(stop_backup_lsn, false);
         /*
          * Wait for stop_lsn to be archived or streamed.
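The substance of the backup.c changes: from_replica stops being a process-global flag and travels with the backup's own record (current is the pgBackup being taken; pg_start_backup()/pg_stop_backup() read it from the pgBackup they receive). A minimal sketch of that pattern, using hypothetical, simplified names rather than the project's real declarations:

    /* Sketch only: moving per-backup state off a global.
     * SketchBackup and start_backup() are illustrative names. */
    #include <stdbool.h>

    typedef struct SketchBackup
    {
        bool    from_replica;   /* recorded with the backup's metadata */
    } SketchBackup;

    static SketchBackup current;    /* the backup currently being taken */

    static void
    start_backup(bool is_in_recovery)
    {
        /* before: a file-scope "bool from_replica" was set here and read
         * from everywhere; now the flag is stored on the backup record,
         * so it can also be written to and read back from the catalog */
        current.from_replica = is_in_recovery;
    }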
src/catalog.c

@@ -385,15 +385,17 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
 
     fprintf(out, "#Configuration\n");
     fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup));
-    fprintf(out, "stream = %s\n", backup->stream?"true":"false");
-    fprintf(out, "compress-alg = %s\n", deparse_compress_alg(compress_alg));
-    fprintf(out, "compress-level = %d\n", compress_level);
-    fprintf(out, "from-replica = %s\n", from_replica?"true":"false");
+    fprintf(out, "stream = %s\n", backup->stream ? "true" : "false");
+    fprintf(out, "compress-alg = %s\n",
+            deparse_compress_alg(backup->compress_alg));
+    fprintf(out, "compress-level = %d\n", backup->compress_level);
+    fprintf(out, "from-replica = %s\n", backup->from_replica ? "true" : "false");
 
     fprintf(out, "\n#Compatibility\n");
     fprintf(out, "block-size = %u\n", backup->block_size);
     fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size);
     fprintf(out, "checksum-version = %u\n", backup->checksum_version);
+    fprintf(out, "program-version = %s\n", PROGRAM_VERSION);
     if (backup->server_version[0] != '\0')
         fprintf(out, "server-version = %s\n", backup->server_version);
 
@@ -429,7 +431,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
     if (backup->data_bytes != BYTES_INVALID)
         fprintf(out, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes);
 
-    if (backup->data_bytes != BYTES_INVALID)
+    if (backup->wal_bytes != BYTES_INVALID)
         fprintf(out, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes);
 
     fprintf(out, "status = %s\n", status2str(backup->status));
@@ -475,10 +477,9 @@ readBackupControlFile(const char *path)
     char       *stop_lsn = NULL;
     char       *status = NULL;
     char       *parent_backup = NULL;
-    char       *compress_alg = NULL;
+    char       *program_version = NULL;
     char       *server_version = NULL;
-    int        *compress_level;
-    bool       *from_replica;
+    char       *compress_alg = NULL;
 
     pgut_option options[] =
     {
@@ -495,13 +496,14 @@ readBackupControlFile(const char *path)
         {'u', 0, "block-size", &backup->block_size, SOURCE_FILE_STRICT},
         {'u', 0, "xlog-block-size", &backup->wal_block_size, SOURCE_FILE_STRICT},
         {'u', 0, "checksum-version", &backup->checksum_version, SOURCE_FILE_STRICT},
+        {'s', 0, "program-version", &program_version, SOURCE_FILE_STRICT},
         {'s', 0, "server-version", &server_version, SOURCE_FILE_STRICT},
         {'b', 0, "stream", &backup->stream, SOURCE_FILE_STRICT},
         {'s', 0, "status", &status, SOURCE_FILE_STRICT},
         {'s', 0, "parent-backup-id", &parent_backup, SOURCE_FILE_STRICT},
         {'s', 0, "compress-alg", &compress_alg, SOURCE_FILE_STRICT},
-        {'u', 0, "compress-level", &compress_level, SOURCE_FILE_STRICT},
-        {'b', 0, "from-replica", &from_replica, SOURCE_FILE_STRICT},
+        {'u', 0, "compress-level", &backup->compress_level, SOURCE_FILE_STRICT},
+        {'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT},
         {'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT},
         {0}
     };
@@ -571,6 +573,13 @@ readBackupControlFile(const char *path)
         free(parent_backup);
     }
 
+    if (program_version)
+    {
+        StrNCpy(backup->program_version, program_version,
+                sizeof(backup->program_version));
+        pfree(program_version);
+    }
+
     if (server_version)
     {
         StrNCpy(backup->server_version, server_version,
@@ -578,6 +587,9 @@ readBackupControlFile(const char *path)
         pfree(server_version);
     }
 
+    if (compress_alg)
+        backup->compress_alg = parse_compress_alg(compress_alg);
+
     return backup;
 }
 
@@ -626,6 +638,48 @@ deparse_backup_mode(BackupMode mode)
     return NULL;
 }
 
+CompressAlg
+parse_compress_alg(const char *arg)
+{
+    size_t      len;
+
+    /* Skip all spaces detected */
+    while (isspace((unsigned char)*arg))
+        arg++;
+    len = strlen(arg);
+
+    if (len == 0)
+        elog(ERROR, "compress algrorithm is empty");
+
+    if (pg_strncasecmp("zlib", arg, len) == 0)
+        return ZLIB_COMPRESS;
+    else if (pg_strncasecmp("pglz", arg, len) == 0)
+        return PGLZ_COMPRESS;
+    else if (pg_strncasecmp("none", arg, len) == 0)
+        return NONE_COMPRESS;
+    else
+        elog(ERROR, "invalid compress algorithm value \"%s\"", arg);
+
+    return NOT_DEFINED_COMPRESS;
+}
+
+const char*
+deparse_compress_alg(int alg)
+{
+    switch (alg)
+    {
+        case NONE_COMPRESS:
+        case NOT_DEFINED_COMPRESS:
+            return "none";
+        case ZLIB_COMPRESS:
+            return "zlib";
+        case PGLZ_COMPRESS:
+            return "pglz";
+    }
+
+    return NULL;
+}
+
 /* free pgBackup object */
 void
 pgBackupFree(void *backup)
src/configure.c | 166

@@ -2,19 +2,37 @@
  *
  * configure.c: - manage backup catalog.
  *
- * Copyright (c) 2017-2017, Postgres Professional
+ * Copyright (c) 2017-2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
  */
 
 #include "pg_probackup.h"
 
+#include "pqexpbuffer.h"
+
+#include "utils/json.h"
+
+
 static void opt_log_level_console(pgut_option *opt, const char *arg);
 static void opt_log_level_file(pgut_option *opt, const char *arg);
 static void opt_compress_alg(pgut_option *opt, const char *arg);
 
+static void show_configure_start(void);
+static void show_configure_end(void);
+static void show_configure(pgBackupConfig *config);
+
+static void show_configure_json(pgBackupConfig *config);
+
 static pgBackupConfig *cur_config = NULL;
 
+static PQExpBufferData show_buf;
+static int32 json_level = 0;
+
+/*
+ * All this code needs refactoring.
+ */
+
 /* Set configure options */
 int
 do_configure(bool show_only)
@@ -68,7 +86,7 @@ do_configure(bool show_only)
         config->compress_level = compress_level;
 
     if (show_only)
-        writeBackupCatalogConfig(stderr, config);
+        show_configure(config);
     else
         writeBackupCatalogConfigFile(config);
 
@@ -251,7 +269,6 @@ readBackupCatalogConfigFile(void)
     pgut_readopt(path, options, ERROR);
 
     return config;
-
 }
 
 static void
@@ -271,3 +288,146 @@ opt_compress_alg(pgut_option *opt, const char *arg)
 {
     cur_config->compress_alg = parse_compress_alg(arg);
 }
+
+/*
+ * Initialize configure visualization.
+ */
+static void
+show_configure_start(void)
+{
+    if (show_format == SHOW_PLAIN)
+        return;
+
+    /* For now we need buffer only for JSON format */
+    json_level = 0;
+    initPQExpBuffer(&show_buf);
+}
+
+/*
+ * Finalize configure visualization.
+ */
+static void
+show_configure_end(void)
+{
+    if (show_format == SHOW_PLAIN)
+        return;
+    else
+        appendPQExpBufferChar(&show_buf, '\n');
+
+    fputs(show_buf.data, stdout);
+    termPQExpBuffer(&show_buf);
+}
+
+/*
+ * Show configure information of pg_probackup.
+ */
+static void
+show_configure(pgBackupConfig *config)
+{
+    show_configure_start();
+
+    if (show_format == SHOW_PLAIN)
+        writeBackupCatalogConfig(stdout, config);
+    else
+        show_configure_json(config);
+
+    show_configure_end();
+}
+
+/*
+ * Json output.
+ */
+
+static void
+show_configure_json(pgBackupConfig *config)
+{
+    PQExpBuffer buf = &show_buf;
+
+    json_add(buf, JT_BEGIN_OBJECT, &json_level);
+
+    json_add_value(buf, "pgdata", config->pgdata, json_level, false);
+
+    json_add_key(buf, "system-identifier", json_level, true);
+    appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier);
+
+    /* Connection parameters */
+    if (config->pgdatabase)
+        json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true);
+    if (config->pghost)
+        json_add_value(buf, "pghost", config->pghost, json_level, true);
+    if (config->pgport)
+        json_add_value(buf, "pgport", config->pgport, json_level, true);
+    if (config->pguser)
+        json_add_value(buf, "pguser", config->pguser, json_level, true);
+
+    /* Replica parameters */
+    if (config->master_host)
+        json_add_value(buf, "master-host", config->master_host, json_level,
+                       true);
+    if (config->master_port)
+        json_add_value(buf, "master-port", config->master_port, json_level,
+                       true);
+    if (config->master_db)
+        json_add_value(buf, "master-db", config->master_db, json_level, true);
+    if (config->master_user)
+        json_add_value(buf, "master-user", config->master_user, json_level,
+                       true);
+
+    if (config->replica_timeout != INT_MIN)
+    {
+        json_add_key(buf, "replica-timeout", json_level, true);
+        appendPQExpBuffer(buf, "%d", config->replica_timeout);
+    }
+
+    /* Logging parameters */
+    if (config->log_level_console != INT_MIN)
+        json_add_value(buf, "log-level-console",
+                       deparse_log_level(config->log_level_console), json_level,
+                       true);
+    if (config->log_level_file != INT_MIN)
+        json_add_value(buf, "log-level-file",
+                       deparse_log_level(config->log_level_file), json_level,
+                       true);
+    if (config->log_filename)
+        json_add_value(buf, "log-filename", config->log_filename, json_level,
+                       true);
+    if (config->error_log_filename)
+        json_add_value(buf, "error-log-filename", config->error_log_filename,
+                       json_level, true);
+    if (config->log_directory)
+        json_add_value(buf, "log-directory", config->log_directory, json_level,
+                       true);
+
+    if (config->log_rotation_size)
+    {
+        json_add_key(buf, "log-rotation-size", json_level, true);
+        appendPQExpBuffer(buf, "%d", config->log_rotation_size);
+    }
+    if (config->log_rotation_age)
+    {
+        json_add_key(buf, "log-rotation-age", json_level, true);
+        appendPQExpBuffer(buf, "%d", config->log_rotation_age);
+    }
+
+    /* Retention parameters */
+    if (config->retention_redundancy)
+    {
+        json_add_key(buf, "retention-redundancy", json_level, true);
+        appendPQExpBuffer(buf, "%u", config->retention_redundancy);
+    }
+    if (config->retention_window)
+    {
+        json_add_key(buf, "retention-window", json_level, true);
+        appendPQExpBuffer(buf, "%u", config->retention_window);
+    }
+
+    /* Compression parameters */
+    json_add_value(buf, "compress-algorithm",
+                   deparse_compress_alg(config->compress_alg), json_level,
+                   true);
+
+    json_add_key(buf, "compress-level", json_level, true);
+    appendPQExpBuffer(buf, "%d", config->compress_level);
+
+    json_add(buf, JT_END_OBJECT, &json_level);
+}
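show_configure() brackets its work with show_configure_start()/show_configure_end(): the JSON document is accumulated in a PQExpBuffer and flushed to stdout in a single write. A minimal sketch of that buffer lifecycle, assuming only libpq's pqexpbuffer API (the function name emit_json_config is illustrative):

    #include <stdio.h>
    #include "pqexpbuffer.h"

    static void
    emit_json_config(void)
    {
        PQExpBufferData buf;

        initPQExpBuffer(&buf);              /* start: empty, growable buffer */
        appendPQExpBufferStr(&buf, "{");
        appendPQExpBuffer(&buf, "\n    \"compress-level\": %d", 1);
        appendPQExpBufferStr(&buf, "\n}\n");

        fputs(buf.data, stdout);            /* end: one write for the whole document */
        termPQExpBuffer(&buf);              /* release the buffer's memory */
    }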
src/help.c | 10

@@ -89,6 +89,7 @@ help_pg_probackup(void)
     printf(_("                 [--replica-timeout=timeout]\n"));
 
     printf(_("\n  %s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
+    printf(_("                 [--format=format]\n"));
 
     printf(_("\n  %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
     printf(_("                 [-C] [--stream [-S slot-name]] [--backup-pg-log]\n"));
@@ -128,6 +129,7 @@ help_pg_probackup(void)
 
     printf(_("\n  %s show -B backup-dir\n"), PROGRAM_NAME);
     printf(_("                 [--instance=instance_name [-i backup-id]]\n"));
+    printf(_("                 [--format=format]\n"));
 
     printf(_("\n  %s delete -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
     printf(_("                 [--wal] [-i backup-id | --expired]\n"));
@@ -357,11 +359,13 @@ static void
 help_show(void)
 {
     printf(_("%s show -B backup-dir\n"), PROGRAM_NAME);
-    printf(_("                 [--instance=instance_name [-i backup-id]]\n\n"));
+    printf(_("                 [--instance=instance_name [-i backup-id]]\n"));
+    printf(_("                 [--format=format]\n\n"));
 
     printf(_("  -B, --backup-path=backup-path   location of the backup storage area\n"));
     printf(_("      --instance=instance_name    show info about specific intstance\n"));
     printf(_("  -i, --backup-id=backup-id       show info about specific backups\n"));
+    printf(_("      --format=format             show format=PLAIN|JSON\n"));
 }
 
 static void
@@ -472,10 +476,12 @@ help_set_config(void)
 static void
 help_show_config(void)
 {
-    printf(_("%s show-config -B backup-dir --instance=instance_name\n\n"), PROGRAM_NAME);
+    printf(_("%s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME);
+    printf(_("                 [--format=format]\n\n"));
 
     printf(_("  -B, --backup-path=backup-path   location of the backup storage area\n"));
     printf(_("      --instance=instance_name    name of the instance\n"));
+    printf(_("      --format=format             show format=PLAIN|JSON\n"));
 }
 
 static void
src/pg_probackup.c

@@ -47,7 +47,6 @@ char *replication_slot = NULL;
 /* backup options */
 bool        backup_logs = false;
 bool        smooth_checkpoint;
-bool        from_replica = false;
 bool        is_remote_backup = false;
 /* Wait timeout for WAL segment archiving */
 uint32      archive_timeout = 300;      /* default is 300 seconds */
@@ -83,7 +82,7 @@ uint32 retention_window = 0;
 /* compression options */
 CompressAlg compress_alg = NOT_DEFINED_COMPRESS;
 int         compress_level = DEFAULT_COMPRESS_LEVEL;
-bool        compress_shortcut = false;
+bool        compress_shortcut = false;
 
 /* other options */
 char       *instance_name;
@@ -94,6 +93,9 @@ static char *wal_file_path;
 static char *wal_file_name;
 static bool file_overwrite = false;
 
+/* show options */
+ShowFormat  show_format = SHOW_PLAIN;
+
 /* current settings */
 pgBackup    current;
 ProbackupSubcmd backup_subcmd;
@@ -104,6 +106,7 @@ static void opt_backup_mode(pgut_option *opt, const char *arg);
 static void opt_log_level_console(pgut_option *opt, const char *arg);
 static void opt_log_level_file(pgut_option *opt, const char *arg);
 static void opt_compress_alg(pgut_option *opt, const char *arg);
+static void opt_show_format(pgut_option *opt, const char *arg);
 
 static void compress_init(void);
 
@@ -178,6 +181,8 @@ static pgut_option options[] =
     { 's', 160, "wal-file-path",    &wal_file_path,     SOURCE_CMDLINE },
     { 's', 161, "wal-file-name",    &wal_file_name,     SOURCE_CMDLINE },
     { 'b', 162, "overwrite",        &file_overwrite,    SOURCE_CMDLINE },
+    /* show options */
+    { 'f', 170, "format",           opt_show_format,    SOURCE_CMDLINE },
     { 0 }
 };
 
@@ -517,49 +522,31 @@ opt_log_level_file(pgut_option *opt, const char *arg)
     log_level_file = parse_log_level(arg);
 }
 
-CompressAlg
-parse_compress_alg(const char *arg)
-{
-    size_t      len;
-
-    /* Skip all spaces detected */
-    while (isspace((unsigned char)*arg))
-        arg++;
-    len = strlen(arg);
-
-    if (len == 0)
-        elog(ERROR, "compress algrorithm is empty");
-
-    if (pg_strncasecmp("zlib", arg, len) == 0)
-        return ZLIB_COMPRESS;
-    else if (pg_strncasecmp("pglz", arg, len) == 0)
-        return PGLZ_COMPRESS;
-    else if (pg_strncasecmp("none", arg, len) == 0)
-        return NONE_COMPRESS;
-    else
-        elog(ERROR, "invalid compress algorithm value \"%s\"", arg);
-
-    return NOT_DEFINED_COMPRESS;
-}
-
-const char*
-deparse_compress_alg(int alg)
+static void
+opt_show_format(pgut_option *opt, const char *arg)
 {
-    switch (alg)
+    const char *v = arg;
+    size_t      len;
+
+    while (IsSpace(*v))
+        v++;
+    len = strlen(v);
+
+    if (len > 0)
     {
-        case NONE_COMPRESS:
-        case NOT_DEFINED_COMPRESS:
-            return "none";
-        case ZLIB_COMPRESS:
-            return "zlib";
-        case PGLZ_COMPRESS:
-            return "pglz";
+        if (pg_strncasecmp("plain", v, len) == 0)
+            show_format = SHOW_PLAIN;
+        else if (pg_strncasecmp("json", v, len) == 0)
+            show_format = SHOW_JSON;
+        else
+            elog(ERROR, "Invalid show format \"%s\"", arg);
     }
-
-    return NULL;
+    else
+        elog(ERROR, "Invalid show format \"%s\"", arg);
 }
 
-void
+static void
 opt_compress_alg(pgut_option *opt, const char *arg)
 {
     compress_alg = parse_compress_alg(arg);
@@ -568,8 +555,8 @@ opt_compress_alg(pgut_option *opt, const char *arg)
 /*
  * Initialize compress and sanity checks for compress.
  */
-static
-void compress_init(void)
+static void
+compress_init(void)
 {
     /* Default algorithm is zlib */
     if (compress_shortcut)
src/pg_probackup.h

@@ -149,6 +149,12 @@ typedef enum ProbackupSubcmd
     SHOW_CONFIG
 } ProbackupSubcmd;
 
+typedef enum ShowFormat
+{
+    SHOW_PLAIN,
+    SHOW_JSON
+} ShowFormat;
+
 
 /* special values of pgBackup fields */
 #define INVALID_BACKUP_ID   0   /* backup ID is not provided by user */
@@ -214,20 +220,25 @@ typedef struct pgBackup
     /* Size of WAL files in archive needed to restore this backup */
     int64       wal_bytes;
 
+    CompressAlg compress_alg;
+    int         compress_level;
+
     /* Fields needed for compatibility check */
     uint32      block_size;
     uint32      wal_block_size;
     uint32      checksum_version;
 
+    char        program_version[100];
     char        server_version[100];
 
-    bool        stream; /* Was this backup taken in stream mode?
+    bool        stream;         /* Was this backup taken in stream mode?
                          * i.e. does it include all needed WAL files? */
+    bool        from_replica;   /* Was this backup taken from replica */
     time_t      parent_backup;  /* Identifier of the previous backup.
                                  * Which is basic backup for this
                                  * incremental backup. */
-    char        *primary_conninfo; /* Connection parameters of the backup
-                                    * in the format suitable for recovery.conf */
+    char        *primary_conninfo;  /* Connection parameters of the backup
+                                     * in the format suitable for recovery.conf */
 } pgBackup;
 
 /* Recovery target for restore and validate subcommands */
@@ -310,7 +321,6 @@ extern char *replication_slot;
 /* backup options */
 extern bool smooth_checkpoint;
 extern uint32 archive_timeout;
-extern bool from_replica;
 extern bool is_remote_backup;
 extern const char *master_db;
 extern const char *master_host;
@@ -348,6 +358,9 @@ extern const char* deparse_compress_alg(int alg);
 extern char *instance_name;
 extern uint64 system_identifier;
 
+/* show options */
+extern ShowFormat show_format;
+
 /* current settings */
 extern pgBackup current;
 extern ProbackupSubcmd backup_subcmd;
src/show.c | 389

@@ -3,28 +3,40 @@
  * show.c: show backup information.
  *
  * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
- * Portions Copyright (c) 2015-2017, Postgres Professional
+ * Portions Copyright (c) 2015-2018, Postgres Professional
  *
  *-------------------------------------------------------------------------
 */
 
 #include "pg_probackup.h"
 
 #include <time.h>
 #include <unistd.h>
 #include <dirent.h>
 #include <sys/stat.h>
 
 #include "pqexpbuffer.h"
 
-static void show_backup_list(FILE *out, parray *backup_list);
-static void show_backup_detail(FILE *out, pgBackup *backup);
-static int do_show_instance(time_t requested_backup_id);
+#include "utils/json.h"
+
+
+static void show_instance_start(void);
+static void show_instance_end(void);
+static void show_instance(time_t requested_backup_id, bool show_name);
+static int show_backup(time_t requested_backup_id);
+
+static void show_instance_plain(parray *backup_list, bool show_name);
+static void show_instance_json(parray *backup_list);
+
+static PQExpBufferData show_buf;
+static bool first_instance = true;
+static int32 json_level = 0;
 
 int
 do_show(time_t requested_backup_id)
 {
-    if (instance_name == NULL
-        && requested_backup_id != INVALID_BACKUP_ID)
+    if (instance_name == NULL &&
+        requested_backup_id != INVALID_BACKUP_ID)
        elog(ERROR, "You must specify --instance to use --backup_id option");
 
     if (instance_name == NULL)
@@ -38,10 +50,12 @@ do_show(time_t requested_backup_id)
         join_path_components(path, backup_path, BACKUPS_DIR);
         dir = opendir(path);
         if (dir == NULL)
-            elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno));
+            elog(ERROR, "Cannot open directory \"%s\": %s",
+                 path, strerror(errno));
 
-        errno = 0;
-        while ((dent = readdir(dir)))
+        show_instance_start();
+
+        while (errno = 0, (dent = readdir(dir)) != NULL)
         {
             char        child[MAXPGPATH];
             struct stat st;
@@ -54,73 +68,47 @@ do_show(time_t requested_backup_id)
             join_path_components(child, path, dent->d_name);
 
             if (lstat(child, &st) == -1)
-                elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno));
+                elog(ERROR, "Cannot stat file \"%s\": %s",
+                     child, strerror(errno));
 
             if (!S_ISDIR(st.st_mode))
                 continue;
 
             instance_name = dent->d_name;
             sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name);
-            fprintf(stdout, "\nBACKUP INSTANCE '%s'\n", instance_name);
-            do_show_instance(0);
+
+            show_instance(INVALID_BACKUP_ID, true);
         }
+
+        if (errno)
+            elog(ERROR, "Cannot read directory \"%s\": %s",
+                 path, strerror(errno));
+
+        if (closedir(dir))
+            elog(ERROR, "Cannot close directory \"%s\": %s",
+                 path, strerror(errno));
+
+        show_instance_end();
+
         return 0;
     }
+    else if (requested_backup_id == INVALID_BACKUP_ID ||
+             show_format == SHOW_JSON)
+    {
+        show_instance_start();
+        show_instance(requested_backup_id, false);
+        show_instance_end();
+
+        return 0;
+    }
     else
-        return do_show_instance(requested_backup_id);
-}
-
-/*
- * If 'requested_backup_id' is INVALID_BACKUP_ID, show brief meta information
- * about all backups in the backup instance.
- * If valid backup id is passed, show detailed meta information
- * about specified backup.
- */
-static int
-do_show_instance(time_t requested_backup_id)
-{
-    if (requested_backup_id != INVALID_BACKUP_ID)
-    {
-        pgBackup   *backup;
-
-        backup = read_backup(requested_backup_id);
-        if (backup == NULL)
-        {
-            elog(INFO, "Requested backup \"%s\" is not found.",
-                 /* We do not need free base36enc's result, we exit anyway */
-                 base36enc(requested_backup_id));
-            /* This is not error */
-            return 0;
-        }
-
-        show_backup_detail(stdout, backup);
-
-        /* cleanup */
-        pgBackupFree(backup);
-
-    }
-    else
-    {
-        parray     *backup_list;
-
-        backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
-        if (backup_list == NULL)
-            elog(ERROR, "Failed to get backup list.");
-
-        show_backup_list(stdout, backup_list);
-
-        /* cleanup */
-        parray_walk(backup_list, pgBackupFree);
-        parray_free(backup_list);
-    }
-
-    return 0;
+        return show_backup(requested_backup_id);
 }
 
 static void
 pretty_size(int64 size, char *buf, size_t len)
 {
-    int exp = 0;
+    int         exp = 0;
 
     /* minus means the size is invalid */
     if (size < 0)
@@ -219,16 +207,113 @@ get_parent_tli(TimeLineID child_tli)
     return result;
 }
 
+/*
+ * Initialize instance visualization.
+ */
 static void
-show_backup_list(FILE *out, parray *backup_list)
+show_instance_start(void)
 {
+    initPQExpBuffer(&show_buf);
+
+    if (show_format == SHOW_PLAIN)
+        return;
+
+    first_instance = true;
+    json_level = 0;
+
+    appendPQExpBufferChar(&show_buf, '[');
+    json_level++;
+}
+
+/*
+ * Finalize instance visualization.
+ */
+static void
+show_instance_end(void)
+{
+    if (show_format == SHOW_JSON)
+        appendPQExpBufferStr(&show_buf, "\n]\n");
+
+    fputs(show_buf.data, stdout);
+    termPQExpBuffer(&show_buf);
+}
+
+/*
+ * Show brief meta information about all backups in the backup instance.
+ */
+static void
+show_instance(time_t requested_backup_id, bool show_name)
+{
+    parray     *backup_list;
+
+    backup_list = catalog_get_backup_list(requested_backup_id);
+    if (backup_list == NULL)
+        elog(ERROR, "Failed to get backup list.");
+
+    if (show_format == SHOW_PLAIN)
+        show_instance_plain(backup_list, show_name);
+    else if (show_format == SHOW_JSON)
+        show_instance_json(backup_list);
+    else
+        elog(ERROR, "Invalid show format %d", (int) show_format);
+
+    /* cleanup */
+    parray_walk(backup_list, pgBackupFree);
+    parray_free(backup_list);
+}
+
+/*
+ * Show detailed meta information about specified backup.
+ */
+static int
+show_backup(time_t requested_backup_id)
+{
+    pgBackup   *backup;
+
+    backup = read_backup(requested_backup_id);
+    if (backup == NULL)
+    {
+        elog(INFO, "Requested backup \"%s\" is not found.",
+             /* We do not need free base36enc's result, we exit anyway */
+             base36enc(requested_backup_id));
+        /* This is not error */
+        return 0;
+    }
+
+    if (show_format == SHOW_PLAIN)
+        pgBackupWriteControl(stdout, backup);
+    else
+        elog(ERROR, "Invalid show format %d", (int) show_format);
+
+    /* cleanup */
+    pgBackupFree(backup);
+
+    return 0;
+}
+
+/*
+ * Plain output.
+ */
+
+/*
+ * Show instance backups in plain format.
+ */
+static void
+show_instance_plain(parray *backup_list, bool show_name)
+{
+    int         i;
+
+    if (show_name)
+        printfPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name);
+
     /* if you add new fields here, fix the header */
     /* show header */
-    fputs("============================================================================================================================================\n", out);
-    fputs(" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n", out);
-    fputs("============================================================================================================================================\n", out);
+    appendPQExpBufferStr(&show_buf,
+                         "============================================================================================================================================\n");
+    appendPQExpBufferStr(&show_buf,
                         " Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n");
+    appendPQExpBufferStr(&show_buf,
+                         "============================================================================================================================================\n");
 
     for (i = 0; i < parray_num(backup_list); i++)
     {
@@ -255,27 +340,163 @@ show_backup_list(FILE *out, parray *backup_list)
         /* Get parent timeline before printing */
         parent_tli = get_parent_tli(backup->tli);
 
-        fprintf(out, " %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
-                instance_name,
-                (backup->server_version[0] ? backup->server_version : "----"),
-                base36enc(backup->start_time),
-                timestamp,
-                pgBackupGetBackupMode(backup),
-                backup->stream ? "STREAM": "ARCHIVE",
-                backup->tli,
-                parent_tli,
-                duration,
-                data_bytes_str,
-                (uint32) (backup->start_lsn >> 32),
-                (uint32) backup->start_lsn,
-                (uint32) (backup->stop_lsn >> 32),
-                (uint32) backup->stop_lsn,
-                status2str(backup->status));
+        appendPQExpBuffer(&show_buf,
+                          " %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n",
+                          instance_name,
+                          (backup->server_version[0] ? backup->server_version : "----"),
+                          base36enc(backup->start_time),
+                          timestamp,
+                          pgBackupGetBackupMode(backup),
+                          backup->stream ? "STREAM": "ARCHIVE",
+                          backup->tli,
+                          parent_tli,
+                          duration,
+                          data_bytes_str,
+                          (uint32) (backup->start_lsn >> 32),
+                          (uint32) backup->start_lsn,
+                          (uint32) (backup->stop_lsn >> 32),
+                          (uint32) backup->stop_lsn,
+                          status2str(backup->status));
     }
 }
 
+/*
+ * Json output.
+ */
+
+/*
+ * Show instance backups in json format.
+ */
 static void
-show_backup_detail(FILE *out, pgBackup *backup)
+show_instance_json(parray *backup_list)
 {
-    pgBackupWriteControl(out, backup);
+    int         i;
+    PQExpBuffer buf = &show_buf;
+
+    if (!first_instance)
+        appendPQExpBufferChar(buf, ',');
+
+    /* Begin of instance object */
+    json_add(buf, JT_BEGIN_OBJECT, &json_level);
+
+    json_add_value(buf, "instance", instance_name, json_level, false);
+    json_add_key(buf, "backups", json_level, true);
+
+    /*
+     * List backups.
+     */
+    json_add(buf, JT_BEGIN_ARRAY, &json_level);
+
+    for (i = 0; i < parray_num(backup_list); i++)
+    {
+        pgBackup   *backup = parray_get(backup_list, i);
+        TimeLineID  parent_tli;
+        char        timestamp[100] = "----";
+        char        lsn[20];
+
+        if (i != 0)
+            appendPQExpBufferChar(buf, ',');
+
+        json_add(buf, JT_BEGIN_OBJECT, &json_level);
+
+        json_add_value(buf, "id", base36enc(backup->start_time), json_level,
+                       false);
+
+        if (backup->parent_backup != 0)
+            json_add_value(buf, "parent-backup-id",
+                           base36enc(backup->parent_backup), json_level, true);
+
+        json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup),
+                       json_level, true);
+
+        json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE",
+                       json_level, true);
+
+        json_add_value(buf, "compress-alg",
+                       deparse_compress_alg(backup->compress_alg), json_level,
+                       true);
+
+        json_add_key(buf, "compress-level", json_level, true);
+        appendPQExpBuffer(buf, "%d", backup->compress_level);
+
+        json_add_value(buf, "from-replica",
+                       backup->from_replica ? "true" : "false", json_level,
+                       true);
+
+        json_add_key(buf, "block-size", json_level, true);
+        appendPQExpBuffer(buf, "%u", backup->block_size);
+
+        json_add_key(buf, "xlog-block-size", json_level, true);
+        appendPQExpBuffer(buf, "%u", backup->wal_block_size);
+
+        json_add_key(buf, "checksum-version", json_level, true);
+        appendPQExpBuffer(buf, "%u", backup->checksum_version);
+
+        json_add_value(buf, "program-version", backup->program_version,
+                       json_level, true);
+        json_add_value(buf, "server-version", backup->server_version,
+                       json_level, true);
+
+        json_add_key(buf, "current-tli", json_level, true);
+        appendPQExpBuffer(buf, "%d", backup->tli);
+
+        json_add_key(buf, "parent-tli", json_level, true);
+        parent_tli = get_parent_tli(backup->tli);
+        appendPQExpBuffer(buf, "%u", parent_tli);
+
+        snprintf(lsn, lengthof(lsn), "%X/%X",
+                 (uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn);
+        json_add_value(buf, "start-lsn", lsn, json_level, true);
+
+        snprintf(lsn, lengthof(lsn), "%X/%X",
+                 (uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn);
+        json_add_value(buf, "stop-lsn", lsn, json_level, true);
+
+        time2iso(timestamp, lengthof(timestamp), backup->start_time);
+        json_add_value(buf, "start-time", timestamp, json_level, true);
+
+        if (backup->end_time)
+        {
+            time2iso(timestamp, lengthof(timestamp), backup->end_time);
+            json_add_value(buf, "end-time", timestamp, json_level, true);
+        }
+
+        json_add_key(buf, "recovery-xid", json_level, true);
+        appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid);
+
+        if (backup->recovery_time > 0)
+        {
+            time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
+            json_add_value(buf, "recovery-time", timestamp, json_level, true);
+        }
+
+        if (backup->data_bytes != BYTES_INVALID)
+        {
+            json_add_key(buf, "data-bytes", json_level, true);
+            appendPQExpBuffer(buf, INT64_FORMAT, backup->data_bytes);
+        }
+
+        if (backup->wal_bytes != BYTES_INVALID)
+        {
+            json_add_key(buf, "wal-bytes", json_level, true);
+            appendPQExpBuffer(buf, INT64_FORMAT, backup->wal_bytes);
+        }
+
+        if (backup->primary_conninfo)
+            json_add_value(buf, "primary_conninfo", backup->primary_conninfo,
+                           json_level, true);
+
+        json_add_value(buf, "status", status2str(backup->status), json_level,
+                       true);
+
+        json_add(buf, JT_END_OBJECT, &json_level);
+    }
+
+    /* End of backups */
+    json_add(buf, JT_END_ARRAY, &json_level);
+
+    /* End of instance object */
+    json_add(buf, JT_END_OBJECT, &json_level);
+
+    first_instance = false;
 }
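With these pieces in place, `pg_probackup show --format=json` emits an array of instance objects, each carrying a `backups` array built by show_instance_json(). An abridged, hand-written sample of the resulting shape (keys as produced by the code above; values illustrative):

    [
        {
            "instance": "node1",
            "backups": [
                {
                    "id": "PZ7YK2",
                    "backup-mode": "FULL",
                    "wal": "ARCHIVE",
                    "compress-alg": "none",
                    "compress-level": 1,
                    "from-replica": "false",
                    "status": "OK"
                }
            ]
        }
    ]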
src/util.c | 14

@@ -176,8 +176,8 @@ uint32
 get_data_checksum_version(bool safe)
 {
     ControlFileData ControlFile;
-    char       *buffer;
-    size_t      size;
+    char       *buffer;
+    size_t      size;
 
     /* First fetch file... */
     buffer = slurpFile(pgdata, "global/pg_control", &size, safe);
@@ -310,11 +310,21 @@ pgBackup_init(pgBackup *backup)
     backup->end_time = (time_t) 0;
     backup->recovery_xid = 0;
     backup->recovery_time = (time_t) 0;
+
     backup->data_bytes = BYTES_INVALID;
     backup->wal_bytes = BYTES_INVALID;
+
+    backup->compress_alg = NOT_DEFINED_COMPRESS;
+    backup->compress_level = 0;
+
     backup->block_size = BLCKSZ;
     backup->wal_block_size = XLOG_BLCKSZ;
     backup->checksum_version = 0;
+
     backup->stream = false;
+    backup->from_replica = false;
     backup->parent_backup = 0;
     backup->primary_conninfo = NULL;
+    backup->program_version[0] = '\0';
+    backup->server_version[0] = '\0';
 }
src/utils/json.c | 134 (new file)

@@ -0,0 +1,134 @@
+/*-------------------------------------------------------------------------
+ *
+ * json.c: - make json document.
+ *
+ * Copyright (c) 2018, Postgres Professional
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "json.h"
+
+static void json_add_indent(PQExpBuffer buf, int32 level);
+static void json_add_escaped(PQExpBuffer buf, const char *str);
+
+/*
+ * Start or end json token. Currently it is a json object or array.
+ *
+ * Function modifies level value and adds indent if it appropriate.
+ */
+void
+json_add(PQExpBuffer buf, JsonToken type, int32 *level)
+{
+    switch (type)
+    {
+        case JT_BEGIN_ARRAY:
+            appendPQExpBufferChar(buf, '[');
+            *level += 1;
+            break;
+        case JT_END_ARRAY:
+            *level -= 1;
+            if (*level == 0)
+                appendPQExpBufferChar(buf, '\n');
+            else
+                json_add_indent(buf, *level);
+            appendPQExpBufferChar(buf, ']');
+            break;
+        case JT_BEGIN_OBJECT:
+            json_add_indent(buf, *level);
+            appendPQExpBufferChar(buf, '{');
+            *level += 1;
+            break;
+        case JT_END_OBJECT:
+            *level -= 1;
+            if (*level == 0)
+                appendPQExpBufferChar(buf, '\n');
+            else
+                json_add_indent(buf, *level);
+            appendPQExpBufferChar(buf, '}');
+            break;
+        default:
+            break;
+    }
+}
+
+/*
+ * Add json object's key. If it isn't first key we need to add a comma.
+ */
+void
+json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma)
+{
+    if (add_comma)
+        appendPQExpBufferChar(buf, ',');
+    json_add_indent(buf, level);
+
+    json_add_escaped(buf, name);
+    appendPQExpBufferStr(buf, ": ");
+}
+
+/*
+ * Add json object's key and value. If it isn't first key we need to add a
+ * comma.
+ */
+void
+json_add_value(PQExpBuffer buf, const char *name, const char *value,
+               int32 level, bool add_comma)
+{
+    json_add_key(buf, name, level, add_comma);
+    json_add_escaped(buf, value);
+}
+
+static void
+json_add_indent(PQExpBuffer buf, int32 level)
+{
+    uint16      i;
+
+    if (level == 0)
+        return;
+
+    appendPQExpBufferChar(buf, '\n');
+    for (i = 0; i < level; i++)
+        appendPQExpBufferStr(buf, "    ");
+}
+
+static void
+json_add_escaped(PQExpBuffer buf, const char *str)
+{
+    const char *p;
+
+    appendPQExpBufferChar(buf, '"');
+    for (p = str; *p; p++)
+    {
+        switch (*p)
+        {
+            case '\b':
+                appendPQExpBufferStr(buf, "\\b");
+                break;
+            case '\f':
+                appendPQExpBufferStr(buf, "\\f");
+                break;
+            case '\n':
+                appendPQExpBufferStr(buf, "\\n");
+                break;
+            case '\r':
+                appendPQExpBufferStr(buf, "\\r");
+                break;
+            case '\t':
+                appendPQExpBufferStr(buf, "\\t");
+                break;
+            case '"':
+                appendPQExpBufferStr(buf, "\\\"");
+                break;
+            case '\\':
+                appendPQExpBufferStr(buf, "\\\\");
+                break;
+            default:
+                if ((unsigned char) *p < ' ')
+                    appendPQExpBuffer(buf, "\\u%04x", (int) *p);
+                else
+                    appendPQExpBufferChar(buf, *p);
+                break;
+        }
+    }
+    appendPQExpBufferChar(buf, '"');
+}
src/utils/json.h | 33 (new file)

@@ -0,0 +1,33 @@
+/*-------------------------------------------------------------------------
+ *
+ * json.h: - prototypes of json output functions.
+ *
+ * Copyright (c) 2018, Postgres Professional
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef PROBACKUP_JSON_H
+#define PROBACKUP_JSON_H
+
+#include "postgres_fe.h"
+#include "pqexpbuffer.h"
+
+/*
+ * Json document tokens.
+ */
+typedef enum
+{
+    JT_BEGIN_ARRAY,
+    JT_END_ARRAY,
+    JT_BEGIN_OBJECT,
+    JT_END_OBJECT
+} JsonToken;
+
+extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level);
+extern void json_add_key(PQExpBuffer buf, const char *name, int32 level,
+                         bool add_comma);
+extern void json_add_value(PQExpBuffer buf, const char *name, const char *value,
+                           int32 level, bool add_comma);
+
+#endif   /* PROBACKUP_JSON_H */
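A short sketch of how the new helpers compose (this mirrors the call pattern in show_configure_json() and show_instance_json(); the main() scaffolding is illustrative only):

    #include <stdio.h>
    #include "pqexpbuffer.h"
    #include "utils/json.h"

    int
    main(void)
    {
        PQExpBufferData buf;
        int32       level = 0;

        initPQExpBuffer(&buf);

        json_add(&buf, JT_BEGIN_OBJECT, &level);                 /* "{", level -> 1 */
        json_add_value(&buf, "instance", "node1", level, false); /* first key: no leading comma */
        json_add_value(&buf, "status", "OK", level, true);       /* later keys pass add_comma = true */
        json_add(&buf, JT_END_OBJECT, &level);                   /* newline + "}", level -> 0 */

        fputs(buf.data, stdout);    /* prints a pretty-printed two-key object */
        termPQExpBuffer(&buf);
        return 0;
    }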
src/utils/pgut.h

@@ -59,7 +59,7 @@ typedef enum pgut_optsrc
 typedef struct pgut_option
 {
     char        type;
-    char        sname;      /* short name */
+    uint8       sname;      /* short name */
     const char *lname;      /* long name */
     void       *var;        /* pointer to variable */
     pgut_optsrc allowed;    /* allowed source */
@@ -426,7 +426,11 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
     # @unittest.expectedFailure
     # @unittest.skip("skip")
     def test_replica_archive(self):
-        """make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
+        """
+        make node without archiving, take stream backup and
+        turn it into replica, set replica with archiving,
+        make archive backup from replica
+        """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(
@@ -467,7 +471,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
 
-        # Change data on master, take FULL backup from replica, restore taken backup and check that restored data equal to original data
+        # Change data on master, take FULL backup from replica,
+        # restore taken backup and check that restored data equal
+        # to original data
         master.psql(
             "postgres",
             "insert into t_heap as select i as id, md5(i::text) as text, "
@@ -501,7 +507,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
         after = node.safe_psql("postgres", "SELECT * FROM t_heap")
         self.assertEqual(before, after)
 
-        # Change data on master, make PAGE backup from replica, restore taken backup and check that restored data equal to original data
+        # Change data on master, make PAGE backup from replica,
+        # restore taken backup and check that restored data equal
+        # to original data
         master.psql(
             "postgres",
             "insert into t_heap as select i as id, md5(i::text) as text, "
@@ -29,15 +29,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.set_archiving(backup_dir, 'node', node)
         node.start()
 
-        # full backup mode
-        # with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-        #     backup_log.write(self.backup_node(node, options=["--verbose"]))
-
         backup_id = self.backup_node(backup_dir, 'node', node)
         show_backup = self.show_pb(backup_dir, 'node')[0]
 
-        self.assertEqual(show_backup['Status'], "OK")
-        self.assertEqual(show_backup['Mode'], "FULL")
+        self.assertEqual(show_backup['status'], "OK")
+        self.assertEqual(show_backup['backup-mode'], "FULL")
 
         # postmaster.pid and postmaster.opts shouldn't be copied
         excluded = True
@@ -61,29 +57,29 @@ class BackupTest(ProbackupTest, unittest.TestCase):
 
         # print self.show_pb(node)
         show_backup = self.show_pb(backup_dir, 'node')[1]
-        self.assertEqual(show_backup['Status'], "OK")
-        self.assertEqual(show_backup['Mode'], "PAGE")
+        self.assertEqual(show_backup['status'], "OK")
+        self.assertEqual(show_backup['backup-mode'], "PAGE")
 
         # Check parent backup
         self.assertEqual(
             backup_id,
             self.show_pb(
                 backup_dir, 'node',
-                backup_id=show_backup['ID'])["parent-backup-id"])
+                backup_id=show_backup['id'])["parent-backup-id"])
 
         # ptrack backup mode
         self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
 
         show_backup = self.show_pb(backup_dir, 'node')[2]
-        self.assertEqual(show_backup['Status'], "OK")
-        self.assertEqual(show_backup['Mode'], "PTRACK")
+        self.assertEqual(show_backup['status'], "OK")
+        self.assertEqual(show_backup['backup-mode'], "PTRACK")
 
         # Check parent backup
         self.assertEqual(
             page_backup_id,
             self.show_pb(
                 backup_dir, 'node',
-                backup_id=show_backup['ID'])["parent-backup-id"])
+                backup_id=show_backup['id'])["parent-backup-id"])
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -106,7 +102,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.backup_node(
             backup_dir, 'node', node,
             options=["-C"])
-        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
         node.stop()
 
         # Clean after yourself
@@ -162,7 +158,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                     repr(e.message), self.cmd))
 
         self.assertEqual(
-            self.show_pb(backup_dir, 'node')[0]['Status'],
+            self.show_pb(backup_dir, 'node')[0]['status'],
             "ERROR")
 
         # Clean after yourself
@@ -227,7 +223,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(
             self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
         self.assertEqual(
-            self.show_pb(backup_dir, 'node')[1]['Status'], "ERROR")
+            self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -250,12 +246,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.backup_node(
             backup_dir, 'node', node,
             backup_type="full", options=["-j", "4"])
-        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
 
         self.backup_node(
             backup_dir, 'node', node,
             backup_type="ptrack", options=["-j", "4"])
-        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -282,11 +278,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'node', node, backup_type="full",
             options=["-j", "4", "--stream"])
 
-        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
         self.backup_node(
             backup_dir, 'node', node,
             backup_type="ptrack", options=["-j", "4", "--stream"])
-        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
+        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -342,7 +338,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             f.close
 
         self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
+            self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
             "Backup Status should be OK")
 
         # Clean after yourself
@@ -415,7 +411,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                 repr(e.message), self.cmd))
 
         self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
+            self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
             "Backup Status should be ERROR")
 
         # Clean after yourself
@@ -44,13 +44,13 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         self.backup_node(backup_dir, 'node', node)
 
         show_backups = self.show_pb(backup_dir, 'node')
-        id_1 = show_backups[0]['ID']
-        id_2 = show_backups[1]['ID']
-        id_3 = show_backups[2]['ID']
+        id_1 = show_backups[0]['id']
+        id_2 = show_backups[1]['id']
+        id_3 = show_backups[2]['id']
         self.delete_pb(backup_dir, 'node', id_2)
         show_backups = self.show_pb(backup_dir, 'node')
-        self.assertEqual(show_backups[0]['ID'], id_1)
-        self.assertEqual(show_backups[1]['ID'], id_3)
+        self.assertEqual(show_backups[0]['id'], id_1)
+        self.assertEqual(show_backups[1]['id'], id_3)
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -82,15 +82,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(len(show_backups), 4)
 
         # delete first page backup
-        self.delete_pb(backup_dir, 'node', show_backups[1]['ID'])
+        self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
 
         show_backups = self.show_pb(backup_dir, 'node')
         self.assertEqual(len(show_backups), 2)
 
-        self.assertEqual(show_backups[0]['Mode'], "FULL")
-        self.assertEqual(show_backups[0]['Status'], "OK")
-        self.assertEqual(show_backups[1]['Mode'], "FULL")
-        self.assertEqual(show_backups[1]['Status'], "OK")
+        self.assertEqual(show_backups[0]['backup-mode'], "FULL")
+        self.assertEqual(show_backups[0]['status'], "OK")
+        self.assertEqual(show_backups[1]['backup-mode'], "FULL")
+        self.assertEqual(show_backups[1]['status'], "OK")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -122,15 +122,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(len(show_backups), 4)
 
         # delete first page backup
-        self.delete_pb(backup_dir, 'node', show_backups[1]['ID'])
+        self.delete_pb(backup_dir, 'node', show_backups[1]['id'])
 
         show_backups = self.show_pb(backup_dir, 'node')
         self.assertEqual(len(show_backups), 2)
 
-        self.assertEqual(show_backups[0]['Mode'], "FULL")
-        self.assertEqual(show_backups[0]['Status'], "OK")
-        self.assertEqual(show_backups[1]['Mode'], "FULL")
-        self.assertEqual(show_backups[1]['Status'], "OK")
+        self.assertEqual(show_backups[0]['backup-mode'], "FULL")
+        self.assertEqual(show_backups[0]['status'], "OK")
+        self.assertEqual(show_backups[1]['backup-mode'], "FULL")
+        self.assertEqual(show_backups[1]['status'], "OK")
 
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -1191,7 +1191,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
             f.close
 
         self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK',
+            self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
            "Backup Status should be OK")
 
         # Clean after yourself
@@ -1264,7 +1264,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
                 repr(e.message), self.cmd))
 
         self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR',
+            self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
             "Backup Status should be ERROR")
 
         # Clean after yourself
@ -43,25 +43,33 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):
"WHERE oid = pg_my_temp_schema()")[0][0]
conn.commit()

temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace("pg_", "")
temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace(
"pg_", "")
conn.commit()

conn.execute("create index test_idx on test (generate_series)")
conn.commit()

heap_path = conn.execute("select pg_relation_filepath('test')")[0][0]
heap_path = conn.execute(
"select pg_relation_filepath('test')")[0][0]
conn.commit()

index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0]
index_path = conn.execute(
"select pg_relation_filepath('test_idx')")[0][0]
conn.commit()

heap_oid = conn.execute("select 'test'::regclass::oid")[0][0]
conn.commit()

toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
toast_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0]
conn.commit()

toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid) + "_index"))[0][0]
toast_idx_path = conn.execute(
"select pg_relation_filepath('{0}.{1}')".format(
temp_toast_schema_name,
"pg_toast_" + str(heap_oid) + "_index"))[0][0]
conn.commit()

temp_table_filename = os.path.basename(heap_path)
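The reflowed queries above all feed pg_relation_filepath() to locate the on-disk files of the temp table, its index, and its TOAST relations. The TOAST names are derived from the heap's OID; a small sketch of the derivation the test relies on:

# Sketch of the TOAST naming rule used above: a heap with OID <oid> gets a
# TOAST table pg_toast_<oid> plus an index pg_toast_<oid>_index, here inside
# the session's temporary toast schema.
def toast_names(temp_toast_schema_name, heap_oid):
    toast_table = "pg_toast_" + str(heap_oid)
    return ("{0}.{1}".format(temp_toast_schema_name, toast_table),
            "{0}.{1}_index".format(temp_toast_schema_name, toast_table))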
@ -12,6 +12,7 @@ import select
import psycopg2
from time import sleep
import re
import json

idx_ptrack = {
't_heap': {

@ -598,7 +599,7 @@ class ProbackupTest(object):
def show_pb(
self, backup_dir, instance=None, backup_id=None,
options=[], as_text=False
options=[], as_text=False, as_json=True
):

backup_list = []
@ -613,63 +614,83 @@ class ProbackupTest(object):
if backup_id:
cmd_list += ["-i", backup_id]

if as_json:
cmd_list += ["--format=json"]

if as_text:
# You should print it when calling as_text=true
return self.run_pb(cmd_list + options)

# get show result as list of lines
show_splitted = self.run_pb(cmd_list + options).splitlines()
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
# cut backup records from show as single list
# with string for every backup record
body = show_splitted[3:]
# inverse list so oldest record come first
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
if as_json:
data = json.loads(self.run_pb(cmd_list + options))
# print(data)
for instance_data in data:
# find specific instance if requested
if instance and instance_data['instance'] != instance:
continue
header_split = [
header_element.rstrip() for header_element in header_split
]
for backup_record in body:
backup_record = backup_record.rstrip()
# split list with str for every backup record element
backup_record_split = re.split(" +", backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print(warning.format(
header=header, body=body,
header_split=header_split,
body_split=backup_record_split)
)
exit(1)
new_dict = dict(zip(header_split, backup_record_split))
backup_list.append(new_dict)

for backup in reversed(instance_data['backups']):
# find specific backup if requested
if backup_id:
if backup['id'] == backup_id:
return backup
else:
backup_list.append(backup)
return backup_list
else:
# cut out empty lines and lines started with #
# and other garbage then reconstruct it as dictionary
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [
item for item in sanitized_show if not item.startswith('#')
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
return specific_record
show_splitted = self.run_pb(cmd_list + options).splitlines()
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
# cut backup records from show as single list
# with string for every backup record
body = show_splitted[3:]
# inverse list so oldest record come first
body = body[::-1]
# split string in list with string for every header element
header_split = re.split(" +", header)
# Remove empty items
for i in header_split:
if i == '':
header_split.remove(i)
continue
header_split = [
header_element.rstrip() for header_element in header_split
]
for backup_record in body:
backup_record = backup_record.rstrip()
# split list with str for every backup record element
backup_record_split = re.split(" +", backup_record)
# Remove empty items
for i in backup_record_split:
if i == '':
backup_record_split.remove(i)
if len(header_split) != len(backup_record_split):
print(warning.format(
header=header, body=body,
header_split=header_split,
body_split=backup_record_split)
)
exit(1)
new_dict = dict(zip(header_split, backup_record_split))
backup_list.append(new_dict)
return backup_list
else:
# cut out empty lines and lines started with #
# and other garbage then reconstruct it as dictionary
# print show_splitted
sanitized_show = [item for item in show_splitted if item]
sanitized_show = [
item for item in sanitized_show if not item.startswith('#')
]
# print sanitized_show
for line in sanitized_show:
name, var = line.partition(" = ")[::2]
var = var.strip('"')
var = var.strip("'")
specific_record[name.strip()] = var
return specific_record

def validate_pb(
self, backup_dir, instance=None,
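The JSON branch above assumes `pg_probackup show --format=json` emits an array of instance objects, each carrying a 'backups' array ordered newest first (hence the reversed() call). A minimal sketch of that assumed layout and how the helper walks it; the field values here are hypothetical:

import json

sample = '''[{"instance": "node",
              "backups": [{"id": "QZ2FGW", "backup-mode": "FULL",
                           "status": "OK"}]}]'''

for instance_data in json.loads(sample):
    # oldest record first, mirroring the helper above
    for backup in reversed(instance_data['backups']):
        print(backup['id'], backup['status'])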
@ -63,7 +63,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

backup_id = self.show_pb(backup_dir, 'node')[0]['ID']
backup_id = self.show_pb(backup_dir, 'node')[0]['id']
self.assertEqual(
'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'],
'Backup should have ERROR status')

103
tests/ptrack.py
@ -268,7 +268,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node_restored.append_conf(

@ -430,7 +431,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node_restored.append_conf(

@ -503,7 +505,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Physical comparison
if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()

@ -584,8 +587,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd)
)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
while node.psql(
"postgres", "select pg_is_in_recovery()")[0] != 0:
time.sleep(1)
full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
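The polling change above shifts the readiness check slightly: the old loop parsed safe_psql() output and waited until pg_is_in_recovery() stopped returning 't', while the new loop inspects psql()'s return code and waits until the server accepts queries at all. A hedged sketch of the new style as a standalone helper; node.psql() is assumed (as in testgres) to return a (returncode, stdout, stderr) tuple:

import time

def wait_until_accepting_queries(node, timeout=60):
    # Poll once a second; a non-zero returncode means the node
    # is still starting up and refusing connections.
    for _ in range(timeout):
        if node.psql("postgres", "select pg_is_in_recovery()")[0] == 0:
            return
        time.sleep(1)
    raise TimeoutError("node did not start accepting queries")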
@ -604,12 +607,13 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)

if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
while node.psql(
"postgres", "select pg_is_in_recovery()")[0] != 0:
time.sleep(1)
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)

@ -688,9 +692,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
repr(self.output), self.cmd)
)
node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':

while node.psql(
"postgres", "select pg_is_in_recovery()")[0] != 0:
time.sleep(1)

full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(full_result, full_result_new)
node.cleanup()

@ -711,12 +717,13 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)

if self.paranoia:
pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
while node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
while node.psql(
"postgres", "select pg_is_in_recovery()")[0] != 0:
time.sleep(1)
ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap")
self.assertEqual(ptrack_result, ptrack_result_new)

@ -811,7 +818,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_page_pgpro417(self):
"""Make archive node, take full backup, take page backup, delete page backup. Try to take ptrack backup, which should fail"""
"""
Make archive node, take full backup, take page backup,
delete page backup. Try to take ptrack backup, which should fail
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')

@ -880,7 +890,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_full_pgpro417(self):
"""Make node, take two full backups, delete full second backup. Try to take ptrack backup, which should fail"""
"""
Make node, take two full backups, delete full second backup.
Try to take ptrack backup, which should fail
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')

@ -954,7 +967,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_create_db(self):
"""Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense"""
|
||||
"""
|
||||
Make node, take full backup, create database db1, take ptrack backup,
|
||||
restore database and check it presense
|
||||
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')

@ -1017,7 +1033,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE

@ -1046,7 +1063,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE

@ -1151,7 +1169,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET RESTORED PGDATA AND COMPARE
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE

@ -1159,8 +1178,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'postgresql.auto.conf', 'port = {0}'.format(node_restored.port))
node_restored.start()

while node_restored.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
while node_restored.psql(
"postgres", "select pg_is_in_recovery()")[0] != 0:
time.sleep(1)
result_new = node_restored.safe_psql(
"postgres", "select * from t_heap")

@ -1229,7 +1248,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE

@ -1240,7 +1260,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_drop_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')

@ -1321,7 +1344,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_alter_tablespace(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')

@ -1379,15 +1405,16 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
"postgres", "select pg_is_in_recovery()") == 't\n':
while restored_node.psql(
"postgres", "select pg_is_in_recovery()")[0] != 0:
time.sleep(1)

# COMPARE LOGICAL CONTENT

@ -1416,14 +1443,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
while restored_node.psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
time.sleep(1)

@ -1437,7 +1465,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_multiple_segments(self):
"""Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup"""
"""
Make node, create table, alter table tablespace,
take ptrack backup, move table from tablespace, take ptrack backup
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(

@ -1446,9 +1477,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica', 'max_wal_senders': '2',
'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '128MB',
'maintenance_work_mem': '1GB', 'autovacuum': 'off',
'full_page_writes': 'off'}
'ptrack_enable': 'on', 'fsync': 'off',
'autovacuum': 'off',
'full_page_writes': 'off'
}
)

self.init_pb(backup_dir)

@ -1514,14 +1546,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# GET PHYSICAL CONTENT FROM NODE_RESTORED
if self.paranoia:
pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False)
pgdata_restored = self.pgdata_content(
restored_node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE
restored_node.append_conf(
"postgresql.auto.conf", "port = {0}".format(restored_node.port))
restored_node.start()
while restored_node.safe_psql(
while restored_node.psql(
"postgres",
"select pg_is_in_recovery()") == 't\n':
time.sleep(1)
@ -718,7 +718,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
con.commit()

backup_id = self.backup_node(backup_dir, 'node', node)
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")

# 1 - Try to restore to existing directory
node.stop()

@ -785,8 +785,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type="page")

show_pb = self.show_pb(backup_dir, 'node')
self.assertEqual(show_pb[1]['Status'], "OK")
self.assertEqual(show_pb[2]['Status'], "OK")
self.assertEqual(show_pb[1]['status'], "OK")
self.assertEqual(show_pb[2]['status'], "OK")

node.stop()
node.cleanup()

@ -829,7 +829,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Full backup
self.backup_node(backup_dir, 'node', node)
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")

# Create tablespace
tblspc_path = os.path.join(node.base_dir, "tblspc")

@ -845,8 +845,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# First page backup
self.backup_node(backup_dir, 'node', node, backup_type="page")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Mode'], "PAGE")
self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")
self.assertEqual(
self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE")

# Create tablespace table
with node.connect("postgres") as con:

@ -862,8 +863,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
# Second page backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="page")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Status'], "OK")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Mode'], "PAGE")
self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK")
self.assertEqual(
self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE")

node.stop()
node.cleanup()

@ -14,7 +14,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_redundancy_1(self):
"""purge backups using redundancy-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)

@ -24,7 +25,9 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
self.set_archiving(backup_dir, 'node', node)
node.start()

with open(os.path.join(backup_dir, 'backups', 'node', "pg_probackup.conf"), "a") as conf:
with open(os.path.join(
backup_dir, 'backups', 'node',
"pg_probackup.conf"), "a") as conf:
conf.write("retention-redundancy = 1\n")

# Make backups to be purged
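The hunk above appends the retention setting straight to the instance's pg_probackup.conf. The same policy could presumably be applied through the set-config command instead of editing the file by hand; a hypothetical sketch within this test harness:

# Hypothetical alternative: let pg_probackup write the option itself.
self.run_pb([
    'set-config', '-B', backup_dir, '--instance', 'node',
    '--retention-redundancy=1'])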
@ -57,7 +60,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')):
if not wal_name.endswith(".backup"):
#wal_name_b = wal_name.encode('ascii')
# wal_name_b = wal_name.encode('ascii')
self.assertEqual(wal_name[8:] > min_wal[8:], True)
self.assertEqual(wal_name[8:] > max_wal[8:], True)

@ -68,7 +71,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
def test_retention_window_2(self):
"""purge backups using window-based retention policy"""
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)

@ -36,6 +36,35 @@ class OptionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
# @unittest.expectedFailure
def test_show_json(self):
"""Status DONE and OK"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()

self.assertEqual(
self.backup_node(
backup_dir, 'node', node,
options=["--log-level-console=panic"]),
None
)
self.backup_node(backup_dir, 'node', node)
self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))

# Clean after yourself
self.del_test_dir(module_name, fname)
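test_show_json above only greps for "OK" in the raw output; since as_json now defaults to True, that raw text should itself be parseable JSON. A hedged sketch of a stricter check a test could make, assuming the instance/backups layout consumed by the show_pb() helper:

import json

def assert_show_json_ok(raw_show_output):
    # Parse the raw `show --format=json` text and check one backup is OK;
    # json.loads() fails loudly if the output is not valid JSON.
    parsed = json.loads(raw_show_output)
    assert any(b['status'] == 'OK' for b in parsed[0]['backups'])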
# @unittest.skip("skip")
def test_corrupt_2(self):
"""Status CORRUPT"""
@ -908,7 +908,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
backup_id = self.backup_node(backup_dir, 'node', node)
target_xid = None
with node.connect("postgres") as con:
res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
res = con.execute(
"INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)")
con.commit()
target_xid = res[0][0]

@ -1041,7 +1042,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_validate_corrupt_wal_between_backups(self):
"""make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors"""
"""
make archive node, make full backup, corrupt all wal files,
run validate to real xid, expect errors
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),

@ -1083,7 +1087,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
else:
walfile = node.safe_psql(
'postgres',
'select pg_walfile_name(pg_current_wal_location())').rstrip()
'select pg_walfile_name(pg_current_wal_lsn())').rstrip()

if self.archive_compress:
walfile = walfile + '.gz'
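The query change above tracks the rename of the WAL introspection functions finalized in PostgreSQL 10, where pg_current_wal_lsn() replaced the interim pg_current_wal_location() spelling; 9.x servers still use the xlog-based names. A sketch of picking the query by server version number:

def walfile_name_query(server_version_num):
    # PostgreSQL 10 settled on the *_lsn names; 9.x uses the xlog names.
    if server_version_num >= 100000:
        return "select pg_walfile_name(pg_current_wal_lsn())"
    return "select pg_xlogfile_name(pg_current_xlog_location())"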
@ -1134,12 +1138,12 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[0]['Status'],
self.show_pb(backup_dir, 'node')[0]['status'],
'Backup STATUS should be "OK"')

self.assertEqual(
'OK',
self.show_pb(backup_dir, 'node')[1]['Status'],
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup STATUS should be "OK"')

# Clean after yourself

@ -1208,7 +1212,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
'ERROR',
self.show_pb(backup_dir, 'node')[1]['Status'],
self.show_pb(backup_dir, 'node')[1]['status'],
'Backup {0} should have STATUS "ERROR"')

# Clean after yourself

@ -1405,7 +1409,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
except ProbackupException as e:
pass
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.set_archiving(backup_dir, 'node', node)
node.reload()
self.backup_node(backup_dir, 'node', node, backup_type='page')

@ -1440,14 +1444,19 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT')
self.assertTrue(
self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN')
self.assertTrue(
self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN')
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.assertTrue(
self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN')

os.rename(file_new, file)
try:

@ -1459,14 +1468,15 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK')

# Clean after yourself
self.del_test_dir(module_name, fname)

@ -1537,13 +1547,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN')

os.rename(file_new, file)
file = os.path.join(

@ -1562,13 +1572,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))

self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN')
self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK')
self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT')
self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN')

# Clean after yourself
self.del_test_dir(module_name, fname)