From ceaa2ce9cdf64b13f52df42f23a5640d339aeda5 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Mon, 21 May 2018 19:06:12 +0300 Subject: [PATCH 1/7] PGPRO-533: Allow to show backup list in json format --- src/backup.c | 24 ++- src/catalog.c | 64 +++++- src/help.c | 2 + src/pg_probackup.c | 63 +++--- src/pg_probackup.h | 20 +- src/show.c | 494 +++++++++++++++++++++++++++++++++++++-------- src/util.c | 13 +- src/utils/pgut.h | 2 +- 8 files changed, 532 insertions(+), 150 deletions(-) diff --git a/src/backup.c b/src/backup.c index 57c4be51..6a2333bd 100644 --- a/src/backup.c +++ b/src/backup.c @@ -633,7 +633,7 @@ do_backup_instance(void) * For backup from master wait for previous segment. * For backup from replica wait for current segment. */ - !from_replica, backup_files_list); + !current.from_replica, backup_files_list); } if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK) @@ -815,11 +815,15 @@ do_backup(time_t start_time) pgut_atexit_push(backup_disconnect, NULL); current.primary_conninfo = pgut_get_conninfo_string(backup_conn); + + current.compress_alg = compress_alg; + current.compress_level = compress_level; + /* Confirm data block size and xlog block size are compatible */ confirm_block_size("block_size", BLCKSZ); confirm_block_size("wal_block_size", XLOG_BLCKSZ); - from_replica = pg_is_in_recovery(); + current.from_replica = pg_is_in_recovery(); /* Confirm that this server version is supported */ check_server_version(); @@ -859,7 +863,7 @@ do_backup(time_t start_time) } } - if (from_replica) + if (current.from_replica) { /* Check master connection options */ if (master_host == NULL) @@ -956,7 +960,7 @@ check_server_version(void) "server version is %s, must be %s or higher", server_version_str, "9.5"); - if (from_replica && server_version < 90600) + if (current.from_replica && server_version < 90600) elog(ERROR, "server version is %s, must be %s or higher for backup from replica", server_version_str, "9.6"); @@ -1061,7 +1065,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup) params[0] = label; /* For replica we call pg_start_backup() on master */ - conn = (from_replica) ? master_conn : backup_conn; + conn = (backup->from_replica) ? master_conn : backup_conn; /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; @@ -1106,7 +1110,7 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup) } /* Wait for start_lsn to be replayed by replica */ - if (from_replica) + if (backup->from_replica) wait_replica_wal_lsn(backup->start_lsn, true); /* @@ -1554,8 +1558,6 @@ wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup) { uint32 try_count = 0; - Assert(from_replica); - while (true) { PGresult *res; @@ -1650,7 +1652,7 @@ pg_stop_backup(pgBackup *backup) elog(FATAL, "backup is not in progress"); /* For replica we call pg_stop_backup() on master */ - conn = (from_replica) ? master_conn : backup_conn; + conn = (current.from_replica) ? 
master_conn : backup_conn;
 	/* Remove annoying NOTICE messages generated by backend */
 	res = pgut_execute(conn, "SET client_min_messages = warning;",
@@ -1663,7 +1665,7 @@ pg_stop_backup(pgBackup *backup)
 		const char *params[1];
 		char		name[1024];
-		if (!from_replica)
+		if (!current.from_replica)
 			snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
 					 base36enc(backup->start_time));
 		else
@@ -1891,7 +1893,7 @@ pg_stop_backup(pgBackup *backup)
 					stream_xlog_path[MAXPGPATH];
 		/* Wait for stop_lsn to be received by replica */
-		if (from_replica)
+		if (backup->from_replica)
 			wait_replica_wal_lsn(stop_backup_lsn, false);
 		/*
 		 * Wait for stop_lsn to be archived or streamed.
diff --git a/src/catalog.c b/src/catalog.c
index f5884f01..6c9d36b5 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -385,10 +385,11 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
 	fprintf(out, "#Configuration\n");
 	fprintf(out, "backup-mode = %s\n", pgBackupGetBackupMode(backup));
-	fprintf(out, "stream = %s\n", backup->stream?"true":"false");
-	fprintf(out, "compress-alg = %s\n", deparse_compress_alg(compress_alg));
-	fprintf(out, "compress-level = %d\n", compress_level);
-	fprintf(out, "from-replica = %s\n", from_replica?"true":"false");
+	fprintf(out, "stream = %s\n", backup->stream ? "true" : "false");
+	fprintf(out, "compress-alg = %s\n",
+			deparse_compress_alg(backup->compress_alg));
+	fprintf(out, "compress-level = %d\n", backup->compress_level);
+	fprintf(out, "from-replica = %s\n", backup->from_replica ? "true" : "false");
 	fprintf(out, "\n#Compatibility\n");
 	fprintf(out, "block-size = %u\n", backup->block_size);
@@ -429,7 +430,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
 	if (backup->data_bytes != BYTES_INVALID)
 		fprintf(out, "data-bytes = " INT64_FORMAT "\n", backup->data_bytes);
-	if (backup->data_bytes != BYTES_INVALID)
+	if (backup->wal_bytes != BYTES_INVALID)
 		fprintf(out, "wal-bytes = " INT64_FORMAT "\n", backup->wal_bytes);
 	fprintf(out, "status = %s\n", status2str(backup->status));
@@ -475,10 +476,8 @@ readBackupControlFile(const char *path)
 	char	   *stop_lsn = NULL;
 	char	   *status = NULL;
 	char	   *parent_backup = NULL;
-	char	   *compress_alg = NULL;
 	char	   *server_version = NULL;
-	int		   *compress_level;
-	bool	   *from_replica;
+	char	   *compress_alg = NULL;
 	pgut_option options[] =
 	{
@@ -500,8 +499,8 @@
 		{'s', 0, "status",				&status, SOURCE_FILE_STRICT},
 		{'s', 0, "parent-backup-id",	&parent_backup, SOURCE_FILE_STRICT},
 		{'s', 0, "compress-alg",		&compress_alg, SOURCE_FILE_STRICT},
-		{'u', 0, "compress-level",		&compress_level, SOURCE_FILE_STRICT},
-		{'b', 0, "from-replica",		&from_replica, SOURCE_FILE_STRICT},
+		{'u', 0, "compress-level",		&backup->compress_level, SOURCE_FILE_STRICT},
+		{'b', 0, "from-replica",		&backup->from_replica, SOURCE_FILE_STRICT},
 		{'s', 0, "primary-conninfo",	&backup->primary_conninfo, SOURCE_FILE_STRICT},
 		{0}
 	};
@@ -578,6 +577,9 @@ readBackupControlFile(const char *path)
 		pfree(server_version);
 	}
+	if (compress_alg)
+		backup->compress_alg = parse_compress_alg(compress_alg);
+
 	return backup;
 }
@@ -626,6 +628,48 @@ deparse_backup_mode(BackupMode mode)
 	return NULL;
 }
+CompressAlg
+parse_compress_alg(const char *arg)
+{
+	size_t		len;
+
+	/* Skip all spaces detected */
+	while (isspace((unsigned char)*arg))
+		arg++;
+	len = strlen(arg);
+
+	if (len == 0)
+		elog(ERROR, "compress algorithm is empty");
+
+	if (pg_strncasecmp("zlib", arg, len) == 0)
+		return ZLIB_COMPRESS;
+	else if (pg_strncasecmp("pglz", arg, len) == 0)
+		return PGLZ_COMPRESS;
+	else if
(pg_strncasecmp("none", arg, len) == 0) + return NONE_COMPRESS; + else + elog(ERROR, "invalid compress algorithm value \"%s\"", arg); + + return NOT_DEFINED_COMPRESS; +} + +const char* +deparse_compress_alg(int alg) +{ + switch (alg) + { + case NONE_COMPRESS: + case NOT_DEFINED_COMPRESS: + return "none"; + case ZLIB_COMPRESS: + return "zlib"; + case PGLZ_COMPRESS: + return "pglz"; + } + + return NULL; +} + /* free pgBackup object */ void pgBackupFree(void *backup) diff --git a/src/help.c b/src/help.c index 2f84b225..a41e8167 100644 --- a/src/help.c +++ b/src/help.c @@ -128,6 +128,7 @@ help_pg_probackup(void) printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME); printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_(" [--format=format]\n")); printf(_("\n %s delete -B backup-dir --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [--wal] [-i backup-id | --expired]\n")); @@ -362,6 +363,7 @@ help_show(void) printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name show info about specific intstance\n")); printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); + printf(_(" --format=format show format=PLAIN|JSON\n")); } static void diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 5d464171..efb8e6a3 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -47,7 +47,6 @@ char *replication_slot = NULL; /* backup options */ bool backup_logs = false; bool smooth_checkpoint; -bool from_replica = false; bool is_remote_backup = false; /* Wait timeout for WAL segment archiving */ uint32 archive_timeout = 300; /* default is 300 seconds */ @@ -83,7 +82,7 @@ uint32 retention_window = 0; /* compression options */ CompressAlg compress_alg = NOT_DEFINED_COMPRESS; int compress_level = DEFAULT_COMPRESS_LEVEL; -bool compress_shortcut = false; +bool compress_shortcut = false; /* other options */ char *instance_name; @@ -94,6 +93,9 @@ static char *wal_file_path; static char *wal_file_name; static bool file_overwrite = false; +/* show options */ +ShowFormat show_format = SHOW_PLAIN; + /* current settings */ pgBackup current; ProbackupSubcmd backup_subcmd; @@ -104,6 +106,7 @@ static void opt_backup_mode(pgut_option *opt, const char *arg); static void opt_log_level_console(pgut_option *opt, const char *arg); static void opt_log_level_file(pgut_option *opt, const char *arg); static void opt_compress_alg(pgut_option *opt, const char *arg); +static void opt_show_format(pgut_option *opt, const char *arg); static void compress_init(void); @@ -178,6 +181,8 @@ static pgut_option options[] = { 's', 160, "wal-file-path", &wal_file_path, SOURCE_CMDLINE }, { 's', 161, "wal-file-name", &wal_file_name, SOURCE_CMDLINE }, { 'b', 162, "overwrite", &file_overwrite, SOURCE_CMDLINE }, + /* show options */ + { 'f', 170, "format", opt_show_format, SOURCE_CMDLINE }, { 0 } }; @@ -517,49 +522,31 @@ opt_log_level_file(pgut_option *opt, const char *arg) log_level_file = parse_log_level(arg); } -CompressAlg -parse_compress_alg(const char *arg) +static void +opt_show_format(pgut_option *opt, const char *arg) { + const char *v = arg; size_t len; /* Skip all spaces detected */ - while (isspace((unsigned char)*arg)) - arg++; - len = strlen(arg); + while (IsSpace(*v)) + v++; + len = strlen(v); - if (len == 0) - elog(ERROR, "compress algrorithm is empty"); - - if (pg_strncasecmp("zlib", arg, len) == 0) - return ZLIB_COMPRESS; - else if (pg_strncasecmp("pglz", arg, len) == 0) - return PGLZ_COMPRESS; - else if (pg_strncasecmp("none", arg, 
len) == 0) - return NONE_COMPRESS; - else - elog(ERROR, "invalid compress algorithm value \"%s\"", arg); - - return NOT_DEFINED_COMPRESS; -} - -const char* -deparse_compress_alg(int alg) -{ - switch (alg) + if (len > 0) { - case NONE_COMPRESS: - case NOT_DEFINED_COMPRESS: - return "none"; - case ZLIB_COMPRESS: - return "zlib"; - case PGLZ_COMPRESS: - return "pglz"; + if (pg_strncasecmp("plain", v, len) == 0) + show_format = SHOW_PLAIN; + else if (pg_strncasecmp("json", v, len) == 0) + show_format = SHOW_JSON; + else + elog(ERROR, "Invalid show format \"%s\"", arg); } - - return NULL; + else + elog(ERROR, "Invalid show format \"%s\"", arg); } -void +static void opt_compress_alg(pgut_option *opt, const char *arg) { compress_alg = parse_compress_alg(arg); @@ -568,8 +555,8 @@ opt_compress_alg(pgut_option *opt, const char *arg) /* * Initialize compress and sanity checks for compress. */ -static -void compress_init(void) +static void +compress_init(void) { /* Default algorithm is zlib */ if (compress_shortcut) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 30df34ce..0fefef1b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -149,6 +149,12 @@ typedef enum ProbackupSubcmd SHOW_CONFIG } ProbackupSubcmd; +typedef enum ShowFormat +{ + SHOW_PLAIN, + SHOW_JSON +} ShowFormat; + /* special values of pgBackup fields */ #define INVALID_BACKUP_ID 0 /* backup ID is not provided by user */ @@ -214,6 +220,9 @@ typedef struct pgBackup /* Size of WAL files in archive needed to restore this backup */ int64 wal_bytes; + CompressAlg compress_alg; + int compress_level; + /* Fields needed for compatibility check */ uint32 block_size; uint32 wal_block_size; @@ -221,13 +230,14 @@ typedef struct pgBackup char server_version[100]; - bool stream; /* Was this backup taken in stream mode? + bool stream; /* Was this backup taken in stream mode? * i.e. does it include all needed WAL files? */ + bool from_replica; /* Was this backup taken from replica */ time_t parent_backup; /* Identifier of the previous backup. * Which is basic backup for this * incremental backup. */ - char *primary_conninfo; /* Connection parameters of the backup - * in the format suitable for recovery.conf */ + char *primary_conninfo; /* Connection parameters of the backup + * in the format suitable for recovery.conf */ } pgBackup; /* Recovery target for restore and validate subcommands */ @@ -310,7 +320,6 @@ extern char *replication_slot; /* backup options */ extern bool smooth_checkpoint; extern uint32 archive_timeout; -extern bool from_replica; extern bool is_remote_backup; extern const char *master_db; extern const char *master_host; @@ -348,6 +357,9 @@ extern const char* deparse_compress_alg(int alg); extern char *instance_name; extern uint64 system_identifier; +/* show options */ +extern ShowFormat show_format; + /* current settings */ extern pgBackup current; extern ProbackupSubcmd backup_subcmd; diff --git a/src/show.c b/src/show.c index b6eee867..b97d9427 100644 --- a/src/show.c +++ b/src/show.c @@ -3,28 +3,38 @@ * show.c: show backup information. 
* * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2017, Postgres Professional + * Portions Copyright (c) 2015-2018, Postgres Professional * *------------------------------------------------------------------------- */ #include "pg_probackup.h" + #include #include #include #include +#include "pqexpbuffer.h" -static void show_backup_list(FILE *out, parray *backup_list); -static void show_backup_detail(FILE *out, pgBackup *backup); -static int do_show_instance(time_t requested_backup_id); + +static void show_instance_start(void); +static void show_instance_end(void); +static void show_instance(time_t requested_backup_id, bool show_name); +static int show_backup(time_t requested_backup_id); + +static void show_instance_plain(parray *backup_list, bool show_name); +static void show_instance_json(parray *backup_list); + +static PQExpBufferData show_buf; +static bool first_instance = true; +static uint8 json_level = 0; int do_show(time_t requested_backup_id) { - - if (instance_name == NULL - && requested_backup_id != INVALID_BACKUP_ID) + if (instance_name == NULL && + requested_backup_id != INVALID_BACKUP_ID) elog(ERROR, "You must specify --instance to use --backup_id option"); if (instance_name == NULL) @@ -38,10 +48,12 @@ do_show(time_t requested_backup_id) join_path_components(path, backup_path, BACKUPS_DIR); dir = opendir(path); if (dir == NULL) - elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", + path, strerror(errno)); - errno = 0; - while ((dent = readdir(dir))) + show_instance_start(); + + while (errno = 0, (dent = readdir(dir)) != NULL) { char child[MAXPGPATH]; struct stat st; @@ -54,73 +66,47 @@ do_show(time_t requested_backup_id) join_path_components(child, path, dent->d_name); if (lstat(child, &st) == -1) - elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno)); + elog(ERROR, "Cannot stat file \"%s\": %s", + child, strerror(errno)); if (!S_ISDIR(st.st_mode)) continue; instance_name = dent->d_name; sprintf(backup_instance_path, "%s/%s/%s", backup_path, BACKUPS_DIR, instance_name); - fprintf(stdout, "\nBACKUP INSTANCE '%s'\n", instance_name); - do_show_instance(0); + + show_instance(INVALID_BACKUP_ID, true); } + + if (errno) + elog(ERROR, "Cannot read directory \"%s\": %s", + path, strerror(errno)); + + if (closedir(dir)) + elog(ERROR, "Cannot close directory \"%s\": %s", + path, strerror(errno)); + + show_instance_end(); + + return 0; + } + else if (requested_backup_id == INVALID_BACKUP_ID || + show_format == SHOW_JSON) + { + show_instance_start(); + show_instance(requested_backup_id, false); + show_instance_end(); + return 0; } else - return do_show_instance(requested_backup_id); -} - -/* - * If 'requested_backup_id' is INVALID_BACKUP_ID, show brief meta information - * about all backups in the backup instance. - * If valid backup id is passed, show detailed meta information - * about specified backup. 
- */ -static int -do_show_instance(time_t requested_backup_id) -{ - if (requested_backup_id != INVALID_BACKUP_ID) - { - pgBackup *backup; - - backup = read_backup(requested_backup_id); - if (backup == NULL) - { - elog(INFO, "Requested backup \"%s\" is not found.", - /* We do not need free base36enc's result, we exit anyway */ - base36enc(requested_backup_id)); - /* This is not error */ - return 0; - } - - show_backup_detail(stdout, backup); - - /* cleanup */ - pgBackupFree(backup); - - } - else - { - parray *backup_list; - - backup_list = catalog_get_backup_list(INVALID_BACKUP_ID); - if (backup_list == NULL) - elog(ERROR, "Failed to get backup list."); - - show_backup_list(stdout, backup_list); - - /* cleanup */ - parray_walk(backup_list, pgBackupFree); - parray_free(backup_list); - } - - return 0; + return show_backup(requested_backup_id); } static void pretty_size(int64 size, char *buf, size_t len) { - int exp = 0; + int exp = 0; /* minus means the size is invalid */ if (size < 0) @@ -219,16 +205,113 @@ get_parent_tli(TimeLineID child_tli) return result; } +/* + * Initialize instance visualization. + */ static void -show_backup_list(FILE *out, parray *backup_list) +show_instance_start(void) +{ + initPQExpBuffer(&show_buf); + + if (show_format == SHOW_PLAIN) + return; + + first_instance = true; + json_level = 0; + + appendPQExpBufferChar(&show_buf, '['); + json_level++; +} + +/* + * Finalize instance visualization. + */ +static void +show_instance_end(void) +{ + if (show_format == SHOW_JSON) + appendPQExpBufferStr(&show_buf, "\n]\n"); + + fputs(show_buf.data, stdout); + termPQExpBuffer(&show_buf); +} + +/* + * Show brief meta information about all backups in the backup instance. + */ +static void +show_instance(time_t requested_backup_id, bool show_name) +{ + parray *backup_list; + + backup_list = catalog_get_backup_list(requested_backup_id); + if (backup_list == NULL) + elog(ERROR, "Failed to get backup list."); + + if (show_format == SHOW_PLAIN) + show_instance_plain(backup_list, show_name); + else if (show_format == SHOW_JSON) + show_instance_json(backup_list); + else + elog(ERROR, "Invalid show format %d", (int) show_format); + + /* cleanup */ + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); +} + +/* + * Show detailed meta information about specified backup. + */ +static int +show_backup(time_t requested_backup_id) +{ + pgBackup *backup; + + backup = read_backup(requested_backup_id); + if (backup == NULL) + { + elog(INFO, "Requested backup \"%s\" is not found.", + /* We do not need free base36enc's result, we exit anyway */ + base36enc(requested_backup_id)); + /* This is not error */ + return 0; + } + + if (show_format == SHOW_PLAIN) + pgBackupWriteControl(stdout, backup); + else + elog(ERROR, "Invalid show format %d", (int) show_format); + + /* cleanup */ + pgBackupFree(backup); + + return 0; +} + +/* + * Plain output. + */ + +/* + * Show instance backups in plain format. 
+ */ +static void +show_instance_plain(parray *backup_list, bool show_name) { int i; + if (show_name) + printfPQExpBuffer(&show_buf, "\nBACKUP INSTANCE '%s'\n", instance_name); + /* if you add new fields here, fix the header */ /* show header */ - fputs("============================================================================================================================================\n", out); - fputs(" Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n", out); - fputs("============================================================================================================================================\n", out); + appendPQExpBufferStr(&show_buf, + "============================================================================================================================================\n"); + appendPQExpBufferStr(&show_buf, + " Instance Version ID Recovery time Mode WAL Current/Parent TLI Time Data Start LSN Stop LSN Status \n"); + appendPQExpBufferStr(&show_buf, + "============================================================================================================================================\n"); for (i = 0; i < parray_num(backup_list); i++) { @@ -255,27 +338,270 @@ show_backup_list(FILE *out, parray *backup_list) /* Get parent timeline before printing */ parent_tli = get_parent_tli(backup->tli); - fprintf(out, " %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n", - instance_name, - (backup->server_version[0] ? backup->server_version : "----"), - base36enc(backup->start_time), - timestamp, - pgBackupGetBackupMode(backup), - backup->stream ? "STREAM": "ARCHIVE", - backup->tli, - parent_tli, - duration, - data_bytes_str, - (uint32) (backup->start_lsn >> 32), - (uint32) backup->start_lsn, - (uint32) (backup->stop_lsn >> 32), - (uint32) backup->stop_lsn, - status2str(backup->status)); + appendPQExpBuffer(&show_buf, + " %-11s %-8s %-6s %-22s %-6s %-7s %3d / %-3d %5s %6s %2X/%-8X %2X/%-8X %-8s\n", + instance_name, + (backup->server_version[0] ? backup->server_version : "----"), + base36enc(backup->start_time), + timestamp, + pgBackupGetBackupMode(backup), + backup->stream ? "STREAM": "ARCHIVE", + backup->tli, + parent_tli, + duration, + data_bytes_str, + (uint32) (backup->start_lsn >> 32), + (uint32) backup->start_lsn, + (uint32) (backup->stop_lsn >> 32), + (uint32) backup->stop_lsn, + status2str(backup->status)); + } +} + +/* + * Json output. 
+ */ + +static void +json_add_indent(PQExpBuffer buf) +{ + uint8 i; + + if (json_level == 0) + return; + + appendPQExpBufferChar(buf, '\n'); + for (i = 0; i < json_level; i++) + appendPQExpBufferStr(buf, " "); +} + +typedef enum +{ + JT_BEGIN_ARRAY, + JT_END_ARRAY, + JT_BEGIN_OBJECT, + JT_END_OBJECT +} JsonToken; + +static void +json_add(PQExpBuffer buf, JsonToken type) +{ + switch (type) + { + case JT_BEGIN_ARRAY: + appendPQExpBufferChar(buf, '['); + json_level++; + break; + case JT_END_ARRAY: + json_level--; + if (json_level == 0) + appendPQExpBufferChar(buf, '\n'); + else + json_add_indent(buf); + appendPQExpBufferChar(buf, ']'); + break; + case JT_BEGIN_OBJECT: + json_add_indent(buf); + appendPQExpBufferChar(buf, '{'); + json_level++; + break; + case JT_END_OBJECT: + json_level--; + if (json_level == 0) + appendPQExpBufferChar(buf, '\n'); + else + json_add_indent(buf); + appendPQExpBufferChar(buf, '}'); + break; + default: + break; } } static void -show_backup_detail(FILE *out, pgBackup *backup) +json_add_escaped(PQExpBuffer buf, const char *str) { - pgBackupWriteControl(out, backup); + const char *p; + + appendPQExpBufferChar(buf, '"'); + for (p = str; *p; p++) + { + switch (*p) + { + case '\b': + appendPQExpBufferStr(buf, "\\b"); + break; + case '\f': + appendPQExpBufferStr(buf, "\\f"); + break; + case '\n': + appendPQExpBufferStr(buf, "\\n"); + break; + case '\r': + appendPQExpBufferStr(buf, "\\r"); + break; + case '\t': + appendPQExpBufferStr(buf, "\\t"); + break; + case '"': + appendPQExpBufferStr(buf, "\\\""); + break; + case '\\': + appendPQExpBufferStr(buf, "\\\\"); + break; + default: + if ((unsigned char) *p < ' ') + appendPQExpBuffer(buf, "\\u%04x", (int) *p); + else + appendPQExpBufferChar(buf, *p); + break; + } + } + appendPQExpBufferChar(buf, '"'); +} + +static void +json_add_key(PQExpBuffer buf, const char *name, bool add_comma) +{ + if (add_comma) + appendPQExpBufferChar(buf, ','); + json_add_indent(buf); + + json_add_escaped(buf, name); + appendPQExpBufferStr(buf, ": "); +} + +static void +json_add_value(PQExpBuffer buf, const char *name, const char *value, + bool add_comma) +{ + json_add_key(buf, name, add_comma); + json_add_escaped(buf, value); +} + +/* + * Show instance backups in json format. + */ +static void +show_instance_json(parray *backup_list) +{ + int i; + PQExpBuffer buf = &show_buf; + + if (!first_instance) + appendPQExpBufferChar(buf, ','); + + /* Begin of instance object */ + json_add(buf, JT_BEGIN_OBJECT); + + json_add_value(buf, "instance-name", instance_name, false); + + json_add_key(buf, "backups", true); + + /* + * List backups. + */ + json_add(buf, JT_BEGIN_ARRAY); + + for (i = 0; i < parray_num(backup_list); i++) + { + pgBackup *backup = parray_get(backup_list, i); + TimeLineID parent_tli; + char timestamp[100] = "----"; + char duration[20] = "----"; + char data_bytes_str[10] = "----"; + char lsn[20]; + + if (i != 0) + appendPQExpBufferChar(buf, ','); + + json_add(buf, JT_BEGIN_OBJECT); + + json_add_value(buf, "id", base36enc(backup->start_time), true); + + if (backup->parent_backup != 0) + json_add_value(buf, "parent-backup-id", + base36enc(backup->parent_backup), true); + + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), false); + + json_add_value(buf, "wal", backup->stream ? 
"STREAM": "ARCHIVE", true); + + json_add_value(buf, "compress-alg", + deparse_compress_alg(backup->compress_alg), true); + + json_add_key(buf, "compress-level", true); + appendPQExpBuffer(buf, "%d", backup->compress_level); + + json_add_value(buf, "from-replica", + backup->from_replica ? "true" : "false", true); + + json_add_key(buf, "block-size", true); + appendPQExpBuffer(buf, "%u", backup->block_size); + + json_add_key(buf, "xlog-block-size", true); + appendPQExpBuffer(buf, "%u", backup->wal_block_size); + + json_add_key(buf, "checksum-version", true); + appendPQExpBuffer(buf, "%u", backup->checksum_version); + + json_add_value(buf, "server-version", backup->server_version, true); + + json_add_key(buf, "current-tli", true); + appendPQExpBuffer(buf, "%d", backup->tli); + + json_add_key(buf, "parent-tli", true); + parent_tli = get_parent_tli(backup->tli); + appendPQExpBuffer(buf, "%u", parent_tli); + + snprintf(lsn, lengthof(lsn), "%X/%X", + (uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn); + json_add_value(buf, "start-lsn", lsn, true); + + snprintf(lsn, lengthof(lsn), "%X/%X", + (uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn); + json_add_value(buf, "stop-lsn", lsn, true); + + time2iso(timestamp, lengthof(timestamp), backup->start_time); + json_add_value(buf, "start-time", timestamp, true); + + time2iso(timestamp, lengthof(timestamp), backup->end_time); + json_add_value(buf, "end-time", timestamp, true); + + json_add_key(buf, "recovery-xid", true); + appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid); + + time2iso(timestamp, lengthof(timestamp), backup->recovery_time); + json_add_value(buf, "recovery-time", timestamp, true); + + pretty_size(backup->data_bytes, data_bytes_str, + lengthof(data_bytes_str)); + json_add_value(buf, "data-bytes", data_bytes_str, true); + + pretty_size(backup->wal_bytes, data_bytes_str, + lengthof(data_bytes_str)); + json_add_value(buf, "wal-bytes", data_bytes_str, true); + + if (backup->end_time != (time_t) 0) + { + snprintf(duration, lengthof(duration), "%.*lfs", 0, + difftime(backup->end_time, backup->start_time)); + json_add_value(buf, "time", duration, true); + } + + if (backup->primary_conninfo) + json_add_value(buf, "primary_conninfo", backup->primary_conninfo, true); + + json_add_value(buf, "status", status2str(backup->status), true); + + json_add(buf, JT_END_OBJECT); + } + + /* End of backups */ + json_add(buf, JT_END_ARRAY); + + /* End of instance object */ + json_add(buf, JT_END_OBJECT); + + first_instance = false; } diff --git a/src/util.c b/src/util.c index f2c84f6e..7aeba211 100644 --- a/src/util.c +++ b/src/util.c @@ -176,8 +176,8 @@ uint32 get_data_checksum_version(bool safe) { ControlFileData ControlFile; - char *buffer; - size_t size; + char *buffer; + size_t size; /* First fetch file... 
*/ buffer = slurpFile(pgdata, "global/pg_control", &size, safe); @@ -310,10 +310,19 @@ pgBackup_init(pgBackup *backup) backup->end_time = (time_t) 0; backup->recovery_xid = 0; backup->recovery_time = (time_t) 0; + backup->data_bytes = BYTES_INVALID; + backup->wal_bytes = BYTES_INVALID; + + backup->compress_alg = NOT_DEFINED_COMPRESS; + backup->compress_level = 0; + backup->block_size = BLCKSZ; backup->wal_block_size = XLOG_BLCKSZ; + backup->checksum_version = 0; + backup->stream = false; + backup->from_replica = false; backup->parent_backup = 0; backup->primary_conninfo = NULL; backup->server_version[0] = '\0'; diff --git a/src/utils/pgut.h b/src/utils/pgut.h index a9003f2f..803d2c57 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -59,7 +59,7 @@ typedef enum pgut_optsrc typedef struct pgut_option { char type; - char sname; /* short name */ + uint8 sname; /* short name */ const char *lname; /* long name */ void *var; /* pointer to variable */ pgut_optsrc allowed; /* allowed source */ From 6df8c2aaec213d0e149644df53606ab307cda336 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Thu, 31 May 2018 20:31:12 +0300 Subject: [PATCH 2/7] PGPRO-533: Replace instance-name by instance, fix typos --- src/show.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/show.c b/src/show.c index b97d9427..98dab419 100644 --- a/src/show.c +++ b/src/show.c @@ -494,7 +494,7 @@ show_instance_json(parray *backup_list) /* Begin of instance object */ json_add(buf, JT_BEGIN_OBJECT); - json_add_value(buf, "instance-name", instance_name, false); + json_add_value(buf, "instance", instance_name, false); json_add_key(buf, "backups", true); @@ -517,13 +517,13 @@ show_instance_json(parray *backup_list) json_add(buf, JT_BEGIN_OBJECT); - json_add_value(buf, "id", base36enc(backup->start_time), true); + json_add_value(buf, "id", base36enc(backup->start_time), false); if (backup->parent_backup != 0) json_add_value(buf, "parent-backup-id", base36enc(backup->parent_backup), true); - json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), false); + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), true); json_add_value(buf, "wal", backup->stream ? 
"STREAM": "ARCHIVE", true); From 733354c40f0e7540d781ea9240b728494d8b3119 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Fri, 1 Jun 2018 17:09:32 +0300 Subject: [PATCH 3/7] PGPRO-533: Reformat json output, add program-version to backup.control --- src/catalog.c | 10 ++++++++ src/pg_probackup.h | 1 + src/show.c | 58 ++++++++++++++++++++++++++++++++-------------- src/util.c | 1 + 4 files changed, 52 insertions(+), 18 deletions(-) diff --git a/src/catalog.c b/src/catalog.c index 6c9d36b5..eb5d3be7 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -395,6 +395,7 @@ pgBackupWriteControl(FILE *out, pgBackup *backup) fprintf(out, "block-size = %u\n", backup->block_size); fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size); fprintf(out, "checksum-version = %u\n", backup->checksum_version); + fprintf(out, "program-version = %s\n", PROGRAM_VERSION); if (backup->server_version[0] != '\0') fprintf(out, "server-version = %s\n", backup->server_version); @@ -476,6 +477,7 @@ readBackupControlFile(const char *path) char *stop_lsn = NULL; char *status = NULL; char *parent_backup = NULL; + char *program_version = NULL; char *server_version = NULL; char *compress_alg = NULL; @@ -494,6 +496,7 @@ readBackupControlFile(const char *path) {'u', 0, "block-size", &backup->block_size, SOURCE_FILE_STRICT}, {'u', 0, "xlog-block-size", &backup->wal_block_size, SOURCE_FILE_STRICT}, {'u', 0, "checksum-version", &backup->checksum_version, SOURCE_FILE_STRICT}, + {'s', 0, "program-version", &program_version, SOURCE_FILE_STRICT}, {'s', 0, "server-version", &server_version, SOURCE_FILE_STRICT}, {'b', 0, "stream", &backup->stream, SOURCE_FILE_STRICT}, {'s', 0, "status", &status, SOURCE_FILE_STRICT}, @@ -570,6 +573,13 @@ readBackupControlFile(const char *path) free(parent_backup); } + if (program_version) + { + StrNCpy(backup->program_version, program_version, + sizeof(backup->program_version)); + pfree(program_version); + } + if (server_version) { StrNCpy(backup->server_version, server_version, diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 0fefef1b..d3ce8241 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -228,6 +228,7 @@ typedef struct pgBackup uint32 wal_block_size; uint32 checksum_version; + char program_version[100]; char server_version[100]; bool stream; /* Was this backup taken in stream mode? 
diff --git a/src/show.c b/src/show.c index 98dab419..03ae6488 100644 --- a/src/show.c +++ b/src/show.c @@ -26,6 +26,21 @@ static int show_backup(time_t requested_backup_id); static void show_instance_plain(parray *backup_list, bool show_name); static void show_instance_json(parray *backup_list); +/* Json output functions */ + +typedef enum +{ + JT_BEGIN_ARRAY, + JT_END_ARRAY, + JT_BEGIN_OBJECT, + JT_END_OBJECT +} JsonToken; + +static void json_add(PQExpBuffer buf, JsonToken type); +static void json_add_key(PQExpBuffer buf, const char *name, bool add_comma); +static void json_add_value(PQExpBuffer buf, const char *name, const char *value, + bool add_comma); + static PQExpBufferData show_buf; static bool first_instance = true; static uint8 json_level = 0; @@ -219,8 +234,9 @@ show_instance_start(void) first_instance = true; json_level = 0; - appendPQExpBufferChar(&show_buf, '['); - json_level++; + json_add(&show_buf, JT_BEGIN_OBJECT); + json_add_key(&show_buf, "instances", false); + json_add(&show_buf, JT_BEGIN_ARRAY); } /* @@ -230,7 +246,11 @@ static void show_instance_end(void) { if (show_format == SHOW_JSON) - appendPQExpBufferStr(&show_buf, "\n]\n"); + { + json_add(&show_buf, JT_END_ARRAY); + json_add(&show_buf, JT_END_OBJECT); + appendPQExpBufferChar(&show_buf, '\n'); + } fputs(show_buf.data, stdout); termPQExpBuffer(&show_buf); @@ -375,14 +395,6 @@ json_add_indent(PQExpBuffer buf) appendPQExpBufferStr(buf, " "); } -typedef enum -{ - JT_BEGIN_ARRAY, - JT_END_ARRAY, - JT_BEGIN_OBJECT, - JT_END_OBJECT -} JsonToken; - static void json_add(PQExpBuffer buf, JsonToken type) { @@ -493,10 +505,10 @@ show_instance_json(parray *backup_list) /* Begin of instance object */ json_add(buf, JT_BEGIN_OBJECT); + json_add_key(buf, instance_name, false); - json_add_value(buf, "instance", instance_name, false); - - json_add_key(buf, "backups", true); + json_add(buf, JT_BEGIN_OBJECT); + json_add_key(buf, "backups", false); /* * List backups. @@ -516,14 +528,19 @@ show_instance_json(parray *backup_list) appendPQExpBufferChar(buf, ','); json_add(buf, JT_BEGIN_OBJECT); + json_add_key(buf, base36enc(backup->start_time), false); - json_add_value(buf, "id", base36enc(backup->start_time), false); + /* Show backup attributes */ + json_add(buf, JT_BEGIN_OBJECT); if (backup->parent_backup != 0) + { json_add_value(buf, "parent-backup-id", - base36enc(backup->parent_backup), true); - - json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), true); + base36enc(backup->parent_backup), false); + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), true); + } + else + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), false); json_add_value(buf, "wal", backup->stream ? 
"STREAM": "ARCHIVE", true); @@ -545,6 +562,7 @@ show_instance_json(parray *backup_list) json_add_key(buf, "checksum-version", true); appendPQExpBuffer(buf, "%u", backup->checksum_version); + json_add_value(buf, "program-version", backup->program_version, true); json_add_value(buf, "server-version", backup->server_version, true); json_add_key(buf, "current-tli", true); @@ -594,11 +612,15 @@ show_instance_json(parray *backup_list) json_add_value(buf, "status", status2str(backup->status), true); + json_add(buf, JT_END_OBJECT); + /* End of backup attributes */ + json_add(buf, JT_END_OBJECT); } /* End of backups */ json_add(buf, JT_END_ARRAY); + json_add(buf, JT_END_OBJECT); /* End of instance object */ json_add(buf, JT_END_OBJECT); diff --git a/src/util.c b/src/util.c index 7aeba211..a43239dc 100644 --- a/src/util.c +++ b/src/util.c @@ -325,5 +325,6 @@ pgBackup_init(pgBackup *backup) backup->from_replica = false; backup->parent_backup = 0; backup->primary_conninfo = NULL; + backup->program_version[0] = '\0'; backup->server_version[0] = '\0'; } From 8aa559b17b24bc1d91f12df6df2362373fc372dd Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Fri, 1 Jun 2018 17:26:56 +0300 Subject: [PATCH 4/7] PGPRO-533: Reformat json output --- src/show.c | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/src/show.c b/src/show.c index 03ae6488..a39ba528 100644 --- a/src/show.c +++ b/src/show.c @@ -234,9 +234,8 @@ show_instance_start(void) first_instance = true; json_level = 0; - json_add(&show_buf, JT_BEGIN_OBJECT); - json_add_key(&show_buf, "instances", false); - json_add(&show_buf, JT_BEGIN_ARRAY); + appendPQExpBufferChar(&show_buf, '['); + json_level++; } /* @@ -246,11 +245,7 @@ static void show_instance_end(void) { if (show_format == SHOW_JSON) - { - json_add(&show_buf, JT_END_ARRAY); - json_add(&show_buf, JT_END_OBJECT); - appendPQExpBufferChar(&show_buf, '\n'); - } + appendPQExpBufferStr(&show_buf, "\n]\n"); fputs(show_buf.data, stdout); termPQExpBuffer(&show_buf); @@ -505,10 +500,9 @@ show_instance_json(parray *backup_list) /* Begin of instance object */ json_add(buf, JT_BEGIN_OBJECT); - json_add_key(buf, instance_name, false); - json_add(buf, JT_BEGIN_OBJECT); - json_add_key(buf, "backups", false); + json_add_value(buf, "instance", instance_name, false); + json_add_key(buf, "backups", true); /* * List backups. @@ -528,19 +522,14 @@ show_instance_json(parray *backup_list) appendPQExpBufferChar(buf, ','); json_add(buf, JT_BEGIN_OBJECT); - json_add_key(buf, base36enc(backup->start_time), false); - /* Show backup attributes */ - json_add(buf, JT_BEGIN_OBJECT); + json_add_value(buf, "id", base36enc(backup->start_time), false); if (backup->parent_backup != 0) - { json_add_value(buf, "parent-backup-id", - base36enc(backup->parent_backup), false); - json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), true); - } - else - json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), false); + base36enc(backup->parent_backup), true); + + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), true); json_add_value(buf, "wal", backup->stream ? 
"STREAM": "ARCHIVE", true); @@ -612,15 +601,11 @@ show_instance_json(parray *backup_list) json_add_value(buf, "status", status2str(backup->status), true); - json_add(buf, JT_END_OBJECT); - /* End of backup attributes */ - json_add(buf, JT_END_OBJECT); } /* End of backups */ json_add(buf, JT_END_ARRAY); - json_add(buf, JT_END_OBJECT); /* End of instance object */ json_add(buf, JT_END_OBJECT); From 191d5e30e98d25a70f17ace622b4626f670e9b6d Mon Sep 17 00:00:00 2001 From: Grigory Smolkin Date: Sat, 2 Jun 2018 20:35:37 +0300 Subject: [PATCH 5/7] tests: json format for show command --- tests/archive.py | 14 +++- tests/backup_test.py | 38 +++++----- tests/delete_test.py | 30 ++++---- tests/delta.py | 4 +- tests/exclude.py | 18 +++-- tests/helpers/ptrack_helpers.py | 121 +++++++++++++++++++------------- tests/pgpro589.py | 2 +- tests/ptrack.py | 103 ++++++++++++++++++--------- tests/restore_test.py | 18 ++--- tests/retention_test.py | 12 ++-- tests/show_test.py | 29 ++++++++ tests/validate_test.py | 84 ++++++++++++---------- 12 files changed, 292 insertions(+), 181 deletions(-) diff --git a/tests/archive.py b/tests/archive.py index c0408360..4e1f39d8 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -427,7 +427,11 @@ class ArchiveTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # @unittest.skip("skip") def test_replica_archive(self): - """make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica""" + """ + make node without archiving, take stream backup and + turn it into replica, set replica with archiving, + make archive backup from replica + """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -468,7 +472,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase): after = replica.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(before, after) - # Change data on master, take FULL backup from replica, restore taken backup and check that restored data equal to original data + # Change data on master, take FULL backup from replica, + # restore taken backup and check that restored data equal + # to original data master.psql( "postgres", "insert into t_heap as select i as id, md5(i::text) as text, " @@ -502,7 +508,9 @@ class ArchiveTest(ProbackupTest, unittest.TestCase): after = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(before, after) - # Change data on master, make PAGE backup from replica, restore taken backup and check that restored data equal to original data + # Change data on master, make PAGE backup from replica, + # restore taken backup and check that restored data equal + # to original data master.psql( "postgres", "insert into t_heap as select i as id, md5(i::text) as text, " diff --git a/tests/backup_test.py b/tests/backup_test.py index 5af59684..1fa74643 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -29,15 +29,11 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.set_archiving(backup_dir, 'node', node) node.start() - # full backup mode - # with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log: - # backup_log.write(self.backup_node(node, options=["--verbose"])) - backup_id = self.backup_node(backup_dir, 'node', node) show_backup = self.show_pb(backup_dir, 'node')[0] - self.assertEqual(show_backup['Status'], "OK") - self.assertEqual(show_backup['Mode'], "FULL") + self.assertEqual(show_backup['status'], "OK") + 
self.assertEqual(show_backup['backup-mode'], "FULL") # postmaster.pid and postmaster.opts shouldn't be copied excluded = True @@ -61,29 +57,29 @@ class BackupTest(ProbackupTest, unittest.TestCase): # print self.show_pb(node) show_backup = self.show_pb(backup_dir, 'node')[1] - self.assertEqual(show_backup['Status'], "OK") - self.assertEqual(show_backup['Mode'], "PAGE") + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") # Check parent backup self.assertEqual( backup_id, self.show_pb( backup_dir, 'node', - backup_id=show_backup['ID'])["parent-backup-id"]) + backup_id=show_backup['id'])["parent-backup-id"]) # ptrack backup mode self.backup_node(backup_dir, 'node', node, backup_type="ptrack") show_backup = self.show_pb(backup_dir, 'node')[2] - self.assertEqual(show_backup['Status'], "OK") - self.assertEqual(show_backup['Mode'], "PTRACK") + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PTRACK") # Check parent backup self.assertEqual( page_backup_id, self.show_pb( backup_dir, 'node', - backup_id=show_backup['ID'])["parent-backup-id"]) + backup_id=show_backup['id'])["parent-backup-id"]) # Clean after yourself self.del_test_dir(module_name, fname) @@ -106,7 +102,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.backup_node( backup_dir, 'node', node, options=["-C"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") node.stop() # Clean after yourself @@ -162,7 +158,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): repr(e.message), self.cmd)) self.assertEqual( - self.show_pb(backup_dir, 'node')[0]['Status'], + self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") # Clean after yourself @@ -227,7 +223,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.assertEqual( self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT") self.assertEqual( - self.show_pb(backup_dir, 'node')[1]['Status'], "ERROR") + self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") # Clean after yourself self.del_test_dir(module_name, fname) @@ -250,12 +246,12 @@ class BackupTest(ProbackupTest, unittest.TestCase): self.backup_node( backup_dir, 'node', node, backup_type="full", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") self.backup_node( backup_dir, 'node', node, backup_type="ptrack", options=["-j", "4"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") # Clean after yourself self.del_test_dir(module_name, fname) @@ -282,11 +278,11 @@ class BackupTest(ProbackupTest, unittest.TestCase): backup_dir, 'node', node, backup_type="full", options=["-j", "4", "--stream"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") self.backup_node( backup_dir, 'node', node, backup_type="ptrack", options=["-j", "4", "--stream"]) - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") # Clean after yourself self.del_test_dir(module_name, fname) @@ -342,7 +338,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): f.close self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK', + self.show_pb(backup_dir, 
'node')[1]['status'] == 'OK', "Backup Status should be OK") # Clean after yourself @@ -415,7 +411,7 @@ class BackupTest(ProbackupTest, unittest.TestCase): repr(e.message), self.cmd)) self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR', + self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR', "Backup Status should be ERROR") # Clean after yourself diff --git a/tests/delete_test.py b/tests/delete_test.py index 039f3606..4afb15ae 100644 --- a/tests/delete_test.py +++ b/tests/delete_test.py @@ -44,13 +44,13 @@ class DeleteTest(ProbackupTest, unittest.TestCase): self.backup_node(backup_dir, 'node', node) show_backups = self.show_pb(backup_dir, 'node') - id_1 = show_backups[0]['ID'] - id_2 = show_backups[1]['ID'] - id_3 = show_backups[2]['ID'] + id_1 = show_backups[0]['id'] + id_2 = show_backups[1]['id'] + id_3 = show_backups[2]['id'] self.delete_pb(backup_dir, 'node', id_2) show_backups = self.show_pb(backup_dir, 'node') - self.assertEqual(show_backups[0]['ID'], id_1) - self.assertEqual(show_backups[1]['ID'], id_3) + self.assertEqual(show_backups[0]['id'], id_1) + self.assertEqual(show_backups[1]['id'], id_3) # Clean after yourself self.del_test_dir(module_name, fname) @@ -82,15 +82,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase): self.assertEqual(len(show_backups), 4) # delete first page backup - self.delete_pb(backup_dir, 'node', show_backups[1]['ID']) + self.delete_pb(backup_dir, 'node', show_backups[1]['id']) show_backups = self.show_pb(backup_dir, 'node') self.assertEqual(len(show_backups), 2) - self.assertEqual(show_backups[0]['Mode'], "FULL") - self.assertEqual(show_backups[0]['Status'], "OK") - self.assertEqual(show_backups[1]['Mode'], "FULL") - self.assertEqual(show_backups[1]['Status'], "OK") + self.assertEqual(show_backups[0]['backup-mode'], "FULL") + self.assertEqual(show_backups[0]['status'], "OK") + self.assertEqual(show_backups[1]['backup-mode'], "FULL") + self.assertEqual(show_backups[1]['status'], "OK") # Clean after yourself self.del_test_dir(module_name, fname) @@ -122,15 +122,15 @@ class DeleteTest(ProbackupTest, unittest.TestCase): self.assertEqual(len(show_backups), 4) # delete first page backup - self.delete_pb(backup_dir, 'node', show_backups[1]['ID']) + self.delete_pb(backup_dir, 'node', show_backups[1]['id']) show_backups = self.show_pb(backup_dir, 'node') self.assertEqual(len(show_backups), 2) - self.assertEqual(show_backups[0]['Mode'], "FULL") - self.assertEqual(show_backups[0]['Status'], "OK") - self.assertEqual(show_backups[1]['Mode'], "FULL") - self.assertEqual(show_backups[1]['Status'], "OK") + self.assertEqual(show_backups[0]['backup-mode'], "FULL") + self.assertEqual(show_backups[0]['status'], "OK") + self.assertEqual(show_backups[1]['backup-mode'], "FULL") + self.assertEqual(show_backups[1]['status'], "OK") # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/delta.py b/tests/delta.py index 303c7c58..7cf21758 100644 --- a/tests/delta.py +++ b/tests/delta.py @@ -1191,7 +1191,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase): f.close self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK', + self.show_pb(backup_dir, 'node')[1]['status'] == 'OK', "Backup Status should be OK") # Clean after yourself @@ -1264,7 +1264,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase): repr(e.message), self.cmd)) self.assertTrue( - self.show_pb(backup_dir, 'node')[1]['Status'] == 'ERROR', + self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR', "Backup Status should be ERROR") # Clean after 
yourself diff --git a/tests/exclude.py b/tests/exclude.py index 763060c7..4f9d73ab 100644 --- a/tests/exclude.py +++ b/tests/exclude.py @@ -34,25 +34,33 @@ class ExcludeTest(ProbackupTest, unittest.TestCase): temp_schema_name = conn.execute("SELECT nspname FROM pg_namespace WHERE oid = pg_my_temp_schema()")[0][0] conn.commit() - temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace("pg_", "") + temp_toast_schema_name = "pg_toast_" + temp_schema_name.replace( + "pg_", "") conn.commit() conn.execute("create index test_idx on test (generate_series)") conn.commit() - heap_path = conn.execute("select pg_relation_filepath('test')")[0][0] + heap_path = conn.execute( + "select pg_relation_filepath('test')")[0][0] conn.commit() - index_path = conn.execute("select pg_relation_filepath('test_idx')")[0][0] + index_path = conn.execute( + "select pg_relation_filepath('test_idx')")[0][0] conn.commit() heap_oid = conn.execute("select 'test'::regclass::oid")[0][0] conn.commit() - toast_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0] + toast_path = conn.execute( + "select pg_relation_filepath('{0}.{1}')".format( + temp_toast_schema_name, "pg_toast_" + str(heap_oid)))[0][0] conn.commit() - toast_idx_path = conn.execute("select pg_relation_filepath('{0}.{1}')".format(temp_toast_schema_name, "pg_toast_" + str(heap_oid) + "_index"))[0][0] + toast_idx_path = conn.execute( + "select pg_relation_filepath('{0}.{1}')".format( + temp_toast_schema_name, + "pg_toast_" + str(heap_oid) + "_index"))[0][0] conn.commit() temp_table_filename = os.path.basename(heap_path) diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index a776599c..af7fe766 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -12,6 +12,7 @@ import select import psycopg2 from time import sleep import re +import json idx_ptrack = { 't_heap': { @@ -598,7 +599,7 @@ class ProbackupTest(object): def show_pb( self, backup_dir, instance=None, backup_id=None, - options=[], as_text=False + options=[], as_text=False, as_json=True ): backup_list = [] @@ -613,63 +614,83 @@ class ProbackupTest(object): if backup_id: cmd_list += ["-i", backup_id] + if as_json: + cmd_list += ["--format=json"] + if as_text: # You should print it when calling as_text=true return self.run_pb(cmd_list + options) # get show result as list of lines - show_splitted = self.run_pb(cmd_list + options).splitlines() - if instance is not None and backup_id is None: - # cut header(ID, Mode, etc) from show as single string - header = show_splitted[1:2][0] - # cut backup records from show as single list - # with string for every backup record - body = show_splitted[3:] - # inverse list so oldest record come first - body = body[::-1] - # split string in list with string for every header element - header_split = re.split(" +", header) - # Remove empty items - for i in header_split: - if i == '': - header_split.remove(i) + if as_json: + data = json.loads(self.run_pb(cmd_list + options)) + # print(data) + for instance_data in data: + # find specific instance if requested + if instance and instance_data['instance'] != instance: continue - header_split = [ - header_element.rstrip() for header_element in header_split - ] - for backup_record in body: - backup_record = backup_record.rstrip() - # split list with str for every backup record element - backup_record_split = re.split(" +", backup_record) - # Remove empty items - for i in backup_record_split: - if i 
== '': - backup_record_split.remove(i) - if len(header_split) != len(backup_record_split): - print(warning.format( - header=header, body=body, - header_split=header_split, - body_split=backup_record_split) - ) - exit(1) - new_dict = dict(zip(header_split, backup_record_split)) - backup_list.append(new_dict) + + for backup in reversed(instance_data['backups']): + # find specific backup if requested + if backup_id: + if backup['id'] == backup_id: + return backup + else: + backup_list.append(backup) return backup_list else: - # cut out empty lines and lines started with # - # and other garbage then reconstruct it as dictionary - # print show_splitted - sanitized_show = [item for item in show_splitted if item] - sanitized_show = [ - item for item in sanitized_show if not item.startswith('#') - ] - # print sanitized_show - for line in sanitized_show: - name, var = line.partition(" = ")[::2] - var = var.strip('"') - var = var.strip("'") - specific_record[name.strip()] = var - return specific_record + show_splitted = self.run_pb(cmd_list + options).splitlines() + if instance is not None and backup_id is None: + # cut header(ID, Mode, etc) from show as single string + header = show_splitted[1:2][0] + # cut backup records from show as single list + # with string for every backup record + body = show_splitted[3:] + # inverse list so oldest record come first + body = body[::-1] + # split string in list with string for every header element + header_split = re.split(" +", header) + # Remove empty items + for i in header_split: + if i == '': + header_split.remove(i) + continue + header_split = [ + header_element.rstrip() for header_element in header_split + ] + for backup_record in body: + backup_record = backup_record.rstrip() + # split list with str for every backup record element + backup_record_split = re.split(" +", backup_record) + # Remove empty items + for i in backup_record_split: + if i == '': + backup_record_split.remove(i) + if len(header_split) != len(backup_record_split): + print(warning.format( + header=header, body=body, + header_split=header_split, + body_split=backup_record_split) + ) + exit(1) + new_dict = dict(zip(header_split, backup_record_split)) + backup_list.append(new_dict) + return backup_list + else: + # cut out empty lines and lines started with # + # and other garbage then reconstruct it as dictionary + # print show_splitted + sanitized_show = [item for item in show_splitted if item] + sanitized_show = [ + item for item in sanitized_show if not item.startswith('#') + ] + # print sanitized_show + for line in sanitized_show: + name, var = line.partition(" = ")[::2] + var = var.strip('"') + var = var.strip("'") + specific_record[name.strip()] = var + return specific_record def validate_pb( self, backup_dir, instance=None, diff --git a/tests/pgpro589.py b/tests/pgpro589.py index a67f3dd4..bd40f16d 100644 --- a/tests/pgpro589.py +++ b/tests/pgpro589.py @@ -63,7 +63,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - backup_id = self.show_pb(backup_dir, 'node')[0]['ID'] + backup_id = self.show_pb(backup_dir, 'node')[0]['id'] self.assertEqual( 'ERROR', self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup should have ERROR status') diff --git a/tests/ptrack.py b/tests/ptrack.py index 9c21ff55..4823acef 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -268,7 +268,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # Physical comparison if self.paranoia: - pgdata_restored = 
self.pgdata_content(node_restored.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) node_restored.append_conf( @@ -430,7 +431,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # Physical comparison if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) node_restored.append_conf( @@ -503,7 +505,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # Physical comparison if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) node.start() @@ -584,8 +587,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): repr(self.output), self.cmd) ) node.start() - while node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(full_result, full_result_new) @@ -604,12 +607,13 @@ class PtrackTest(ProbackupTest, unittest.TestCase): ) if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) node.start() - while node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(ptrack_result, ptrack_result_new) @@ -688,9 +692,11 @@ class PtrackTest(ProbackupTest, unittest.TestCase): repr(self.output), self.cmd) ) node.start() - while node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + + while node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) + full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -711,12 +717,13 @@ class PtrackTest(ProbackupTest, unittest.TestCase): ) if self.paranoia: - pgdata_restored = self.pgdata_content(node.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) node.start() - while node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") self.assertEqual(ptrack_result, ptrack_result_new) @@ -811,7 +818,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_page_pgpro417(self): - """Make archive node, take full backup, take page backup, delete page backup. Try to take ptrack backup, which should fail""" + """ + Make archive node, take full backup, take page backup, + delete page backup. 
Try to take ptrack backup, which should fail + """ self.maxDiff = None fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -880,7 +890,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_full_pgpro417(self): - """Make node, take two full backups, delete full second backup. Try to take ptrack backup, which should fail""" + """ + Make node, take two full backups, delete the second full backup. + Try to take ptrack backup, which should fail + """ self.maxDiff = None fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -954,7 +967,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_create_db(self): - """Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense""" + """ + Make node, take full backup, create database db1, take ptrack backup, + restore the database and check its presence + """ self.maxDiff = None fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1017,7 +1033,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # COMPARE PHYSICAL CONTENT if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE @@ -1046,7 +1063,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # COMPARE PHYSICAL CONTENT if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE @@ -1151,7 +1169,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # GET RESTORED PGDATA AND COMPARE if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE @@ -1159,8 +1178,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): 'postgresql.auto.conf', 'port = {0}'.format(node_restored.port)) node_restored.start() - while node_restored.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while node_restored.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) result_new = node_restored.safe_psql( "postgres", "select * from t_heap") @@ -1229,7 +1248,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT if self.paranoia: - pgdata_restored = self.pgdata_content(node_restored.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + node_restored.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE @@ -1240,7 +1260,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_drop_tablespace(self): - """Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup""" + """ + Make node, create table, alter table tablespace, take ptrack backup, + move table from tablespace, take ptrack backup + """ self.maxDiff = None fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@
-1321,7 +1344,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_ptrack_alter_tablespace(self): - """Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup""" + """ + Make node, create table, alter table tablespace, take ptrack backup, + move table from tablespace, take ptrack backup + """ self.maxDiff = None fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') @@ -1379,15 +1405,16 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT if self.paranoia: - pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + restored_node.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE restored_node.append_conf( "postgresql.auto.conf", "port = {0}".format(restored_node.port)) restored_node.start() - while restored_node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while restored_node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) # COMPARE LOGICAL CONTENT @@ -1416,14 +1443,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT if self.paranoia: - pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + restored_node.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE restored_node.append_conf( "postgresql.auto.conf", "port = {0}".format(restored_node.port)) restored_node.start() - while restored_node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while restored_node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) @@ -1437,7 +1465,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_ptrack_multiple_segments(self): - """Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup""" + """ + Make node, create table, alter table tablespace, + take ptrack backup, move table from tablespace, take ptrack backup + """ fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') node = self.make_simple_node( @@ -1446,9 +1477,10 @@ class PtrackTest(ProbackupTest, unittest.TestCase): initdb_params=['--data-checksums'], pg_options={ 'wal_level': 'replica', 'max_wal_senders': '2', - 'ptrack_enable': 'on', 'fsync': 'off', 'shared_buffers': '128MB', - 'maintenance_work_mem': '1GB', 'autovacuum': 'off', - 'full_page_writes': 'off'} + 'ptrack_enable': 'on', 'fsync': 'off', + 'autovacuum': 'off', + 'full_page_writes': 'off' + } ) self.init_pb(backup_dir) @@ -1514,14 +1546,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase): # GET PHYSICAL CONTENT FROM NODE_RESTORED if self.paranoia: - pgdata_restored = self.pgdata_content(restored_node.data_dir, ignore_ptrack=False) + pgdata_restored = self.pgdata_content( + restored_node.data_dir, ignore_ptrack=False) self.compare_pgdata(pgdata, pgdata_restored) # START RESTORED NODE restored_node.append_conf( "postgresql.auto.conf", "port = {0}".format(restored_node.port)) restored_node.start() - while restored_node.safe_psql( - "postgres", "select pg_is_in_recovery()") == 't\n': + while restored_node.psql( + "postgres", "select pg_is_in_recovery()")[0] != 0: time.sleep(1) diff --git a/tests/restore_test.py b/tests/restore_test.py index
4567f37b..fce96911 100644 --- a/tests/restore_test.py +++ b/tests/restore_test.py @@ -718,7 +718,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): con.commit() backup_id = self.backup_node(backup_dir, 'node', node) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") # 1 - Try to restore to existing directory node.stop() @@ -785,8 +785,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase): backup_dir, 'node', node, backup_type="page") show_pb = self.show_pb(backup_dir, 'node') - self.assertEqual(show_pb[1]['Status'], "OK") - self.assertEqual(show_pb[2]['Status'], "OK") + self.assertEqual(show_pb[1]['status'], "OK") + self.assertEqual(show_pb[2]['status'], "OK") node.stop() node.cleanup() @@ -829,7 +829,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase): # Full backup self.backup_node(backup_dir, 'node', node) - self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], "OK") + self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") # Create tablespace tblspc_path = os.path.join(node.base_dir, "tblspc") @@ -845,8 +845,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase): # First page backup self.backup_node(backup_dir, 'node', node, backup_type="page") - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], "OK") - self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Mode'], "PAGE") + self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") + self.assertEqual( + self.show_pb(backup_dir, 'node')[1]['backup-mode'], "PAGE") # Create tablespace table with node.connect("postgres") as con: @@ -862,8 +863,9 @@ class RestoreTest(ProbackupTest, unittest.TestCase): # Second page backup backup_id = self.backup_node( backup_dir, 'node', node, backup_type="page") - self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Status'], "OK") - self.assertEqual(self.show_pb(backup_dir, 'node')[2]['Mode'], "PAGE") + self.assertEqual(self.show_pb(backup_dir, 'node')[2]['status'], "OK") + self.assertEqual( + self.show_pb(backup_dir, 'node')[2]['backup-mode'], "PAGE") node.stop() node.cleanup() diff --git a/tests/retention_test.py b/tests/retention_test.py index 2d4cac37..652f7c39 100644 --- a/tests/retention_test.py +++ b/tests/retention_test.py @@ -14,7 +14,8 @@ class RetentionTest(ProbackupTest, unittest.TestCase): def test_retention_redundancy_1(self): """purge backups using redundancy-based retention policy""" fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname), + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), initdb_params=['--data-checksums'], pg_options={'wal_level': 'replica'} ) @@ -24,7 +25,9 @@ class RetentionTest(ProbackupTest, unittest.TestCase): self.set_archiving(backup_dir, 'node', node) node.start() - with open(os.path.join(backup_dir, 'backups', 'node', "pg_probackup.conf"), "a") as conf: + with open(os.path.join( + backup_dir, 'backups', 'node', + "pg_probackup.conf"), "a") as conf: conf.write("retention-redundancy = 1\n") # Make backups to be purged @@ -57,7 +60,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase): for wal_name in os.listdir(os.path.join(backup_dir, 'wal', 'node')): if not wal_name.endswith(".backup"): - #wal_name_b = wal_name.encode('ascii') + # wal_name_b = wal_name.encode('ascii') self.assertEqual(wal_name[8:] > min_wal[8:], True) self.assertEqual(wal_name[8:] > max_wal[8:], True) @@ -68,7 +71,8 @@ class 
RetentionTest(ProbackupTest, unittest.TestCase): def test_retention_window_2(self): """purge backups using window-based retention policy""" fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname), + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), initdb_params=['--data-checksums'], pg_options={'wal_level': 'replica'} ) diff --git a/tests/show_test.py b/tests/show_test.py index 74bd0341..29d0bdb3 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -36,6 +36,35 @@ class OptionTest(ProbackupTest, unittest.TestCase): # Clean after yourself self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_show_json(self): + """Status DONE and OK""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir="{0}/{1}/node".format(module_name, fname), + initdb_params=['--data-checksums'], + pg_options={'wal_level': 'replica'} + ) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.start() + + self.assertEqual( + self.backup_node( + backup_dir, 'node', node, + options=["--log-level-console=panic"]), + None + ) + self.backup_node(backup_dir, 'node', node) + self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) + + # Clean after yourself + self.del_test_dir(module_name, fname) + # @unittest.skip("skip") def test_corrupt_2(self): """Status CORRUPT""" diff --git a/tests/validate_test.py b/tests/validate_test.py index 06ea1ea3..afb2305f 100644 --- a/tests/validate_test.py +++ b/tests/validate_test.py @@ -908,7 +908,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase): backup_id = self.backup_node(backup_dir, 'node', node) target_xid = None with node.connect("postgres") as con: - res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") + res = con.execute( + "INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") con.commit() target_xid = res[0][0] @@ -1041,7 +1042,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_validate_corrupt_wal_between_backups(self): - """make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors""" + """ + make archive node, make full backup, corrupt all wal files, + run validate to real xid, expect errors + """ fname = self.id().split('.')[3] node = self.make_simple_node( base_dir="{0}/{1}/node".format(module_name, fname), @@ -1083,7 +1087,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase): else: walfile = node.safe_psql( 'postgres', - 'select pg_walfile_name(pg_current_wal_location())').rstrip() + 'select pg_walfile_name(pg_current_wal_lsn())').rstrip() if self.archive_compress: walfile = walfile + '.gz' @@ -1134,12 +1138,12 @@ class ValidateTest(ProbackupTest, unittest.TestCase): self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[0]['Status'], + self.show_pb(backup_dir, 'node')[0]['status'], 'Backup STATUS should be "OK"') self.assertEqual( 'OK', - self.show_pb(backup_dir, 'node')[1]['Status'], + self.show_pb(backup_dir, 'node')[1]['status'], 'Backup STATUS should be "OK"') # Clean after yourself @@ -1208,7 +1212,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase): self.assertEqual( 'ERROR', - self.show_pb(backup_dir, 'node')[1]['Status'], + self.show_pb(backup_dir, 'node')[1]['status'], 'Backup {0} should have STATUS 
"ERROR"') # Clean after yourself @@ -1405,7 +1409,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase): except ProbackupException as e: pass self.assertTrue( - self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR') + self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') self.set_archiving(backup_dir, 'node', node) node.reload() self.backup_node(backup_dir, 'node', node, backup_type='page') @@ -1440,14 +1444,19 @@ class ValidateTest(ProbackupTest, unittest.TestCase): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue( + self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') + self.assertTrue( + self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.assertTrue( + self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue( + self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') + self.assertTrue( + self.show_pb(backup_dir, 'node')[7]['status'] == 'ORPHAN') os.rename(file_new, file) try: @@ -1459,14 +1468,15 @@ class ValidateTest(ProbackupTest, unittest.TestCase): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ERROR') - self.assertTrue(self.show_pb(backup_dir, 'node')[7]['Status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'OK') + self.assertTrue( + self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') + self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') # Clean after yourself self.del_test_dir(module_name, fname) @@ -1537,13 +1547,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK') - 
self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'ORPHAN') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') os.rename(file_new, file) file = os.path.join( @@ -1562,13 +1572,13 @@ class ValidateTest(ProbackupTest, unittest.TestCase): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.assertTrue(self.show_pb(backup_dir, 'node')[0]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[1]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[2]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[3]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[4]['Status'] == 'OK') - self.assertTrue(self.show_pb(backup_dir, 'node')[5]['Status'] == 'CORRUPT') - self.assertTrue(self.show_pb(backup_dir, 'node')[6]['Status'] == 'ORPHAN') + self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[2]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[3]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[4]['status'] == 'OK') + self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT') + self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') # Clean after yourself self.del_test_dir(module_name, fname) From c71151d3df99b5b656190da7225e48ab82c01d7c Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Wed, 6 Jun 2018 14:07:10 +0300 Subject: [PATCH 6/7] PGPRO-533: Add json format for show-config --- Makefile | 3 +- src/configure.c | 162 ++++++++++++++++++++++++++++++++- src/show.c | 232 ++++++++++++----------------------------------- src/utils/json.c | 134 +++++++++++++++++++++++++++ src/utils/json.h | 33 +++++++ 5 files changed, 388 insertions(+), 176 deletions(-) create mode 100644 src/utils/json.c create mode 100644 src/utils/json.h diff --git a/Makefile b/Makefile index 0f4fa672..9880e5db 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,8 @@ OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \ src/pg_probackup.o src/restore.o src/show.o src/status.o \ src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \ src/xlogreader.o src/streamutil.o src/receivelog.o \ - src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o + src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \ + src/utils/json.o EXTRA_CLEAN = src/datapagemap.c src/datapagemap.h src/xlogreader.c \ src/receivelog.c src/receivelog.h src/streamutil.c src/streamutil.h src/logging.h diff --git a/src/configure.c b/src/configure.c index 55d2bbc5..0410dc74 100644 --- a/src/configure.c +++ b/src/configure.c @@ -2,19 
+2,33 @@ * * configure.c: - manage backup catalog. * - * Copyright (c) 2017-2017, Postgres Professional + * Copyright (c) 2017-2018, Postgres Professional * *------------------------------------------------------------------------- */ #include "pg_probackup.h" +#include "pqexpbuffer.h" + +#include "utils/json.h" + + static void opt_log_level_console(pgut_option *opt, const char *arg); static void opt_log_level_file(pgut_option *opt, const char *arg); static void opt_compress_alg(pgut_option *opt, const char *arg); +static void show_configure_start(void); +static void show_configure_end(void); +static void show_configure(pgBackupConfig *config); + +static void show_configure_json(pgBackupConfig *config); + static pgBackupConfig *cur_config = NULL; +static PQExpBufferData show_buf; +static int32 json_level = 0; + /* Set configure options */ int do_configure(bool show_only) @@ -68,7 +82,7 @@ do_configure(bool show_only) config->compress_level = compress_level; if (show_only) - writeBackupCatalogConfig(stderr, config); + show_configure(config); else writeBackupCatalogConfigFile(config); @@ -251,7 +265,6 @@ readBackupCatalogConfigFile(void) pgut_readopt(path, options, ERROR); return config; - } static void @@ -271,3 +284,146 @@ opt_compress_alg(pgut_option *opt, const char *arg) { cur_config->compress_alg = parse_compress_alg(arg); } + +/* + * Initialize configure visualization. + */ +static void +show_configure_start(void) +{ + if (show_format == SHOW_PLAIN) + return; + + /* For now we need buffer only for JSON format */ + json_level = 0; + initPQExpBuffer(&show_buf); +} + +/* + * Finalize configure visualization. + */ +static void +show_configure_end(void) +{ + if (show_format == SHOW_PLAIN) + return; + else + appendPQExpBufferChar(&show_buf, '\n'); + + fputs(show_buf.data, stdout); + termPQExpBuffer(&show_buf); +} + +/* + * Show configure information of pg_probackup. + */ +static void +show_configure(pgBackupConfig *config) +{ + show_configure_start(); + + if (show_format == SHOW_PLAIN) + writeBackupCatalogConfig(stdout, config); + else + show_configure_json(config); + + show_configure_end(); +} + +/* + * Json output. 
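+ * + * For illustration, the object written by show_configure_json() below looks roughly like this; the path and values here are hypothetical: + * + * { + * "pgdata": "/var/lib/pgsql/data", + * "system-identifier": 6365153161807045716, + * "compress-algorithm": "none", + * "compress-level": 1 + * }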
+ */ + +static void +show_configure_json(pgBackupConfig *config) +{ + PQExpBuffer buf = &show_buf; + + json_add(buf, JT_BEGIN_OBJECT, &json_level); + + json_add_value(buf, "pgdata", config->pgdata, json_level, false); + + json_add_key(buf, "system-identifier", json_level, true); + appendPQExpBuffer(buf, UINT64_FORMAT, config->system_identifier); + + /* Connection parameters */ + if (config->pgdatabase) + json_add_value(buf, "pgdatabase", config->pgdatabase, json_level, true); + if (config->pghost) + json_add_value(buf, "pghost", config->pghost, json_level, true); + if (config->pgport) + json_add_value(buf, "pgport", config->pgport, json_level, true); + if (config->pguser) + json_add_value(buf, "pguser", config->pguser, json_level, true); + + /* Replica parameters */ + if (config->master_host) + json_add_value(buf, "master-host", config->master_host, json_level, + true); + if (config->master_port) + json_add_value(buf, "master-port", config->master_port, json_level, + true); + if (config->master_db) + json_add_value(buf, "master-db", config->master_db, json_level, true); + if (config->master_user) + json_add_value(buf, "master-user", config->master_user, json_level, + true); + + if (config->replica_timeout != INT_MIN) + { + json_add_key(buf, "replica-timeout", json_level, true); + appendPQExpBuffer(buf, "%d", config->replica_timeout); + } + + /* Logging parameters */ + if (config->log_level_console != INT_MIN) + json_add_value(buf, "log-level-console", + deparse_log_level(config->log_level_console), json_level, + true); + if (config->log_level_file != INT_MIN) + json_add_value(buf, "log-level-file", + deparse_log_level(config->log_level_file), json_level, + true); + if (config->log_filename) + json_add_value(buf, "log-filename", config->log_filename, json_level, + true); + if (config->error_log_filename) + json_add_value(buf, "error-log-filename", config->error_log_filename, + json_level, true); + if (config->log_directory) + json_add_value(buf, "log-directory", config->log_directory, json_level, + true); + + if (config->log_rotation_size) + { + json_add_key(buf, "log-rotation-size", json_level, true); + appendPQExpBuffer(buf, "%d", config->log_rotation_size); + } + if (config->log_rotation_age) + { + json_add_key(buf, "log-rotation-age", json_level, true); + appendPQExpBuffer(buf, "%d", config->log_rotation_age); + } + + /* Retention parameters */ + if (config->retention_redundancy) + { + json_add_key(buf, "retention-redundancy", json_level, true); + appendPQExpBuffer(buf, "%u", config->retention_redundancy); + } + if (config->retention_window) + { + json_add_key(buf, "retention-window", json_level, true); + appendPQExpBuffer(buf, "%u", config->retention_window); + } + + /* Compression parameters */ + json_add_value(buf, "compress-algorithm", + deparse_compress_alg(config->compress_alg), json_level, + true); + + json_add_key(buf, "compress-level", json_level, true); + appendPQExpBuffer(buf, "%d", config->compress_level); + + json_add(buf, JT_END_OBJECT, &json_level); +} diff --git a/src/show.c b/src/show.c index a39ba528..01a558e0 100644 --- a/src/show.c +++ b/src/show.c @@ -17,6 +17,8 @@ #include "pqexpbuffer.h" +#include "utils/json.h" + static void show_instance_start(void); static void show_instance_end(void); @@ -26,24 +28,9 @@ static int show_backup(time_t requested_backup_id); static void show_instance_plain(parray *backup_list, bool show_name); static void show_instance_json(parray *backup_list); -/* Json output functions */ - -typedef enum -{ - JT_BEGIN_ARRAY, - JT_END_ARRAY, - 
JT_BEGIN_OBJECT, - JT_END_OBJECT -} JsonToken; - -static void json_add(PQExpBuffer buf, JsonToken type); -static void json_add_key(PQExpBuffer buf, const char *name, bool add_comma); -static void json_add_value(PQExpBuffer buf, const char *name, const char *value, - bool add_comma); - static PQExpBufferData show_buf; static bool first_instance = true; -static uint8 json_level = 0; +static int32 json_level = 0; int do_show(time_t requested_backup_id) @@ -377,115 +364,6 @@ show_instance_plain(parray *backup_list, bool show_name) * Json output. */ -static void -json_add_indent(PQExpBuffer buf) -{ - uint8 i; - - if (json_level == 0) - return; - - appendPQExpBufferChar(buf, '\n'); - for (i = 0; i < json_level; i++) - appendPQExpBufferStr(buf, " "); -} - -static void -json_add(PQExpBuffer buf, JsonToken type) -{ - switch (type) - { - case JT_BEGIN_ARRAY: - appendPQExpBufferChar(buf, '['); - json_level++; - break; - case JT_END_ARRAY: - json_level--; - if (json_level == 0) - appendPQExpBufferChar(buf, '\n'); - else - json_add_indent(buf); - appendPQExpBufferChar(buf, ']'); - break; - case JT_BEGIN_OBJECT: - json_add_indent(buf); - appendPQExpBufferChar(buf, '{'); - json_level++; - break; - case JT_END_OBJECT: - json_level--; - if (json_level == 0) - appendPQExpBufferChar(buf, '\n'); - else - json_add_indent(buf); - appendPQExpBufferChar(buf, '}'); - break; - default: - break; - } -} - -static void -json_add_escaped(PQExpBuffer buf, const char *str) -{ - const char *p; - - appendPQExpBufferChar(buf, '"'); - for (p = str; *p; p++) - { - switch (*p) - { - case '\b': - appendPQExpBufferStr(buf, "\\b"); - break; - case '\f': - appendPQExpBufferStr(buf, "\\f"); - break; - case '\n': - appendPQExpBufferStr(buf, "\\n"); - break; - case '\r': - appendPQExpBufferStr(buf, "\\r"); - break; - case '\t': - appendPQExpBufferStr(buf, "\\t"); - break; - case '"': - appendPQExpBufferStr(buf, "\\\""); - break; - case '\\': - appendPQExpBufferStr(buf, "\\\\"); - break; - default: - if ((unsigned char) *p < ' ') - appendPQExpBuffer(buf, "\\u%04x", (int) *p); - else - appendPQExpBufferChar(buf, *p); - break; - } - } - appendPQExpBufferChar(buf, '"'); -} - -static void -json_add_key(PQExpBuffer buf, const char *name, bool add_comma) -{ - if (add_comma) - appendPQExpBufferChar(buf, ','); - json_add_indent(buf); - - json_add_escaped(buf, name); - appendPQExpBufferStr(buf, ": "); -} - -static void -json_add_value(PQExpBuffer buf, const char *name, const char *value, - bool add_comma) -{ - json_add_key(buf, name, add_comma); - json_add_escaped(buf, value); -} - /* * Show instance backups in json format. */ @@ -499,116 +377,126 @@ show_instance_json(parray *backup_list) appendPQExpBufferChar(buf, ','); /* Begin of instance object */ - json_add(buf, JT_BEGIN_OBJECT); + json_add(buf, JT_BEGIN_OBJECT, &json_level); - json_add_value(buf, "instance", instance_name, false); - json_add_key(buf, "backups", true); + json_add_value(buf, "instance", instance_name, json_level, false); + json_add_key(buf, "backups", json_level, true); /* * List backups. 
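 * For illustration, each element of the array built here is an object such as * {"id": "QB2I5P", "backup-mode": "FULL", "wal": "ARCHIVE", "status": "OK", ...}, * where the id and field values shown are hypothetical.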
*/ - json_add(buf, JT_BEGIN_ARRAY); + json_add(buf, JT_BEGIN_ARRAY, &json_level); for (i = 0; i < parray_num(backup_list); i++) { pgBackup *backup = parray_get(backup_list, i); TimeLineID parent_tli; char timestamp[100] = "----"; - char duration[20] = "----"; - char data_bytes_str[10] = "----"; char lsn[20]; if (i != 0) appendPQExpBufferChar(buf, ','); - json_add(buf, JT_BEGIN_OBJECT); + json_add(buf, JT_BEGIN_OBJECT, &json_level); - json_add_value(buf, "id", base36enc(backup->start_time), false); + json_add_value(buf, "id", base36enc(backup->start_time), json_level, + false); if (backup->parent_backup != 0) json_add_value(buf, "parent-backup-id", - base36enc(backup->parent_backup), true); + base36enc(backup->parent_backup), json_level, true); - json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), true); + json_add_value(buf, "backup-mode", pgBackupGetBackupMode(backup), + json_level, true); - json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE", true); + json_add_value(buf, "wal", backup->stream ? "STREAM": "ARCHIVE", + json_level, true); json_add_value(buf, "compress-alg", - deparse_compress_alg(backup->compress_alg), true); + deparse_compress_alg(backup->compress_alg), json_level, + true); - json_add_key(buf, "compress-level", true); + json_add_key(buf, "compress-level", json_level, true); appendPQExpBuffer(buf, "%d", backup->compress_level); json_add_value(buf, "from-replica", - backup->from_replica ? "true" : "false", true); + backup->from_replica ? "true" : "false", json_level, + true); - json_add_key(buf, "block-size", true); + json_add_key(buf, "block-size", json_level, true); appendPQExpBuffer(buf, "%u", backup->block_size); - json_add_key(buf, "xlog-block-size", true); + json_add_key(buf, "xlog-block-size", json_level, true); appendPQExpBuffer(buf, "%u", backup->wal_block_size); - json_add_key(buf, "checksum-version", true); + json_add_key(buf, "checksum-version", json_level, true); appendPQExpBuffer(buf, "%u", backup->checksum_version); - json_add_value(buf, "program-version", backup->program_version, true); - json_add_value(buf, "server-version", backup->server_version, true); + json_add_value(buf, "program-version", backup->program_version, + json_level, true); + json_add_value(buf, "server-version", backup->server_version, + json_level, true); - json_add_key(buf, "current-tli", true); + json_add_key(buf, "current-tli", json_level, true); appendPQExpBuffer(buf, "%d", backup->tli); - json_add_key(buf, "parent-tli", true); + json_add_key(buf, "parent-tli", json_level, true); parent_tli = get_parent_tli(backup->tli); appendPQExpBuffer(buf, "%u", parent_tli); snprintf(lsn, lengthof(lsn), "%X/%X", (uint32) (backup->start_lsn >> 32), (uint32) backup->start_lsn); - json_add_value(buf, "start-lsn", lsn, true); + json_add_value(buf, "start-lsn", lsn, json_level, true); snprintf(lsn, lengthof(lsn), "%X/%X", (uint32) (backup->stop_lsn >> 32), (uint32) backup->stop_lsn); - json_add_value(buf, "stop-lsn", lsn, true); + json_add_value(buf, "stop-lsn", lsn, json_level, true); time2iso(timestamp, lengthof(timestamp), backup->start_time); - json_add_value(buf, "start-time", timestamp, true); + json_add_value(buf, "start-time", timestamp, json_level, true); - time2iso(timestamp, lengthof(timestamp), backup->end_time); - json_add_value(buf, "end-time", timestamp, true); + if (backup->end_time) + { + time2iso(timestamp, lengthof(timestamp), backup->end_time); + json_add_value(buf, "end-time", timestamp, json_level, true); + } - json_add_key(buf, "recovery-xid", true); + 
json_add_key(buf, "recovery-xid", json_level, true); appendPQExpBuffer(buf, XID_FMT, backup->recovery_xid); - time2iso(timestamp, lengthof(timestamp), backup->recovery_time); - json_add_value(buf, "recovery-time", timestamp, true); - - pretty_size(backup->data_bytes, data_bytes_str, - lengthof(data_bytes_str)); - json_add_value(buf, "data-bytes", data_bytes_str, true); - - pretty_size(backup->wal_bytes, data_bytes_str, - lengthof(data_bytes_str)); - json_add_value(buf, "wal-bytes", data_bytes_str, true); - - if (backup->end_time != (time_t) 0) + if (backup->recovery_time > 0) { - snprintf(duration, lengthof(duration), "%.*lfs", 0, - difftime(backup->end_time, backup->start_time)); - json_add_value(buf, "time", duration, true); + time2iso(timestamp, lengthof(timestamp), backup->recovery_time); + json_add_value(buf, "recovery-time", timestamp, json_level, true); + } + + if (backup->data_bytes != BYTES_INVALID) + { + json_add_key(buf, "data-bytes", json_level, true); + appendPQExpBuffer(buf, INT64_FORMAT, backup->data_bytes); + } + + if (backup->wal_bytes != BYTES_INVALID) + { + json_add_key(buf, "wal-bytes", json_level, true); + appendPQExpBuffer(buf, INT64_FORMAT, backup->wal_bytes); } if (backup->primary_conninfo) - json_add_value(buf, "primary_conninfo", backup->primary_conninfo, true); + json_add_value(buf, "primary_conninfo", backup->primary_conninfo, + json_level, true); - json_add_value(buf, "status", status2str(backup->status), true); + json_add_value(buf, "status", status2str(backup->status), json_level, + true); - json_add(buf, JT_END_OBJECT); + json_add(buf, JT_END_OBJECT, &json_level); } /* End of backups */ - json_add(buf, JT_END_ARRAY); + json_add(buf, JT_END_ARRAY, &json_level); /* End of instance object */ - json_add(buf, JT_END_OBJECT); + json_add(buf, JT_END_OBJECT, &json_level); first_instance = false; } diff --git a/src/utils/json.c b/src/utils/json.c new file mode 100644 index 00000000..3afbe9e7 --- /dev/null +++ b/src/utils/json.c @@ -0,0 +1,134 @@ +/*------------------------------------------------------------------------- + * + * json.c: - make json document. + * + * Copyright (c) 2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#include "json.h" + +static void json_add_indent(PQExpBuffer buf, int32 level); +static void json_add_escaped(PQExpBuffer buf, const char *str); + +/* + * Start or end a json token. Currently it is a json object or array. + * + * The function modifies the level value and adds indentation where appropriate. + */ +void +json_add(PQExpBuffer buf, JsonToken type, int32 *level) +{ + switch (type) + { + case JT_BEGIN_ARRAY: + appendPQExpBufferChar(buf, '['); + *level += 1; + break; + case JT_END_ARRAY: + *level -= 1; + if (*level == 0) + appendPQExpBufferChar(buf, '\n'); + else + json_add_indent(buf, *level); + appendPQExpBufferChar(buf, ']'); + break; + case JT_BEGIN_OBJECT: + json_add_indent(buf, *level); + appendPQExpBufferChar(buf, '{'); + *level += 1; + break; + case JT_END_OBJECT: + *level -= 1; + if (*level == 0) + appendPQExpBufferChar(buf, '\n'); + else + json_add_indent(buf, *level); + appendPQExpBufferChar(buf, '}'); + break; + default: + break; + } +} + +/* + * Add a json object's key. If it isn't the first key we need to add a comma.
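+ * + * A minimal usage sketch for these helpers (the caller and buffer shown are hypothetical): + * + * int32 level = 0; + * json_add(buf, JT_BEGIN_OBJECT, &level); + * json_add_value(buf, "instance", "node", level, false); + * json_add_key(buf, "backups", level, true); + * json_add(buf, JT_BEGIN_ARRAY, &level); + * json_add(buf, JT_END_ARRAY, &level); + * json_add(buf, JT_END_OBJECT, &level);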
+ */ +void +json_add_key(PQExpBuffer buf, const char *name, int32 level, bool add_comma) +{ + if (add_comma) + appendPQExpBufferChar(buf, ','); + json_add_indent(buf, level); + + json_add_escaped(buf, name); + appendPQExpBufferStr(buf, ": "); +} + +/* + * Add a json object's key and value. If it isn't the first key we need to add a + * comma. + */ +void +json_add_value(PQExpBuffer buf, const char *name, const char *value, + int32 level, bool add_comma) +{ + json_add_key(buf, name, level, add_comma); + json_add_escaped(buf, value); +} + +static void +json_add_indent(PQExpBuffer buf, int32 level) +{ + uint16 i; + + if (level == 0) + return; + + appendPQExpBufferChar(buf, '\n'); + for (i = 0; i < level; i++) + appendPQExpBufferStr(buf, "    "); +} + +static void +json_add_escaped(PQExpBuffer buf, const char *str) +{ + const char *p; + + appendPQExpBufferChar(buf, '"'); + for (p = str; *p; p++) + { + switch (*p) + { + case '\b': + appendPQExpBufferStr(buf, "\\b"); + break; + case '\f': + appendPQExpBufferStr(buf, "\\f"); + break; + case '\n': + appendPQExpBufferStr(buf, "\\n"); + break; + case '\r': + appendPQExpBufferStr(buf, "\\r"); + break; + case '\t': + appendPQExpBufferStr(buf, "\\t"); + break; + case '"': + appendPQExpBufferStr(buf, "\\\""); + break; + case '\\': + appendPQExpBufferStr(buf, "\\\\"); + break; + default: + if ((unsigned char) *p < ' ') + appendPQExpBuffer(buf, "\\u%04x", (int) *p); + else + appendPQExpBufferChar(buf, *p); + break; + } + } + appendPQExpBufferChar(buf, '"'); +} diff --git a/src/utils/json.h b/src/utils/json.h new file mode 100644 index 00000000..cf5a7064 --- /dev/null +++ b/src/utils/json.h @@ -0,0 +1,33 @@ +/*------------------------------------------------------------------------- + * + * json.h: - prototypes of json output functions. + * + * Copyright (c) 2018, Postgres Professional + * + *------------------------------------------------------------------------- + */ + +#ifndef PROBACKUP_JSON_H +#define PROBACKUP_JSON_H + +#include "postgres_fe.h" +#include "pqexpbuffer.h" + +/* + * Json document tokens. + */ +typedef enum +{ + JT_BEGIN_ARRAY, + JT_END_ARRAY, + JT_BEGIN_OBJECT, + JT_END_OBJECT +} JsonToken; + +extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level); +extern void json_add_key(PQExpBuffer buf, const char *name, int32 level, + bool add_comma); +extern void json_add_value(PQExpBuffer buf, const char *name, const char *value, + int32 level, bool add_comma); + +#endif /* PROBACKUP_JSON_H */ From 9cd12fbe055b0bf1e494357713b88d2c1c622680 Mon Sep 17 00:00:00 2001 From: Arthur Zakirov Date: Thu, 7 Jun 2018 14:54:25 +0300 Subject: [PATCH 7/7] PGPRO-533: Update help command --- src/configure.c | 4 ++++ src/help.c | 8 ++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/configure.c b/src/configure.c index 0410dc74..dc80981a 100644 --- a/src/configure.c +++ b/src/configure.c @@ -29,6 +29,10 @@ static pgBackupConfig *cur_config = NULL; static PQExpBufferData show_buf; static int32 json_level = 0; +/* + * All this code needs refactoring.
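+ * + * The show_buf and json_level statics and the show_configure_* helpers + * duplicate their counterparts in src/show.c; a shared JSON writer would + * remove the duplication.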
+ */ + /* Set configure options */ int do_configure(bool show_only) diff --git a/src/help.c b/src/help.c index a41e8167..28500e75 100644 --- a/src/help.c +++ b/src/help.c @@ -89,6 +89,7 @@ help_pg_probackup(void) printf(_(" [--replica-timeout=timeout]\n")); printf(_("\n %s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME); + printf(_(" [--format=format]\n")); printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [-C] [--stream [-S slot-name]] [--backup-pg-log]\n")); @@ -358,7 +359,8 @@ static void help_show(void) { printf(_("%s show -B backup-dir\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n\n")); + printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_(" [--format=format]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name show info about specific instance\n")); @@ -474,10 +476,12 @@ help_set_config(void) static void help_show_config(void) { - printf(_("%s show-config -B backup-dir --instance=instance_name\n\n"), PROGRAM_NAME); + printf(_("%s show-config -B backup-dir --instance=instance_name\n"), PROGRAM_NAME); + printf(_(" [--format=format]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" --format=format show format=PLAIN|JSON\n")); } static void