diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index 0efbf2c0..a94a23c3 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -2402,6 +2402,11 @@ primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmod
password is not included.
+
+
+ note — text note attached to the backup.
+
+
@@ -3377,15 +3382,28 @@ pg_probackup set-config -B backup_dir --instance set-backup
pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id
-{--ttl=ttl | --expire-time=time} [--help]
+{--ttl=ttl | --expire-time=time}
+[--note=backup_note] [--help]
Sets the provided backup-specific settings into the
backup.control configuration file, or modifies the previously
defined values.
+
+
+
- For all available settings, see the section
+ Sets a text note for the backup copy.
+ If backup_note contains newline characters,
+ only the substring before the first newline is saved.
+ The maximum note size is 1 KB.
+ The 'none' value removes the current note.
+
+
+
+
+ For all available pinning settings, see the section
Pinning Options.
@@ -3445,7 +3463,7 @@ pg_probackup backup -B backup_dir -b bac
[--no-validate] [--skip-block-validation]
[-w --no-password] [-W --password]
[--archive-timeout=timeout] [--external-dirs=external_directory_path]
-[--no-sync]
+[--no-sync] [--note=backup_note]
[connection_options] [compression_options] [remote_options]
[retention_options] [pinning_options] [logging_options]
@@ -3612,6 +3630,19 @@ pg_probackup backup -B backup_dir -b bac
+
+
+
+
+ Sets a text note for the backup copy.
+ If backup_note contains newline characters,
+ only the substring before the first newline is saved.
+ The maximum note size is 1 KB.
+ The 'none' value removes the current note.
+
+
+
+
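
For illustration, the new flag composes with both commands; a possible session, reusing the placeholders from the synopses above:

    pg_probackup backup -B backup_dir --instance instance_name -b FULL --note='backup before app update to v13.1'
    pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --note='keep until audit'
    pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --note=none
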
diff --git a/src/backup.c b/src/backup.c
index 5d29781b..258e3c00 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -156,7 +156,6 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync)
/* used for multitimeline incremental backup */
parray *tli_list = NULL;
-
/* for fancy reporting */
time_t start_time, end_time;
char pretty_time[20];
@@ -820,6 +819,7 @@ do_backup(time_t start_time, bool no_validate,
/* Update backup status and other metainfo. */
current.status = BACKUP_STATUS_RUNNING;
current.start_time = start_time;
+
StrNCpy(current.program_version, PROGRAM_VERSION,
sizeof(current.program_version));
@@ -902,6 +902,10 @@ do_backup(time_t start_time, bool no_validate,
if (instance_config.master_conn_opt.pghost == NULL)
elog(ERROR, "Options for connection to master must be provided to perform backup from replica");
+ /* add note to backup if requested */
+ if (set_backup_params && set_backup_params->note)
+ add_note(¤t, set_backup_params->note);
+
/* backup data */
do_backup_instance(backup_conn, &nodeInfo, no_sync);
pgut_atexit_pop(backup_cleanup, NULL);
@@ -934,8 +938,7 @@ do_backup(time_t start_time, bool no_validate,
(set_backup_params->ttl > 0 ||
set_backup_params->expire_time > 0))
{
- if (!pin_backup(&current, set_backup_params))
- elog(ERROR, "Failed to pin the backup %s", base36enc(current.backup_id));
+ pin_backup(¤t, set_backup_params);
}
if (!no_validate)
diff --git a/src/catalog.c b/src/catalog.c
index ca723083..f1faaaec 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -1534,18 +1534,23 @@ do_set_backup(const char *instance_name, time_t backup_id,
target_backup = (pgBackup *) parray_get(backup_list, 0);
- if (!pin_backup(target_backup, set_backup_params))
- elog(ERROR, "Failed to pin the backup %s", base36enc(backup_id));
+ /* Pin or unpin backup if requested */
+ if (set_backup_params->ttl >= 0 || set_backup_params->expire_time > 0)
+ pin_backup(target_backup, set_backup_params);
+
+ if (set_backup_params->note)
+ add_note(target_backup, set_backup_params->note);
}
/*
* Set 'expire-time' attribute based on set_backup_params, or unpin backup
* if ttl is equal to zero.
*/
-bool
+void
pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params)
{
+ /* sanity check: backup must have a positive recovery-time */
if (target_backup->recovery_time <= 0)
elog(ERROR, "Failed to set 'expire-time' for backup %s: invalid 'recovery-time'",
base36enc(target_backup->backup_id));
@@ -1563,7 +1568,7 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params)
{
elog(WARNING, "Backup %s is not pinned, nothing to unpin",
base36enc(target_backup->start_time));
- return false;
+ return;
}
target_backup->expire_time = 0;
}
@@ -1571,7 +1576,8 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params)
else if (set_backup_params->expire_time > 0)
target_backup->expire_time = set_backup_params->expire_time;
else
- return false;
+ /* nothing to do */
+ return;
/* Update backup.control */
write_backup(target_backup);
@@ -1587,7 +1593,44 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params)
else
elog(INFO, "Backup %s is unpinned", base36enc(target_backup->start_time));
- return true;
+ return;
+}
+
+/*
+ * Add a note to the backup metadata or remove an existing note.
+ * It is the caller's job to make sure that note is not NULL.
+ */
+void
+add_note(pgBackup *target_backup, char *note)
+{
+ char *note_string;
+
+ /* unset note */
+ if (pg_strcasecmp(note, "none") == 0)
+ {
+ target_backup->note = NULL;
+ elog(INFO, "Removing note from backup %s",
+ base36enc(target_backup->start_time));
+ }
+ else
+ {
+ /* Currently we do not allow notes containing newlines,
+ * because they would break the parsing of backup.control.
+ * So if the user provides a string such as "aaa\nbbbbb",
+ * we save only "aaa".
+ * Example: tests.set_backup.SetBackupTest.test_add_note_newlines
+ */
+ /* +1 for the terminating NUL; initialize the buffer in case
+ * note starts with a newline and sscanf() matches nothing */
+ note_string = pgut_malloc(MAX_NOTE_SIZE + 1);
+ note_string[0] = '\0';
+ sscanf(note, "%[^\n]", note_string);
+
+ target_backup->note = note_string;
+ elog(INFO, "Adding note to backup %s: '%s'",
+ base36enc(target_backup->start_time), target_backup->note);
+ }
+
+ /* Update backup.control */
+ write_backup(target_backup);
}
/*
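
A minimal standalone sketch of the truncation behavior add_note() relies on (plain C, standard library only; MAX_NOTE_SIZE redefined locally for the demo):

    #include <stdio.h>

    #define MAX_NOTE_SIZE 1024

    int
    main(void)
    {
        char note_string[MAX_NOTE_SIZE + 1] = "";

        /* the %[^\n] scanset copies characters up to,
         * but not including, the first newline */
        sscanf("aaa\nbbbbb", "%[^\n]", note_string);
        printf("%s\n", note_string);    /* prints "aaa" */
        return 0;
    }
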
@@ -1682,6 +1725,10 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
/* print external directories list */
if (backup->external_dir_str)
fio_fprintf(out, "external-dirs = '%s'\n", backup->external_dir_str);
+
+ if (backup->note)
+ fio_fprintf(out, "note = '%s'\n", backup->note);
}
/*
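
After this change a note shows up in backup.control as one more single-quoted key, read back by the option table below; an illustrative fragment (values are hypothetical):

    external-dirs = '/tmp/dir1:/tmp/dir2'
    note = 'backup before app update to v13.1'
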
@@ -1919,6 +1966,7 @@ readBackupControlFile(const char *path)
{'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT},
{'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT},
{'s', 0, "external-dirs", &backup->external_dir_str, SOURCE_FILE_STRICT},
+ {'s', 0, "note", &backup->note, SOURCE_FILE_STRICT},
{0}
};
@@ -2185,6 +2233,8 @@ pgBackupInit(pgBackup *backup)
backup->external_dir_str = NULL;
backup->root_dir = NULL;
backup->files = NULL;
+ backup->note = NULL;
}
/* free pgBackup object */
@@ -2196,6 +2246,7 @@ pgBackupFree(void *backup)
pfree(b->primary_conninfo);
pfree(b->external_dir_str);
pfree(b->root_dir);
+ pfree(b->note);
pfree(backup);
}
diff --git a/src/help.c b/src/help.c
index a048afba..153fe6aa 100644
--- a/src/help.c
+++ b/src/help.c
@@ -106,7 +106,8 @@ help_pg_probackup(void)
printf(_(" [--help]\n"));
printf(_("\n %s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
- printf(_(" -i backup-id [--ttl] [--expire-time]\n"));
+ printf(_(" -i backup-id [--ttl=interval] [--expire-time=timestamp]\n"));
+ printf(_(" [--note=text]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
@@ -140,7 +141,7 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
- printf(_(" [--ttl] [--expire-time]\n"));
+ printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n"));
printf(_(" [--help]\n"));
@@ -285,7 +286,7 @@ help_backup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
- printf(_(" [--ttl] [--expire-time]\n\n"));
+ printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n\n"));
printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n"));
@@ -304,6 +305,8 @@ help_backup(void)
printf(_(" backup some directories not from pgdata \n"));
printf(_(" (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n"));
printf(_(" --no-sync do not sync backed up files to disk\n"));
+ printf(_(" --note=text add note to backup\n"));
+ printf(_(" (example: --note='backup before app update to v13.1')\n"));
printf(_("\n Logging options:\n"));
printf(_(" --log-level-console=log-level-console\n"));
@@ -341,8 +344,9 @@ help_backup(void)
printf(_(" --dry-run perform a trial run without any changes\n"));
printf(_("\n Pinning options:\n"));
- printf(_(" --ttl=ttl pin backup for specified amount of time; 0 unpin\n"));
+ printf(_(" --ttl=interval pin backup for specified amount of time; 0 unpin\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: s)\n"));
+ printf(_(" (example: --ttl=20d)\n"));
printf(_(" --expire-time=time pin backup until specified time stamp\n"));
printf(_(" (example: --expire-time='2024-01-01 00:00:00+03')\n"));
@@ -710,12 +714,15 @@ help_set_backup(void)
{
printf(_("\n%s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id\n"));
- printf(_(" [--ttl] [--expire-time]\n\n"));
+ printf(_(" [--ttl=interval] [--expire-time=time] [--note=text]\n\n"));
- printf(_(" --ttl=ttl pin backup for specified amount of time; 0 unpin\n"));
+ printf(_(" --ttl=interval pin backup for specified amount of time; 0 unpin\n"));
printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: s)\n"));
+ printf(_(" (example: --ttl=20d)\n"));
printf(_(" --expire-time=time pin backup until specified time stamp\n"));
printf(_(" (example: --expire-time='2024-01-01 00:00:00+03')\n"));
+ printf(_(" --note=text add note to backup; 'none' to remove note\n"));
+ printf(_(" (example: --note='backup before app update to v13.1')\n"));
}
static void
diff --git a/src/merge.c b/src/merge.c
index 06da8f4b..bb2c0d69 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -702,6 +702,15 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup)
full_backup->compress_alg = dest_backup->compress_alg;
full_backup->compress_level = dest_backup->compress_level;
+ /* If the incremental backup is pinned,
+ * then the resulting FULL backup must also be pinned.
+ */
+ if (dest_backup->expire_time)
+ full_backup->expire_time = dest_backup->expire_time;
+
+ if (dest_backup->note)
+ full_backup->note = pgut_strdup(dest_backup->note);
+
/* FULL backup must inherit wal mode. */
full_backup->stream = dest_backup->stream;
diff --git a/src/pg_probackup.c b/src/pg_probackup.c
index e35527b0..e7c3dce6 100644
--- a/src/pg_probackup.c
+++ b/src/pg_probackup.c
@@ -75,10 +75,10 @@ char *replication_slot = NULL;
bool temp_slot = false;
/* backup options */
-bool backup_logs = false;
-bool smooth_checkpoint;
-char *remote_agent;
-
+bool backup_logs = false;
+bool smooth_checkpoint;
+char *remote_agent;
+static char *backup_note = NULL;
/* restore options */
static char *target_time = NULL;
static char *target_xid = NULL;
@@ -183,6 +183,7 @@ static ConfigOption cmd_options[] =
{ 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT },
{ 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT },
{ 'b', 185, "dry-run", &dry_run, SOURCE_CMD_STRICT },
+ { 's', 238, "note", &backup_note, SOURCE_CMD_STRICT },
/* restore options */
{ 's', 136, "recovery-target-time", &target_time, SOURCE_CMD_STRICT },
{ 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT },
@@ -746,11 +747,15 @@ main(int argc, char *argv[])
expire_time_string);
}
- if (expire_time > 0 || ttl >= 0)
+ if (expire_time > 0 || ttl >= 0 || backup_note)
{
set_backup_params = pgut_new(pgSetBackupParams);
set_backup_params->ttl = ttl;
set_backup_params->expire_time = expire_time;
+ set_backup_params->note = backup_note;
+
+ if (backup_note && strlen(backup_note) > MAX_NOTE_SIZE)
+ elog(ERROR, "Backup note cannot exceed %u bytes", MAX_NOTE_SIZE);
}
}
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index bbf556a0..275956f8 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -90,6 +90,8 @@ extern const char *PROGRAM_EMAIL;
/* retry attempts */
#define PAGE_READ_ATTEMPTS 100
+#define MAX_NOTE_SIZE 1024
+
/* Check if an XLogRecPtr value is pointed to 0 offset */
#define XRecOffIsNull(xlrp) \
((xlrp) % XLOG_BLCKSZ == 0)
@@ -390,6 +392,7 @@ struct pgBackup
backup_path/instance_name/backup_id */
parray *files; /* list of files belonging to this backup
* must be populated explicitly */
+ char *note;
};
/* Recovery target for restore and validate subcommands */
@@ -433,13 +436,14 @@ typedef struct pgRestoreParams
/* Options needed for set-backup command */
typedef struct pgSetBackupParams
{
- int64 ttl; /* amount of time backup must be pinned
+ int64 ttl; /* amount of time backup must be pinned
* -1 - do nothing
* 0 - disable pinning
*/
- time_t expire_time; /* Point in time before which backup
+ time_t expire_time; /* Point in time until which the backup
* must be pinned.
*/
+ char *note;
} pgSetBackupParams;
typedef struct
@@ -778,8 +782,9 @@ extern void timelineInfoFree(void *tliInfo);
extern parray *catalog_get_timelines(InstanceConfig *instance);
extern void do_set_backup(const char *instance_name, time_t backup_id,
pgSetBackupParams *set_backup_params);
-extern bool pin_backup(pgBackup *target_backup,
+extern void pin_backup(pgBackup *target_backup,
pgSetBackupParams *set_backup_params);
+extern void add_note(pgBackup *target_backup, char *note);
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
extern void write_backup_filelist(pgBackup *backup, parray *files,
const char *root, parray *external_list);
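
A sketch of how a caller is expected to drive these two entry points, mirroring do_set_backup() in src/catalog.c; ttl semantics follow the struct comments above (-1 leaves pinning untouched, 0 unpins, > 0 pins), and note length is validated against MAX_NOTE_SIZE at the command-line layer:

    pgSetBackupParams *params = pgut_new(pgSetBackupParams);

    params->ttl = 0;            /* 0 means: unpin the backup */
    params->expire_time = 0;
    params->note = "none";      /* 'none' removes an existing note */

    if (params->ttl >= 0 || params->expire_time > 0)
        pin_backup(target_backup, params);
    if (params->note)
        add_note(target_backup, params->note);
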
diff --git a/src/show.c b/src/show.c
index fd69ff52..fd4b460e 100644
--- a/src/show.c
+++ b/src/show.c
@@ -430,6 +430,11 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup)
json_add_value(buf, "status", status2str(backup->status), json_level,
true);
+ if (backup->note)
+ json_add_value(buf, "note", backup->note,
+ json_level, true);
json_add(buf, JT_END_OBJECT, &json_level);
}
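
With a note attached, the JSON output of the show command carries one extra key; an abridged, illustrative object (the id and the selection of other fields are hypothetical):

    {
        "id": "QZNWC2",
        "backup-mode": "FULL",
        "status": "OK",
        "note": "test_note"
    }
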
diff --git a/tests/backup.py b/tests/backup.py
index ef418c97..e0493009 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -2709,3 +2709,40 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
+
+ def test_note_sanity(self):
+ """
+ test that adding a note to a backup works as expected
+ """
+ fname = self.id().split('.')[3]
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ initdb_params=['--data-checksums'])
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ # FULL backup
+ backup_id = self.backup_node(
+ backup_dir, 'node', node,
+ options=['--stream', '--log-level-file=LOG', '--note=test_note'])
+
+ show_backups = self.show_pb(backup_dir, 'node')
+
+ print(self.show_pb(backup_dir, as_text=True, as_json=True))
+
+ self.assertEqual(show_backups[0]['note'], "test_note")
+
+ self.set_backup(backup_dir, 'node', backup_id, options=['--note=none'])
+
+ backup_meta = self.show_pb(backup_dir, 'node', backup_id)
+
+ self.assertNotIn(
+ 'note',
+ backup_meta)
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
diff --git a/tests/merge.py b/tests/merge.py
index 8f6ccbb5..2bc09668 100644
--- a/tests/merge.py
+++ b/tests/merge.py
@@ -22,8 +22,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
- initdb_params=["--data-checksums"]
- )
+ initdb_params=["--data-checksums"])
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
@@ -1981,8 +1980,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
- pg_options={
- 'autovacuum': 'off'})
+ pg_options={'autovacuum': 'off'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
@@ -2384,5 +2382,60 @@ class MergeTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
+ def test_merge_correct_inheritance(self):
+ """
+ Make sure that backup metainformation fields
+ 'note' and 'expire-time' are correctly inherited
+ during merge
+ """
+ fname = self.id().split('.')[3]
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ set_replication=True,
+ initdb_params=['--data-checksums'],
+ pg_options={'autovacuum': 'off'})
+
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ self.set_archiving(backup_dir, 'node', node)
+ node.slow_start()
+
+ # add database
+ node.safe_psql(
+ 'postgres',
+ 'CREATE DATABASE testdb')
+
+ # take FULL backup
+ self.backup_node(backup_dir, 'node', node, options=['--stream'])
+
+ # create one more database
+ node.safe_psql(
+ 'postgres',
+ 'CREATE DATABASE testdb1')
+
+ # take PAGE backup
+ page_id = self.backup_node(
+ backup_dir, 'node', node, backup_type='page')
+
+ self.set_backup(
+ backup_dir, 'node', page_id, options=['--note=hello', '--ttl=20d'])
+
+ page_meta = self.show_pb(backup_dir, 'node', page_id)
+
+ self.merge_backup(backup_dir, 'node', page_id)
+
+ print(self.show_pb(backup_dir, 'node', page_id))
+
+ self.assertEqual(
+ page_meta['note'],
+ self.show_pb(backup_dir, 'node', page_id)['note'])
+
+ self.assertEqual(
+ page_meta['expire-time'],
+ self.show_pb(backup_dir, 'node', page_id)['expire-time'])
+
+ self.del_test_dir(module_name, fname)
+
# 1. Need new test with corrupted FULL backup
# 2. different compression levels
diff --git a/tests/retention.py b/tests/retention.py
index 4885187f..e797d3c6 100644
--- a/tests/retention.py
+++ b/tests/retention.py
@@ -1840,335 +1840,6 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
- # @unittest.expectedFailure
- @unittest.skip("skip")
- def test_wal_depth(self):
- """
- ARCHIVE replica:
-
- t6 |---------------------->
- t5 | |------>
- | |
- t4 | |----|------>
- | |
- t3 | |--B1--|/|--B2-|/|-B3-->
- | |
- t2 |--A1-----|--A2--->
- t1 ---------Y1--Y2-|
-
- ARCHIVE master:
- t1 -Z1--Z2-->
- """
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
- set_replication=True,
- initdb_params=['--data-checksums'],
- pg_options={
- 'archive_timeout': '30s',
- 'checkpoint_timeout': '30s',
- 'autovacuum': 'off'})
-
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'master', master)
- self.set_archiving(backup_dir, 'master', master)
-
- master.slow_start()
-
- # FULL
- master.safe_psql(
- "postgres",
- "create table t_heap as select i as id, md5(i::text) as text, "
- "md5(repeat(i::text,10))::tsvector as tsvector "
- "from generate_series(0,10000) i")
-
- self.backup_node(backup_dir, 'master', master)
-
- # PAGE
- master.safe_psql(
- "postgres",
- "insert into t_heap select i as id, md5(i::text) as text, "
- "md5(repeat(i::text,10))::tsvector as tsvector "
- "from generate_series(10000,20000) i")
-
- self.backup_node(
- backup_dir, 'master', master, backup_type='page')
-
- replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
- replica.cleanup()
- self.restore_node(backup_dir, 'master', replica)
- self.set_replica(master, replica)
-
- self.add_instance(backup_dir, 'replica', replica)
- self.set_archiving(backup_dir, 'replica', replica, replica=True)
-
- copy_tree(
- os.path.join(backup_dir, 'wal', 'master'),
- os.path.join(backup_dir, 'wal', 'replica'))
-
- # Check data correctness on replica
- replica.slow_start(replica=True)
-
- # FULL backup replica
- Y1 = self.backup_node(
- backup_dir, 'replica', replica,
- options=['--stream', '--archive-timeout=60s'])
-
- master.pgbench_init(scale=5)
-
- # PAGE backup replica
- Y2 = self.backup_node(
- backup_dir, 'replica', replica,
- backup_type='page', options=['--stream', '--archive-timeout=60s'])
-
- # create timeline t2
- replica.promote()
-
- # do checkpoint to increment timeline ID in pg_control
- replica.safe_psql(
- 'postgres',
- 'CHECKPOINT')
-
- # FULL backup replica
- A1 = self.backup_node(
- backup_dir, 'replica', replica)
-
- replica.pgbench_init(scale=5)
-
- replica.safe_psql(
- 'postgres',
- "CREATE TABLE t1 (a text)")
-
- target_xid = None
- with replica.connect("postgres") as con:
- res = con.execute(
- "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)")
- con.commit()
- target_xid = res[0][0]
-
- # DELTA backup replica
- A2 = self.backup_node(
- backup_dir, 'replica', replica, backup_type='delta')
-
- # create timeline t3
- replica.cleanup()
- self.restore_node(
- backup_dir, 'replica', replica,
- options=[
- '--recovery-target-xid={0}'.format(target_xid),
- '--recovery-target-timeline=2',
- '--recovery-target-action=promote'])
-
- replica.slow_start()
-
- B1 = self.backup_node(
- backup_dir, 'replica', replica)
-
- replica.pgbench_init(scale=2)
-
- B2 = self.backup_node(
- backup_dir, 'replica', replica, backup_type='page')
-
- replica.pgbench_init(scale=2)
-
- target_xid = None
- with replica.connect("postgres") as con:
- res = con.execute(
- "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)")
- con.commit()
- target_xid = res[0][0]
-
- B3 = self.backup_node(
- backup_dir, 'replica', replica, backup_type='page')
-
- replica.pgbench_init(scale=2)
-
- # create timeline t4
- replica.cleanup()
- self.restore_node(
- backup_dir, 'replica', replica,
- options=[
- '--recovery-target-xid={0}'.format(target_xid),
- '--recovery-target-timeline=3',
- '--recovery-target-action=promote'])
-
- replica.slow_start()
-
- replica.safe_psql(
- 'postgres',
- 'CREATE TABLE '
- 't2 as select i, '
- 'repeat(md5(i::text),5006056) as fat_attr '
- 'from generate_series(0,6) i')
-
- target_xid = None
- with replica.connect("postgres") as con:
- res = con.execute(
- "INSERT INTO t1 VALUES ('inserted') RETURNING (xmin)")
- con.commit()
- target_xid = res[0][0]
-
- replica.safe_psql(
- 'postgres',
- 'CREATE TABLE '
- 't3 as select i, '
- 'repeat(md5(i::text),5006056) as fat_attr '
- 'from generate_series(0,10) i')
-
- # create timeline t5
- replica.cleanup()
- self.restore_node(
- backup_dir, 'replica', replica,
- options=[
- '--recovery-target-xid={0}'.format(target_xid),
- '--recovery-target-timeline=4',
- '--recovery-target-action=promote'])
-
- replica.slow_start()
-
- replica.safe_psql(
- 'postgres',
- 'CREATE TABLE '
- 't4 as select i, '
- 'repeat(md5(i::text),5006056) as fat_attr '
- 'from generate_series(0,6) i')
-
- # create timeline t6
- replica.cleanup()
-
- self.restore_node(
- backup_dir, 'replica', replica, backup_id=A1,
- options=[
- '--recovery-target=immediate',
- '--recovery-target-action=promote'])
- replica.slow_start()
-
- replica.pgbench_init(scale=2)
-
- show = self.show_archive(backup_dir, as_text=True)
- show = self.show_archive(backup_dir)
-
- for instance in show:
- if instance['instance'] == 'replica':
- replica_timelines = instance['timelines']
-
- if instance['instance'] == 'master':
- master_timelines = instance['timelines']
-
- # check that all timelines are ok
- for timeline in replica_timelines:
- self.assertTrue(timeline['status'], 'OK')
-
- # check that all timelines are ok
- for timeline in master_timelines:
- self.assertTrue(timeline['status'], 'OK')
-
- # create holes in t3
- wals_dir = os.path.join(backup_dir, 'wal', 'replica')
- wals = [
- f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))
- and not f.endswith('.backup') and not f.endswith('.history') and f.startswith('00000003')
- ]
- wals.sort()
-
- # check that t3 is ok
- self.show_archive(backup_dir)
-
- file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000017')
- if self.archive_compress:
- file = file + '.gz'
- os.remove(file)
-
- file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000012')
- if self.archive_compress:
- file = file + '.gz'
- os.remove(file)
-
- file = os.path.join(backup_dir, 'wal', 'replica', '000000030000000000000013')
- if self.archive_compress:
- file = file + '.gz'
- os.remove(file)
-
- # check that t3 is not OK
- show = self.show_archive(backup_dir)
-
- show = self.show_archive(backup_dir)
-
- for instance in show:
- if instance['instance'] == 'replica':
- replica_timelines = instance['timelines']
-
- # sanity
- for timeline in replica_timelines:
- if timeline['tli'] == 1:
- timeline_1 = timeline
- continue
-
- if timeline['tli'] == 2:
- timeline_2 = timeline
- continue
-
- if timeline['tli'] == 3:
- timeline_3 = timeline
- continue
-
- if timeline['tli'] == 4:
- timeline_4 = timeline
- continue
-
- if timeline['tli'] == 5:
- timeline_5 = timeline
- continue
-
- if timeline['tli'] == 6:
- timeline_6 = timeline
- continue
-
- self.assertEqual(timeline_6['status'], "OK")
- self.assertEqual(timeline_5['status'], "OK")
- self.assertEqual(timeline_4['status'], "OK")
- self.assertEqual(timeline_3['status'], "DEGRADED")
- self.assertEqual(timeline_2['status'], "OK")
- self.assertEqual(timeline_1['status'], "OK")
-
- self.assertEqual(len(timeline_3['lost-segments']), 2)
- self.assertEqual(timeline_3['lost-segments'][0]['begin-segno'], '0000000000000012')
- self.assertEqual(timeline_3['lost-segments'][0]['end-segno'], '0000000000000013')
- self.assertEqual(timeline_3['lost-segments'][1]['begin-segno'], '0000000000000017')
- self.assertEqual(timeline_3['lost-segments'][1]['end-segno'], '0000000000000017')
-
- self.assertEqual(len(timeline_6['backups']), 0)
- self.assertEqual(len(timeline_5['backups']), 0)
- self.assertEqual(len(timeline_4['backups']), 0)
- self.assertEqual(len(timeline_3['backups']), 3)
- self.assertEqual(len(timeline_2['backups']), 2)
- self.assertEqual(len(timeline_1['backups']), 2)
-
- # check closest backup correctness
- self.assertEqual(timeline_6['closest-backup-id'], A1)
- self.assertEqual(timeline_5['closest-backup-id'], B2)
- self.assertEqual(timeline_4['closest-backup-id'], B2)
- self.assertEqual(timeline_3['closest-backup-id'], A1)
- self.assertEqual(timeline_2['closest-backup-id'], Y2)
-
- # check parent tli correctness
- self.assertEqual(timeline_6['parent-tli'], 2)
- self.assertEqual(timeline_5['parent-tli'], 4)
- self.assertEqual(timeline_4['parent-tli'], 3)
- self.assertEqual(timeline_3['parent-tli'], 2)
- self.assertEqual(timeline_2['parent-tli'], 1)
- self.assertEqual(timeline_1['parent-tli'], 0)
-
- output = self.delete_pb(
- backup_dir, 'replica',
- options=['--delete-wal', '--log-level-console=verbose'])
-
- self.validate_pb(backup_dir, 'node')
-
- self.del_test_dir(module_name, fname)
-
def test_wal_depth_1(self):
"""
|-------------B5----------> WAL timeline3
diff --git a/tests/set_backup.py b/tests/set_backup.py
index b8d97ad5..861de756 100644
--- a/tests/set_backup.py
+++ b/tests/set_backup.py
@@ -383,5 +383,92 @@ class SetBackupTest(ProbackupTest, unittest.TestCase):
self.validate_pb(backup_dir)
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ def test_add_note_newlines(self):
+ """"""
+ fname = self.id().split('.')[3]
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ initdb_params=['--data-checksums'])
+
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ node.slow_start()
+
+ # FULL
+ backup_id = self.backup_node(
+ backup_dir, 'node', node,
+ options=['--stream', '--note={0}'.format('hello\nhello')])
+
+ backup_meta = self.show_pb(backup_dir, 'node', backup_id)
+ self.assertEqual(backup_meta['note'], "hello")
+
+ self.set_backup(backup_dir, 'node', backup_id, options=['--note=hello\nhello'])
+
+ backup_meta = self.show_pb(backup_dir, 'node', backup_id)
+ self.assertEqual(backup_meta['note'], "hello")
+
+ self.set_backup(backup_dir, 'node', backup_id, options=['--note=none'])
+
+ backup_meta = self.show_pb(backup_dir, 'node', backup_id)
+ self.assertNotIn('note', backup_meta)
+
+ # Clean after yourself
+ self.del_test_dir(module_name, fname)
+
+ # @unittest.skip("skip")
+ def test_add_big_note(self):
+ """"""
+ fname = self.id().split('.')[3]
+ node = self.make_simple_node(
+ base_dir=os.path.join(module_name, fname, 'node'),
+ initdb_params=['--data-checksums'])
+
+ backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ self.init_pb(backup_dir)
+ self.add_instance(backup_dir, 'node', node)
+ node.slow_start()
+
+# note = node.safe_psql(
+# "postgres",
+# "SELECT repeat('hello', 400)").rstrip() # TODO: investigate
+
+ note = node.safe_psql(
+ "postgres",
+ "SELECT repeat('hello', 210)").rstrip()
+
+ # FULL
+ try:
+ self.backup_node(
+ backup_dir, 'node', node,
+ options=['--stream', '--note={0}'.format(note)])
+ # we should die here because exception is what we expect to happen
+ self.assertEqual(
+ 1, 0,
+ "Expecting Error because note is too large "
+ "\n Output: {0} \n CMD: {1}".format(
+ repr(self.output), self.cmd))
+ except ProbackupException as e:
+ self.assertIn(
+ "ERROR: Backup note cannot exceed 1024 bytes",
+ e.message,
+ "\n Unexpected Error Message: {0}\n CMD: {1}".format(
+ repr(e.message), self.cmd))
+
+ note = node.safe_psql(
+ "postgres",
+ "SELECT repeat('hello', 200)").rstrip()
+
+ backup_id = self.backup_node(
+ backup_dir, 'node', node,
+ options=['--stream', '--note={0}'.format(note)])
+
+ backup_meta = self.show_pb(backup_dir, 'node', backup_id)
+ self.assertEqual(backup_meta['note'], note)
+
# Clean after yourself
self.del_test_dir(module_name, fname)
\ No newline at end of file