mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-02-13 14:58:35 +02:00

Check in SHOW command whether backups in RUNNING status are actually running

Artur Zakirov 2017-03-06 11:55:12 +03:00
parent 518142011a
commit 6fedb2e546
8 changed files with 118 additions and 21 deletions


@@ -432,7 +432,7 @@ do_backup(bool smooth_checkpoint)
 	elog(LOG, "----------------------------------------");

 	/* get exclusive lock of backup catalog */
-	catalog_lock(true);
+	catalog_lock(true, NULL);

 	/* initialize backup result */
 	current.status = BACKUP_STATUS_RUNNING;


@@ -42,8 +42,8 @@ unlink_lock_atexit(void)
 /*
  * Create a lockfile.
  */
-int
-catalog_lock(bool check_catalog)
+void
+catalog_lock(bool check_catalog, pid_t *run_pid)
 {
 	int			fd;
 	char		buffer[MAXPGPATH * 2 + 256];
@@ -53,6 +53,9 @@ catalog_lock(bool check_catalog)
 	pid_t		my_pid,
 				my_p_pid;

+	if (run_pid)
+		*run_pid = 0;
+
 	join_path_components(lock_file, backup_path, BACKUP_CATALOG_PID);

 	/*
@@ -149,7 +152,16 @@ catalog_lock(bool check_catalog)
 	{
 		if (kill(encoded_pid, 0) == 0 ||
 			(errno != ESRCH && errno != EPERM))
-			elog(ERROR, "lock file \"%s\" already exists", lock_file);
+		{
+			/* If run_pid was specified just return encoded_pid */
+			if (run_pid)
+			{
+				*run_pid = encoded_pid;
+				return;
+			}
+			else
+				elog(ERROR, "lock file \"%s\" already exists", lock_file);
+		}
 	}

 	/*
@@ -220,8 +232,6 @@ catalog_lock(bool check_catalog)
 			elog(ERROR, "Backup directory was initialized for system id = %ld, but target system id = %ld",
 				 system_identifier, id);
 	}
-
-	return 0;
 }

 /*
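For context: the probe mode added above reuses the classic kill(pid, 0) liveness test on the PID stored in the catalog lock file. The standalone sketch below is not part of the commit; the helper name probe_lock_holder and the lock-file path are illustrative, but the errno handling mirrors the condition in the hunk above (pg_probackup builds the real path from backup_path and BACKUP_CATALOG_PID).

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

/* Return the PID that holds the lock file, or 0 if no live holder is found. */
static pid_t
probe_lock_holder(const char *lock_file)
{
	FILE   *fp = fopen(lock_file, "r");
	long	encoded_pid = 0;

	if (fp == NULL)
		return 0;				/* no lock file -> nothing is running */
	if (fscanf(fp, "%ld", &encoded_pid) != 1)
		encoded_pid = 0;
	fclose(fp);

	if (encoded_pid <= 0)
		return 0;				/* garbage in the file -> treat as stale */

	/* Signal 0 delivers nothing but reports whether the PID still exists. */
	if (kill((pid_t) encoded_pid, 0) == 0 ||
		(errno != ESRCH && errno != EPERM))
		return (pid_t) encoded_pid;	/* holder looks alive */

	return 0;					/* stale lock file left behind */
}

int
main(void)
{
	/* Illustrative path only; not the project's actual lock-file name. */
	pid_t	run_pid = probe_lock_holder("backup/pg_probackup.pid");

	if (run_pid == 0)
		printf("no backup is running\n");
	else
		printf("backup is running, pid %ld\n", (long) run_pid);
	return 0;
}

As in the hunk above, an EPERM result falls through to the stale branch: a PID the caller is not allowed to signal is not reported as a running backup.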


@@ -31,7 +31,7 @@ do_delete(time_t backup_id)
 		elog(ERROR, "required backup ID not specified");

 	/* Lock backup catalog */
-	catalog_lock(false);
+	catalog_lock(false, NULL);

 	/* Get complete list of backups */
 	backup_list = catalog_get_backup_list(0);
@@ -98,7 +98,7 @@ do_deletewal(time_t backup_id, bool strict, bool need_catalog_lock)
 	/* Lock backup catalog */
 	if (need_catalog_lock)
-		catalog_lock(false);
+		catalog_lock(false, NULL);

 	/* Find oldest LSN, used by backups */
 	backup_list = catalog_get_backup_list(0);
@@ -154,7 +154,7 @@ do_retention_purge(void)
 		elog(ERROR, "retention policy is not set");

 	/* Lock backup catalog */
-	catalog_lock(false);
+	catalog_lock(false, NULL);

 	/* Get a complete list of backups. */
 	backup_list = catalog_get_backup_list(0);


@@ -300,7 +300,7 @@ extern parray *catalog_get_backup_list(time_t backup_id);
 extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
 											   TimeLineID tli);
-extern int catalog_lock(bool check_catalog);
+extern void catalog_lock(bool check_catalog, pid_t *run_pid);
 extern void pgBackupWriteConfigSection(FILE *out, pgBackup *backup);
 extern void pgBackupWriteResultSection(FILE *out, pgBackup *backup);


@@ -106,7 +106,7 @@ do_restore(time_t backup_id,
 	elog(LOG, "restore start");

 	/* get exclusive lock of backup catalog */
-	catalog_lock(false);
+	catalog_lock(false, NULL);

 	/* confirm the PostgreSQL server is not running */
 	if (is_pg_running())

show.c

@@ -28,7 +28,8 @@ do_show(time_t backup_id)
 	 */
 	if (backup_id != 0)
 	{
 		pgBackup   *backup;
+		pid_t		run_pid;

 		backup = read_backup(backup_id);
 		if (backup == NULL)
@@ -40,6 +41,18 @@ do_show(time_t backup_id)
 			/* This is not error case */
 			return 0;
 		}

+		/* Fix backup status */
+		if (backup->status == BACKUP_STATUS_RUNNING)
+		{
+			catalog_lock(false, &run_pid);
+
+			if (run_pid == 0)
+			{
+				backup->status = BACKUP_STATUS_ERROR;
+				pgBackupWriteIni(backup);
+			}
+		}
+
 		show_backup_detail(stdout, backup);

 		/* cleanup */
@@ -184,7 +197,8 @@ get_parent_tli(TimeLineID child_tli)
 static void
 show_backup_list(FILE *out, parray *backup_list)
 {
 	int			i;
+	pid_t		run_pid = -1;

 	/* show header */
 	fputs("=========================================================================================\n", out);
@@ -193,14 +207,25 @@ show_backup_list(FILE *out, parray *backup_list)
 	for (i = 0; i < parray_num(backup_list); i++)
 	{
-		pgBackup   *backup;
-		const char *modes[] = { "", "PAGE", "PTRACK", "FULL", "", "PAGE+STREAM", "PTRACK+STREAM", "FULL+STREAM"};
+		pgBackup   *backup = parray_get(backup_list, i);
+		const char *modes[] = {"", "PAGE", "PTRACK", "FULL", "", "PAGE+STREAM", "PTRACK+STREAM", "FULL+STREAM"};
 		TimeLineID	parent_tli;
 		char		timestamp[20] = "----";
 		char		duration[20] = "----";
 		char		data_bytes_str[10] = "----";

-		backup = parray_get(backup_list, i);
+		/* Fix backup status */
+		if (backup->status == BACKUP_STATUS_RUNNING)
+		{
+			if (run_pid == -1)
+				catalog_lock(false, &run_pid);
+
+			if (run_pid == 0 || i + 1 < parray_num(backup_list))
+				backup->status = BACKUP_STATUS_ERROR;
+
+			if (run_pid == 0)
+				pgBackupWriteIni(backup);
+		}

 		if (backup->recovery_time != (time_t) 0)
 			time2iso(timestamp, lengthof(timestamp), backup->recovery_time);
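The rule the SHOW changes apply can be summed up as: an entry recorded as RUNNING is downgraded to ERROR unless some pg_probackup process currently holds the catalog lock and the entry is the last one in the list, and its .ini on disk is rewritten only when nobody holds the lock. A minimal restatement of that predicate, with a hypothetical helper name, not part of the commit:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Hypothetical helper restating the check in show_backup_list():
 * run_pid is 0 when no pg_probackup process holds the catalog lock,
 * index/n_backups locate the entry inside the backup list.
 */
static bool
running_status_is_stale(pid_t run_pid, int index, int n_backups)
{
	bool	nobody_holds_lock = (run_pid == 0);
	bool	not_last_in_list = (index + 1 < n_backups);

	return nobody_holds_lock || not_last_in_list;
}

int
main(void)
{
	/* An old RUNNING entry with no live lock holder is reported stale. */
	printf("%d\n", running_status_is_stale(0, 0, 3));		/* 1 */
	/* The last entry with a live lock holder is left alone. */
	printf("%d\n", running_status_is_stale(12345, 2, 3));	/* 0 */
	return 0;
}

do_show() above applies only the first half of the condition, since it inspects a single backup rather than the whole list.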


@@ -33,9 +33,14 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         node.stop({"-m": "immediate"})
         node.cleanup()

+        # 1 - Test recovery from latest
         self.assertIn(six.b("INFO: restore complete"),
             self.restore_pb(node, options=["-j", "4", "--verbose"]))

+        # 2 - Test that recovery.conf was created
+        recovery_conf = path.join(node.data_dir, "recovery.conf")
+        self.assertEqual(path.isfile(recovery_conf), True)
+
         node.start({"-t": "600"})

         after = node.execute("postgres", "SELECT * FROM pgbench_branches")
@@ -556,3 +561,60 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
         self.assertEqual(id[0][0], 2)

         node.stop()
+
+    def test_restore_with_tablespace_mapping_13(self):
+        """recovery using tablespace-mapping option and page backup"""
+        node = self.make_bnode('restore_with_tablespace_mapping_13',
+            base_dir="tmp_dirs/restore/restore_with_tablespace_mapping_13")
+        node.start()
+        self.assertEqual(self.init_pb(node), six.b(""))
+
+        # Full backup
+        self.backup_pb(node)
+        self.assertEqual(self.show_pb(node)[0].status, six.b("OK"))
+
+        # Create tablespace
+        tblspc_path = path.join(node.base_dir, "tblspc")
+        os.makedirs(tblspc_path)
+        with node.connect("postgres") as con:
+            con.connection.autocommit = True
+            con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path)
+            con.connection.autocommit = False
+            con.execute("CREATE TABLE tbl AS SELECT * FROM generate_series(0,3) AS integer")
+            con.commit()
+
+        # First page backup
+        self.backup_pb(node, backup_type="page")
+        self.assertEqual(self.show_pb(node)[1].status, six.b("OK"))
+
+        # Create tablespace table
+        with node.connect("postgres") as con:
+            con.connection.autocommit = True
+            con.execute("CHECKPOINT")
+            con.connection.autocommit = False
+            con.execute("CREATE TABLE tbl1 (a int) TABLESPACE tblspc")
+            con.execute("INSERT INTO tbl1 SELECT * FROM generate_series(0,3) AS integer")
+            con.commit()
+
+        # Second page backup
+        self.backup_pb(node, backup_type="page")
+        self.assertEqual(self.show_pb(node)[2].status, six.b("OK"))
+
+        node.stop()
+        node.cleanup()
+
+        tblspc_path_new = path.join(node.base_dir, "tblspc_new")
+        self.assertIn(six.b("INFO: restore complete."),
+            self.restore_pb(node,
+                options=["-T", "%s=%s" % (tblspc_path, tblspc_path_new)]))
+
+        # Check tables
+        node.start()
+        count = node.execute("postgres", "SELECT count(*) FROM tbl")
+        self.assertEqual(count[0][0], 4)
+
+        count = node.execute("postgres", "SELECT count(*) FROM tbl1")
+        self.assertEqual(count[0][0], 4)
+
+        node.stop()


@@ -41,7 +41,7 @@ do_validate(time_t backup_id,
 	bool		success_validate,
 				need_validate_wal = true;

-	catalog_lock(false);
+	catalog_lock(false, NULL);

 	rt = checkIfCreateRecoveryConf(target_time, target_xid, target_inclusive);
 	if (rt == NULL)