/*-------------------------------------------------------------------------
 *
 * backup.c: backup DB cluster, archived WAL
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
 * Portions Copyright (c) 2015-2017, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
|
2016-11-16 20:34:21 +03:00
|
|
|
#include "pg_probackup.h"
|
2012-05-18 08:54:36 +00:00
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
2016-04-15 15:58:58 +03:00
|
|
|
#include <string.h>
|
2012-05-18 08:54:36 +00:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/time.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <dirent.h>
|
|
|
|
#include <time.h>
|
2016-02-29 20:23:48 +03:00
|
|
|
#include <pthread.h>
|
2012-05-18 08:54:36 +00:00
|
|
|
|
|
|
|
#include "libpq/pqsignal.h"
|
2016-02-27 21:07:55 +03:00
|
|
|
#include "storage/bufpage.h"
|
2017-09-29 20:25:11 +03:00
|
|
|
#include "catalog/catalog.h"
|
|
|
|
#include "catalog/pg_tablespace.h"
|
2016-02-27 21:07:55 +03:00
|
|
|
#include "datapagemap.h"
|
2016-05-26 15:56:32 +03:00
|
|
|
#include "receivelog.h"
|
2017-08-07 16:23:37 +03:00
|
|
|
#include "streamutil.h"
|
2017-09-28 17:40:24 +03:00
|
|
|
#include "pgtar.h"
|
2012-05-18 08:54:36 +00:00
|
|
|
|
2016-05-26 15:56:32 +03:00
|
|
|
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
|
|
|
|
static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
|
2018-04-04 17:06:50 +03:00
|
|
|
static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr;
|
2017-05-29 18:53:48 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* How long we should wait for streaming end in seconds.
|
|
|
|
* Retreived as checkpoint_timeout + checkpoint_timeout * 0.1
|
|
|
|
*/
|
|
|
|
static uint32 stream_stop_timeout = 0;
|
|
|
|
/* Time in which we started to wait for streaming end */
|
|
|
|
static time_t stream_stop_begin = 0;
|
|
|
|
|
2016-11-16 20:34:21 +03:00
|
|
|
const char *progname = "pg_probackup";
|
2012-05-18 08:54:36 +00:00
|
|
|
|
2016-01-15 23:47:38 +09:00
|
|
|
/* list of files contained in backup */
|
2017-02-25 15:12:07 +03:00
|
|
|
static parray *backup_files_list = NULL;
|
2017-04-18 11:41:02 +03:00
|
|
|
|
2017-05-25 14:05:48 +03:00
|
|
|
/*
|
|
|
|
* We need to wait end of WAL streaming before execute pg_stop_backup().
|
|
|
|
*/
|
2018-04-05 18:58:40 +03:00
|
|
|
typedef struct
|
|
|
|
{
|
|
|
|
const char *basedir;
|
2018-04-10 19:02:00 +03:00
|
|
|
PGconn *conn;
|
|
|
|
|
2018-04-05 18:58:40 +03:00
|
|
|
/*
|
|
|
|
* Return value from the thread.
|
|
|
|
* 0 means there is no error, 1 - there is an error.
|
|
|
|
*/
|
|
|
|
int ret;
|
|
|
|
} StreamThreadArg;
|
|
|
|
|
2017-05-25 14:05:48 +03:00
|
|
|
static pthread_t stream_thread;
|
2018-04-10 19:02:00 +03:00
|
|
|
static StreamThreadArg stream_thread_arg = {"", NULL, 1};
|
2016-01-15 23:47:38 +09:00
|
|
|
|
2017-04-05 19:48:55 +03:00
|
|
|
static int is_ptrack_enable = false;
|
2018-01-18 04:55:10 +03:00
|
|
|
bool is_ptrack_support = false;
|
2018-01-15 17:58:44 +03:00
|
|
|
bool is_checksum_enabled = false;
|
2018-02-06 22:15:41 +03:00
|
|
|
bool exclusive_backup = false;
|
2017-04-05 19:48:55 +03:00
|
|
|
|
2017-06-07 16:50:33 +03:00
|
|
|
/* Backup connections */
|
2017-03-21 11:54:49 +03:00
|
|
|
static PGconn *backup_conn = NULL;
|
2017-06-07 16:50:33 +03:00
|
|
|
static PGconn *master_conn = NULL;
|
2017-09-28 17:40:24 +03:00
|
|
|
static PGconn *backup_conn_replication = NULL;
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-05-22 14:22:20 +03:00
|
|
|
/* PostgreSQL server version from "backup_conn" */
|
|
|
|
static int server_version = 0;
|
2017-11-09 19:14:39 +03:00
|
|
|
static char server_version_str[100] = "";
|
2017-05-22 14:22:20 +03:00
|
|
|
|
|
|
|
/* Is pg_start_backup() was executed */
|
|
|
|
static bool backup_in_progress = false;
|
2017-11-01 12:29:28 +03:00
|
|
|
/* Is pg_stop_backup() was sent */
|
|
|
|
static bool pg_stop_backup_is_sent = false;
|
2017-05-22 14:22:20 +03:00
|
|
|
|
2012-05-18 08:54:36 +00:00
|
|
|
/*
|
|
|
|
* Backup routines
|
|
|
|
*/
|
|
|
|
static void backup_cleanup(bool fatal, void *userdata);
|
2017-03-21 11:54:49 +03:00
|
|
|
static void backup_disconnect(bool fatal, void *userdata);
|
|
|
|
|
2016-02-29 20:23:48 +03:00
|
|
|
static void backup_files(void *arg);
|
2017-09-28 17:40:24 +03:00
|
|
|
static void remote_backup_files(void *arg);
|
|
|
|
|
2017-09-28 14:39:21 +03:00
|
|
|
static void do_backup_instance(void);
|
2017-03-21 18:30:48 +03:00
|
|
|
|
2012-05-18 08:54:36 +00:00
|
|
|
static void pg_start_backup(const char *label, bool smooth, pgBackup *backup);
|
2017-07-13 16:16:05 +03:00
|
|
|
static void pg_switch_wal(PGconn *conn);
|
2012-05-18 08:54:36 +00:00
|
|
|
static void pg_stop_backup(pgBackup *backup);
|
2017-05-29 18:53:48 +03:00
|
|
|
static int checkpoint_timeout(void);
|
2017-03-21 18:30:48 +03:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
//static void backup_list_file(parray *files, const char *root, )
|
2017-09-29 20:25:11 +03:00
|
|
|
static void parse_backup_filelist_filenames(parray *files, const char *root);
|
2017-04-18 11:41:02 +03:00
|
|
|
static void write_backup_file_list(parray *files, const char *root);
|
2017-07-13 09:39:00 +03:00
|
|
|
static void wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment);
|
2017-07-11 17:41:52 +03:00
|
|
|
static void wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup);
|
2017-03-21 11:54:49 +03:00
|
|
|
static void make_pagemap_from_ptrack(parray *files);
|
|
|
|
static void StreamLog(void *arg);
|
|
|
|
|
2017-09-28 17:40:24 +03:00
|
|
|
static void get_remote_pgdata_filelist(parray *files);
|
|
|
|
static void ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum);
|
|
|
|
static void remote_copy_file(PGconn *conn, pgFile* file);
|
|
|
|
|
2017-03-21 11:54:49 +03:00
|
|
|
/* Ptrack functions */
|
2016-02-27 21:07:55 +03:00
|
|
|
static void pg_ptrack_clear(void);
|
2016-10-18 16:25:13 +03:00
|
|
|
static bool pg_ptrack_support(void);
|
2016-11-22 16:07:54 +03:00
|
|
|
static bool pg_ptrack_enable(void);
|
2018-01-15 17:58:44 +03:00
|
|
|
static bool pg_checksum_enable(void);
|
2016-10-25 14:38:51 +03:00
|
|
|
static bool pg_is_in_recovery(void);
|
2017-09-29 20:25:11 +03:00
|
|
|
static bool pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid);
|
2016-05-11 19:35:14 +03:00
|
|
|
static char *pg_ptrack_get_and_clear(Oid tablespace_oid,
|
|
|
|
Oid db_oid,
|
|
|
|
Oid rel_oid,
|
|
|
|
size_t *result_size);
|
2017-10-02 18:31:46 +03:00
|
|
|
static XLogRecPtr get_last_ptrack_lsn(void);
|
2017-03-21 11:54:49 +03:00
|
|
|
|
|
|
|
/* Check functions */
|
|
|
|
static void check_server_version(void);
|
2017-04-18 11:41:02 +03:00
|
|
|
static void check_system_identifiers(void);
|
2017-03-21 11:54:49 +03:00
|
|
|
static void confirm_block_size(const char *name, int blcksz);
|
2017-10-19 13:33:31 +03:00
|
|
|
static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i);
|
2016-05-26 15:56:32 +03:00
|
|
|
|
|
|
|
|
|
|
|
#define disconnect_and_exit(code) \
|
|
|
|
{ \
|
|
|
|
if (conn != NULL) PQfinish(conn); \
|
|
|
|
exit(code); \
|
|
|
|
}
|
|
|
|
|
2017-09-28 17:40:24 +03:00
|
|
|
/* Fill "files" with data about all the files to backup */
|
|
|
|
static void
|
|
|
|
get_remote_pgdata_filelist(parray *files)
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
int resultStatus;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
backup_conn_replication = pgut_connect_replication(pgut_dbname);
|
|
|
|
|
|
|
|
if (PQsendQuery(backup_conn_replication, "FILE_BACKUP FILELIST") == 0)
|
|
|
|
elog(ERROR,"%s: could not send replication command \"%s\": %s",
|
|
|
|
PROGRAM_NAME, "FILE_BACKUP", PQerrorMessage(backup_conn_replication));
|
|
|
|
|
|
|
|
res = PQgetResult(backup_conn_replication);
|
|
|
|
|
|
|
|
if (PQresultStatus(res) != PGRES_TUPLES_OK)
|
|
|
|
{
|
|
|
|
resultStatus = PQresultStatus(res);
|
|
|
|
PQclear(res);
|
|
|
|
elog(ERROR, "cannot start getting FILE_BACKUP filelist: %s, result_status %d",
|
|
|
|
PQerrorMessage(backup_conn_replication), resultStatus);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PQntuples(res) < 1)
|
|
|
|
elog(ERROR, "%s: no data returned from server", PROGRAM_NAME);
|
|
|
|
|
|
|
|
for (i = 0; i < PQntuples(res); i++)
|
|
|
|
{
|
|
|
|
ReceiveFileList(files, backup_conn_replication, res, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
res = PQgetResult(backup_conn_replication);
|
|
|
|
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
|
|
{
|
|
|
|
elog(ERROR, "%s: final receive failed: %s",
|
|
|
|
PROGRAM_NAME, PQerrorMessage(backup_conn_replication));
|
|
|
|
}
|
|
|
|
|
|
|
|
PQfinish(backup_conn_replication);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Workhorse for get_remote_pgdata_filelist().
 * Parse received message into pgFile structure.
 *
 * For the tuple "rownum" of "res", switches the connection into COPY OUT
 * mode and reads a stream of 512-byte tar block headers.  Each header is
 * turned into a pgFile (name, size, mode, file type) and appended to
 * "files".  Only headers are expected here — no file payload follows.
 */
static void
ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum)
{
	char		filename[MAXPGPATH];
	pgoff_t		current_len_left = 0;
	bool		basetablespace;
	char	   *copybuf = NULL;
	pgFile	   *pgfile;

	/* What for do we need this basetablespace field?? */
	basetablespace = PQgetisnull(res, rownum, 0);
	if (basetablespace)
		elog(LOG,"basetablespace");
	else
		elog(LOG, "basetablespace %s", PQgetvalue(res, rownum, 1));

	/*
	 * NOTE(review): "res" is rebound here to the COPY-start result; the
	 * caller's tuples result is untouched (caller still owns it), but the
	 * new result obtained below is never PQclear'd — small leak.
	 */
	res = PQgetResult(conn);

	if (PQresultStatus(res) != PGRES_COPY_OUT)
		elog(ERROR, "Could not get COPY data stream: %s", PQerrorMessage(conn));

	while (1)
	{
		int			r;
		int			filemode;

		/* free the previous iteration's COPY buffer before fetching more */
		if (copybuf != NULL)
		{
			PQfreemem(copybuf);
			copybuf = NULL;
		}

		r = PQgetCopyData(conn, &copybuf, 0);

		if (r == -2)
			elog(ERROR, "Could not read COPY data: %s", PQerrorMessage(conn));

		/* end of copy */
		if (r == -1)
			break;

		/* This must be the header for a new file */
		if (r != 512)
			elog(ERROR, "Invalid tar block header size: %d\n", r);

		/* tar header offset 124: 12-byte octal file size field */
		current_len_left = read_tar_number(&copybuf[124], 12);

		/* Set permissions on the file (tar header offset 100, 8 bytes) */
		filemode = read_tar_number(&copybuf[100], 8);

		/* First part of header is zero terminated filename */
		snprintf(filename, sizeof(filename), "%s", copybuf);

		pgfile = pgFileInit(filename);
		pgfile->size = current_len_left;
		pgfile->mode |= filemode;

		/*
		 * NOTE(review): an empty filename would make strlen()-1 index out
		 * of bounds; presumably the server never sends one — confirm.
		 */
		if (filename[strlen(filename) - 1] == '/')
		{
			/* Symbolic link or directory has size zero */
			Assert (pgfile->size == 0);
			/* Ends in a slash means directory or symlink to directory */
			if (copybuf[156] == '5')
			{
				/* Directory (tar typeflag '5') */
				pgfile->mode |= S_IFDIR;
			}
			else if (copybuf[156] == '2')
			{
				/* Symlink (tar typeflag '2') */
				pgfile->mode |= S_IFLNK;
			}
			else
				elog(ERROR, "Unrecognized link indicator \"%c\"\n",
							 copybuf[156]);
		}
		else
		{
			/* regular file */
			pgfile->mode |= S_IFREG;
		}

		parray_append(files, pgfile);
	}

	/* release the last buffer handed out by PQgetCopyData */
	if (copybuf != NULL)
		PQfreemem(copybuf);
}
|
|
|
|
|
|
|
|
/* read one file via replication protocol
 * and write it to the destination subdir in 'backup_path' */
/*
 * The connection must already be in COPY OUT mode with the tar header
 * consumed by the caller (remote_backup_files).  Reads the payload from
 * the stream, writes it under the backup's DATABASE_DIR, accumulates the
 * CRC32C into file->crc and byte counts into file->read_size/write_size.
 * Tar zero-padding after file->size bytes is skipped via "skip_padding".
 */
static void
remote_copy_file(PGconn *conn, pgFile* file)
{
	PGresult	*res;
	char		*copybuf = NULL;
	char		buf[32768];
	FILE		*out;
	char		database_path[MAXPGPATH];
	char		to_path[MAXPGPATH];
	bool		skip_padding = false;

	pgBackupGetPath(&current, database_path, lengthof(database_path),
					DATABASE_DIR);
	join_path_components(to_path, database_path, file->path);

	out = fopen(to_path, "w");
	if (out == NULL)
	{
		int errno_tmp = errno;
		elog(ERROR, "cannot open destination file \"%s\": %s",
			 to_path, strerror(errno_tmp));
	}

	INIT_CRC32C(file->crc);

	/* read from stream and write to backup file */
	while (1)
	{
		int			row_length;
		int			errno_tmp;
		int			write_buffer_size = 0;

		/* free the buffer from the previous PQgetCopyData call */
		if (copybuf != NULL)
		{
			PQfreemem(copybuf);
			copybuf = NULL;
		}

		row_length = PQgetCopyData(conn, &copybuf, 0);

		if (row_length == -2)
			elog(ERROR, "Could not read COPY data: %s", PQerrorMessage(conn));

		/* -1 means end of the COPY stream */
		if (row_length == -1)
			break;

		/* once the declared size has been written, the rest is tar padding */
		if (!skip_padding)
		{
			/*
			 * NOTE(review): rows longer than sizeof(buf) are truncated to
			 * the buffer size here; presumably the server never sends a
			 * row that large — confirm.
			 */
			write_buffer_size = Min(row_length, sizeof(buf));
			memcpy(buf, copybuf, write_buffer_size);
			COMP_CRC32C(file->crc, &buf, write_buffer_size);

			/* TODO calc checksum*/
			if (fwrite(buf, 1, write_buffer_size, out) != write_buffer_size)
			{
				errno_tmp = errno;
				/* oops */
				FIN_CRC32C(file->crc);
				fclose(out);
				PQfinish(conn);
				elog(ERROR, "cannot write to \"%s\": %s", to_path,
					 strerror(errno_tmp));
			}

			file->read_size += write_buffer_size;
		}
		if (file->read_size >= file->size)
		{
			skip_padding = true;
		}
	}

	res = PQgetResult(conn);

	/* File is not found. That's normal. */
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		elog(ERROR, "final receive failed: status %d ; %s",PQresultStatus(res), PQerrorMessage(conn));
	}

	/* everything read was written; record it and finalize the checksum */
	file->write_size = file->read_size;
	FIN_CRC32C(file->crc);

	fclose(out);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Take a remote backup of the PGDATA at a file level.
|
|
|
|
* Copy all directories and files listed in backup_files_list.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
remote_backup_files(void *arg)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
backup_files_args *arguments = (backup_files_args *) arg;
|
|
|
|
int n_backup_files_list = parray_num(arguments->backup_files_list);
|
|
|
|
PGconn *file_backup_conn = NULL;
|
|
|
|
|
|
|
|
for (i = 0; i < n_backup_files_list; i++)
|
|
|
|
{
|
|
|
|
char *query_str;
|
|
|
|
PGresult *res;
|
|
|
|
char *copybuf = NULL;
|
|
|
|
pgFile *file;
|
|
|
|
int row_length;
|
|
|
|
|
|
|
|
file = (pgFile *) parray_get(arguments->backup_files_list, i);
|
|
|
|
|
|
|
|
/* We have already copied all directories */
|
|
|
|
if (S_ISDIR(file->mode))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (__sync_lock_test_and_set(&file->lock, 1) != 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
file_backup_conn = pgut_connect_replication(pgut_dbname);
|
|
|
|
|
|
|
|
/* check for interrupt */
|
|
|
|
if (interrupted)
|
|
|
|
elog(ERROR, "interrupted during backup");
|
|
|
|
|
|
|
|
query_str = psprintf("FILE_BACKUP FILEPATH '%s'",file->path);
|
|
|
|
|
|
|
|
if (PQsendQuery(file_backup_conn, query_str) == 0)
|
|
|
|
elog(ERROR,"%s: could not send replication command \"%s\": %s",
|
|
|
|
PROGRAM_NAME, query_str, PQerrorMessage(file_backup_conn));
|
|
|
|
|
|
|
|
res = PQgetResult(file_backup_conn);
|
|
|
|
|
|
|
|
/* File is not found. That's normal. */
|
|
|
|
if (PQresultStatus(res) == PGRES_COMMAND_OK)
|
|
|
|
{
|
|
|
|
PQclear(res);
|
|
|
|
PQfinish(file_backup_conn);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PQresultStatus(res) != PGRES_COPY_OUT)
|
|
|
|
{
|
|
|
|
PQclear(res);
|
|
|
|
PQfinish(file_backup_conn);
|
|
|
|
elog(ERROR, "Could not get COPY data stream: %s", PQerrorMessage(file_backup_conn));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* read the header of the file */
|
|
|
|
row_length = PQgetCopyData(file_backup_conn, ©buf, 0);
|
|
|
|
|
|
|
|
if (row_length == -2)
|
|
|
|
elog(ERROR, "Could not read COPY data: %s", PQerrorMessage(file_backup_conn));
|
|
|
|
|
|
|
|
/* end of copy TODO handle it */
|
|
|
|
if (row_length == -1)
|
|
|
|
elog(ERROR, "Unexpected end of COPY data");
|
|
|
|
|
|
|
|
if(row_length != 512)
|
|
|
|
elog(ERROR, "Invalid tar block header size: %d\n", row_length);
|
|
|
|
file->size = read_tar_number(©buf[124], 12);
|
|
|
|
|
|
|
|
/* receive the data from stream and write to backup file */
|
|
|
|
remote_copy_file(file_backup_conn, file);
|
|
|
|
|
2017-12-08 16:24:35 +03:00
|
|
|
elog(VERBOSE, "File \"%s\". Copied %lu bytes",
|
2017-09-28 17:40:24 +03:00
|
|
|
file->path, (unsigned long) file->write_size);
|
|
|
|
PQfinish(file_backup_conn);
|
|
|
|
}
|
2018-04-05 18:58:40 +03:00
|
|
|
|
|
|
|
/* Data files transferring is successful */
|
|
|
|
arguments->ret = 0;
|
2017-09-28 17:40:24 +03:00
|
|
|
}
|
2012-05-18 08:54:36 +00:00
|
|
|
|
|
|
|
/*
 * Take a backup of a single postgresql instance.
 * Move files from 'pgdata' to a subdirectory in 'backup_path'.
 *
 * Orchestration: determine the timeline, locate the parent backup for
 * incremental modes, call pg_start_backup(), optionally start a WAL
 * streaming thread, build the file list (local or remote), build page
 * maps for PAGE/PTRACK modes, copy files with num_threads workers, call
 * pg_stop_backup(), absorb streamed WAL into the file list, and write
 * the backup's file list and size summary.
 */
static void
do_backup_instance(void)
{
	int			i;
	char		database_path[MAXPGPATH];
	char		dst_backup_path[MAXPGPATH];
	char		label[1024];
	XLogRecPtr	prev_backup_start_lsn = InvalidXLogRecPtr;

	/* NOTE(review): VLAs sized by the run-time num_threads — stack risk
	 * for very large thread counts; confirm num_threads is bounded. */
	pthread_t	backup_threads[num_threads];
	backup_files_args *backup_threads_args[num_threads];
	bool		backup_isok = true;

	pgBackup   *prev_backup = NULL;
	char		prev_backup_filelist_path[MAXPGPATH];
	parray	   *prev_backup_filelist = NULL;

	elog(LOG, "Database backup start");

	/* Initialize size summary */
	current.data_bytes = 0;

	/* Obtain current timeline */
	if (is_remote_backup)
	{
		char	   *sysidentifier;
		TimeLineID	starttli;
		XLogRecPtr	startpos;

		backup_conn_replication = pgut_connect_replication(pgut_dbname);

		/* Check replication prorocol connection */
		if (!RunIdentifySystem(backup_conn_replication, &sysidentifier, &starttli, &startpos, NULL))
			elog(ERROR, "Failed to send command for remote backup");

		// TODO implement the check
		// if (&sysidentifier != system_identifier)
		// elog(ERROR, "Backup data directory was initialized for system id %ld, but target backup directory system id is %ld",
		// system_identifier, sysidentifier);

		current.tli = starttli;

		PQfinish(backup_conn_replication);
	}
	else
		current.tli = get_current_timeline(false);
	/*
	 * In incremental backup mode ensure that already-validated
	 * backup on current timeline exists and get its filelist.
	 */
	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE ||
		current.backup_mode == BACKUP_MODE_DIFF_PTRACK ||
		current.backup_mode == BACKUP_MODE_DIFF_DELTA)
	{
		parray	   *backup_list;
		/* get list of backups already taken */
		backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
		if (backup_list == NULL)
			elog(ERROR, "Failed to get backup list.");

		prev_backup = catalog_get_last_data_backup(backup_list, current.tli);
		if (prev_backup == NULL)
			elog(ERROR, "Valid backup on current timeline is not found. "
						"Create new FULL backup before an incremental one.");
		parray_free(backup_list);

		pgBackupGetPath(prev_backup, prev_backup_filelist_path, lengthof(prev_backup_filelist_path),
						DATABASE_FILE_LIST);
		/* Files of previous backup needed by DELTA backup */
		prev_backup_filelist = dir_read_file_list(NULL, prev_backup_filelist_path);

		/* If lsn is not NULL, only pages with higher lsn will be copied. */
		prev_backup_start_lsn = prev_backup->start_lsn;
		current.parent_backup = prev_backup->start_time;

		/* persist the parent linkage before any data is copied */
		pgBackupWriteBackupControlFile(&current);
	}

	/*
	 * It`s illegal to take PTRACK backup if LSN from ptrack_control() is not equal to
	 * stort_backup LSN of previous backup
	 */
	if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
	{
		XLogRecPtr	ptrack_lsn = get_last_ptrack_lsn();

		if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr)
		{
			elog(ERROR, "LSN from ptrack_control %lx differs from STOP LSN of previous backup %lx.\n"
						"Create new full backup before an incremental one.",
						ptrack_lsn, prev_backup->stop_lsn);
		}
	}

	/* Clear ptrack files for FULL and PAGE backup */
	if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && is_ptrack_enable)
		pg_ptrack_clear();

	/* notify start of backup to PostgreSQL server */
	time2iso(label, lengthof(label), current.start_time);
	/*
	 * NOTE(review): strncat's third argument should be the REMAINING space
	 * (lengthof(label) - strlen(label) - 1), not the full buffer size —
	 * as written it can overflow "label" if the prefix is long; confirm
	 * and fix (or use snprintf).
	 */
	strncat(label, " with pg_probackup", lengthof(label));
	pg_start_backup(label, smooth_checkpoint, &current);

	pgBackupGetPath(&current, database_path, lengthof(database_path),
					DATABASE_DIR);

	/* start stream replication */
	if (stream_wal)
	{
		join_path_components(dst_backup_path, database_path, PG_XLOG_DIR);
		dir_create_dir(dst_backup_path, DIR_PERMISSION);

		stream_thread_arg.basedir = dst_backup_path;

		/*
		 * Connect in replication mode to the server.
		 */
		stream_thread_arg.conn = pgut_connect_replication(pgut_dbname);

		if (!CheckServerVersionForStreaming(stream_thread_arg.conn))
		{
			PQfinish(stream_thread_arg.conn);
			/*
			 * Error message already written in CheckServerVersionForStreaming().
			 * There's no hope of recovering from a version mismatch, so don't
			 * retry.
			 */
			elog(ERROR, "Cannot continue backup because stream connect has failed.");
		}

		/*
		 * Identify server, obtaining start LSN position and current timeline ID
		 * at the same time, necessary if not valid data can be found in the
		 * existing output directory.
		 */
		if (!RunIdentifySystem(stream_thread_arg.conn, NULL, NULL, NULL, NULL))
		{
			PQfinish(stream_thread_arg.conn);
			elog(ERROR, "Cannot continue backup because stream connect has failed.");
		}

		/* By default there are some error */
		stream_thread_arg.ret = 1;

		/* NOTE(review): pthread_create return value is not checked */
		pthread_create(&stream_thread, NULL, (void *(*)(void *)) StreamLog,
					   &stream_thread_arg);
	}

	/* initialize backup list */
	backup_files_list = parray_new();

	/* list files with the logical path. omit $PGDATA */
	if (is_remote_backup)
		get_remote_pgdata_filelist(backup_files_list);
	else
		dir_list_file(backup_files_list, pgdata, true, true, false);

	/* Extract information about files in backup_list parsing their names:*/
	parse_backup_filelist_filenames(backup_files_list, pgdata);

	if (current.backup_mode != BACKUP_MODE_FULL)
	{
		elog(LOG, "current_tli:%X", current.tli);
		elog(LOG, "prev_backup->start_lsn: %X/%X",
			 (uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn));
		elog(LOG, "current.start_lsn: %X/%X",
			 (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));
	}

	/*
	 * Build page mapping in incremental mode.
	 */
	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
	{
		/*
		 * Build the page map. Obtain information about changed pages
		 * reading WAL segments present in archives up to the point
		 * where this backup has started.
		 */
		extractPageMap(arclog_path, prev_backup->start_lsn, current.tli,
					   current.start_lsn,
					   /*
					    * For backup from master wait for previous segment.
					    * For backup from replica wait for current segment.
					    */
					   !from_replica, backup_files_list);
	}

	if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
	{
		/* make_pagemap_from_ptrack expects a path-sorted list */
		parray_qsort(backup_files_list, pgFileComparePath);
		make_pagemap_from_ptrack(backup_files_list);
	}

	/*
	 * Sort pathname ascending. It is necessary to create intermediate
	 * directories sequentially.
	 *
	 * For example:
	 * 1 - create 'base'
	 * 2 - create 'base/1'
	 */
	parray_qsort(backup_files_list, pgFileComparePath);

	/*
	 * Make directories before backup
	 * and setup threads at the same time
	 */
	for (i = 0; i < parray_num(backup_files_list); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(backup_files_list, i);

		/* if the entry was a directory, create it in the backup */
		if (S_ISDIR(file->mode))
		{
			char		dirpath[MAXPGPATH];
			char	   *dir_name;
			/* shadows the outer database_path; same value is recomputed */
			char		database_path[MAXPGPATH];

			if (!is_remote_backup)
				dir_name = GetRelativePath(file->path, pgdata);
			else
				dir_name = file->path;

			elog(VERBOSE, "Create directory \"%s\"", dir_name);
			pgBackupGetPath(&current, database_path, lengthof(database_path),
							DATABASE_DIR);

			join_path_components(dirpath, database_path, dir_name);
			dir_create_dir(dirpath, DIR_PERMISSION);
		}

		/* setup threads: reset per-file claim locks for the workers */
		__sync_lock_release(&file->lock);
	}

	/* sort by size for load balancing */
	parray_qsort(backup_files_list, pgFileCompareSize);

	/* init thread args with own file lists */
	for (i = 0; i < num_threads; i++)
	{
		backup_files_args *arg = pg_malloc(sizeof(backup_files_args));

		arg->from_root = pgdata;
		arg->to_root = database_path;
		arg->backup_files_list = backup_files_list;
		arg->prev_backup_filelist = prev_backup_filelist;
		arg->prev_backup_start_lsn = prev_backup_start_lsn;
		arg->thread_backup_conn = NULL;
		arg->thread_cancel_conn = NULL;
		/* By default there are some error */
		arg->ret = 1;
		backup_threads_args[i] = arg;
	}

	/* Run threads */
	elog(LOG, "Start transfering data files");
	for (i = 0; i < num_threads; i++)
	{
		elog(VERBOSE, "Start thread num: %i", i);

		if (!is_remote_backup)
			pthread_create(&backup_threads[i], NULL,
						   (void *(*)(void *)) backup_files,
						   backup_threads_args[i]);
		else
			pthread_create(&backup_threads[i], NULL,
						   (void *(*)(void *)) remote_backup_files,
						   backup_threads_args[i]);
	}

	/* Wait threads */
	for (i = 0; i < num_threads; i++)
	{
		pthread_join(backup_threads[i], NULL);
		if (backup_threads_args[i]->ret == 1)
			backup_isok = false;

		pg_free(backup_threads_args[i]);
	}
	if (backup_isok)
		elog(LOG, "Data files are transfered");
	else
		elog(ERROR, "Data files transferring failed");

	/* clean previous backup file list */
	if (prev_backup_filelist)
	{
		parray_walk(prev_backup_filelist, pgFileFree);
		parray_free(prev_backup_filelist);
	}

	/* Notify end of backup */
	pg_stop_backup(&current);

	/* Add archived xlog files into the list of files of this backup */
	if (stream_wal)
	{
		parray	   *xlog_files_list;
		char		pg_xlog_path[MAXPGPATH];

		/* Scan backup PG_XLOG_DIR */
		xlog_files_list = parray_new();
		join_path_components(pg_xlog_path, database_path, PG_XLOG_DIR);
		dir_list_file(xlog_files_list, pg_xlog_path, false, true, false);

		for (i = 0; i < parray_num(xlog_files_list); i++)
		{
			pgFile	   *file = (pgFile *) parray_get(xlog_files_list, i);
			if (S_ISREG(file->mode))
				calc_file_checksum(file);
			/* Remove file path root prefix*/
			if (strstr(file->path, database_path) == file->path)
			{
				char	   *ptr = file->path;
				file->path = pstrdup(GetRelativePath(ptr, database_path));
				free(ptr);
			}
		}

		/* Add xlog files into the list of backed up files */
		parray_concat(backup_files_list, xlog_files_list);
		parray_free(xlog_files_list);
	}

	/* Print the list of files to backup catalog */
	write_backup_file_list(backup_files_list, pgdata);

	/* Compute summary of size of regular files in the backup */
	for (i = 0; i < parray_num(backup_files_list); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(backup_files_list, i);

		/* directories are accounted as one 4KB block each */
		if (S_ISDIR(file->mode))
			current.data_bytes += 4096;

		/* Count the amount of the data actually copied */
		if (S_ISREG(file->mode))
			current.data_bytes += file->write_size;
	}

	parray_walk(backup_files_list, pgFileFree);
	parray_free(backup_files_list);
	backup_files_list = NULL;
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/*
 * Entry point of pg_probackup BACKUP subcommand.
 *
 * Validates required options, connects to the target instance, verifies
 * compatibility (block sizes, server version, system identifiers, ptrack
 * availability), then runs do_backup_instance() and finalizes the backup
 * control file. Returns 0 on success; fatal problems are reported via
 * elog(ERROR), which does not return.
 */
int
do_backup(time_t start_time)
{
	/* PGDATA and BACKUP_MODE are always required */
	if (pgdata == NULL)
		elog(ERROR, "required parameter not specified: PGDATA "
						 "(-D, --pgdata)");
	if (current.backup_mode == BACKUP_MODE_INVALID)
		elog(ERROR, "required parameter not specified: BACKUP_MODE "
						 "(-b, --backup-mode)");

	/* Create connection for PostgreSQL */
	backup_conn = pgut_connect(pgut_dbname);
	/* Make sure the connection is closed even on error exit paths */
	pgut_atexit_push(backup_disconnect, NULL);

	/* Remember conninfo so it can be stored in the backup control file */
	current.primary_conninfo = pgut_get_conninfo_string(backup_conn);
	/* Confirm data block size and xlog block size are compatible */
	confirm_block_size("block_size", BLCKSZ);
	confirm_block_size("wal_block_size", XLOG_BLCKSZ);

	from_replica = pg_is_in_recovery();

	/* Confirm that this server version is supported */
	check_server_version();

	/* TODO fix it for remote backup*/
	if (!is_remote_backup)
		current.checksum_version = get_data_checksum_version(true);

	is_checksum_enabled = pg_checksum_enable();

	if (is_checksum_enabled)
		elog(LOG, "This PostgreSQL instance was initialized with data block checksums. "
					"Data block corruption will be detected");
	else
		elog(WARNING, "This PostgreSQL instance was initialized without data block checksums. "
						"pg_probackup have no way to detect data block corruption without them. "
						"Reinitialize PGDATA with option '--data-checksums'.");

	/* server_version_str was filled in by check_server_version() above */
	StrNCpy(current.server_version, server_version_str,
			sizeof(current.server_version));
	current.stream = stream_wal;

	is_ptrack_support = pg_ptrack_support();
	if (is_ptrack_support)
	{
		is_ptrack_enable = pg_ptrack_enable();
	}

	/* PTRACK mode requires both ptrack support and the GUC being enabled */
	if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
	{
		if (!is_ptrack_support)
			elog(ERROR, "This PostgreSQL instance does not support ptrack");
		else
		{
			if(!is_ptrack_enable)
				elog(ERROR, "Ptrack is disabled");
		}
	}

	if (from_replica)
	{
		/* Check master connection options */
		if (master_host == NULL)
			elog(ERROR, "Options for connection to master must be provided to perform backup from replica");

		/* Create connection to master server */
		master_conn = pgut_connect_extended(master_host, master_port, master_db, master_user);
	}

	/* Get exclusive lock of backup catalog */
	catalog_lock();

	/*
	 * Ensure that backup directory was initialized for the same PostgreSQL
	 * instance we opened connection to. And that target backup database PGDATA
	 * belongs to the same instance.
	 */
	/* TODO fix it for remote backup */
	if (!is_remote_backup)
		check_system_identifiers();

	/* Start backup. Update backup status. */
	current.status = BACKUP_STATUS_RUNNING;
	current.start_time = start_time;

	/* Create backup directory and BACKUP_CONTROL_FILE */
	if (pgBackupCreateDir(&current))
		elog(ERROR, "cannot create backup directory");
	pgBackupWriteBackupControlFile(&current);

	elog(LOG, "Backup destination is initialized");

	/* set the error processing function for the backup process */
	pgut_atexit_push(backup_cleanup, NULL);

	/* backup data */
	do_backup_instance();

	pgut_atexit_pop(backup_cleanup, NULL);

	/* compute size of wal files of this backup stored in the archive */
	if (!current.stream)
	{
		/* Whole segments between start and stop LSN, inclusive */
		current.wal_bytes = XLOG_SEG_SIZE *
			(current.stop_lsn/XLogSegSize - current.start_lsn/XLogSegSize + 1);
	}

	/* Backup is done. Update backup status */
	current.end_time = time(NULL);
	current.status = BACKUP_STATUS_DONE;
	pgBackupWriteBackupControlFile(&current);

	//elog(LOG, "Backup completed. Total bytes : " INT64_FORMAT "",
	//	 current.data_bytes);

	/* Re-read and verify what was just written before declaring success */
	pgBackupValidate(&current);

	elog(INFO, "Backup %s completed", base36enc(current.start_time));

	/*
	 * After successful backup completion remove backups
	 * which are expired according to retention policies
	 */
	if (delete_expired || delete_wal)
		do_retention_purge();

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Confirm that this server version is supported.
 *
 * Fills the file-scope server_version and server_version_str, and decides
 * whether the exclusive backup API must be used. Errors out for servers
 * older than 9.5 (or 9.6 when backing up from a replica).
 */
static void
check_server_version(void)
{
	/* confirm server version */
	server_version = PQserverVersion(backup_conn);

	/* PQserverVersion() returns 0 when the connection is bad */
	if (server_version == 0)
		elog(ERROR, "Unknown server version %d", server_version);

	/* Before v10 versions are two-part (e.g. "9.6"); from v10 one-part ("10") */
	if (server_version < 100000)
		sprintf(server_version_str, "%d.%d",
				server_version / 10000,
				(server_version / 100) % 100);
	else
		sprintf(server_version_str, "%d",
				server_version / 10000);

	if (server_version < 90500)
		elog(ERROR,
			 "server version is %s, must be %s or higher",
			 server_version_str, "9.5");

	if (from_replica && server_version < 90600)
		elog(ERROR,
			 "server version is %s, must be %s or higher for backup from replica",
			 server_version_str, "9.6");

	/* Do exclusive backup only for PostgreSQL 9.5 */
	exclusive_backup = server_version < 90600 ||
		current.backup_mode == BACKUP_MODE_DIFF_PTRACK;
}
|
|
|
|
|
2017-04-05 19:48:55 +03:00
|
|
|
/*
|
2017-04-18 11:41:02 +03:00
|
|
|
* Ensure that backup directory was initialized for the same PostgreSQL
|
|
|
|
* instance we opened connection to. And that target backup database PGDATA
|
|
|
|
* belogns to the same instance.
|
|
|
|
* All system identifiers must be equal.
|
2017-04-05 19:48:55 +03:00
|
|
|
*/
|
|
|
|
static void
|
2017-04-18 11:41:02 +03:00
|
|
|
check_system_identifiers(void)
|
2017-04-05 19:48:55 +03:00
|
|
|
{
|
2017-04-18 11:41:02 +03:00
|
|
|
uint64 system_id_conn;
|
|
|
|
uint64 system_id_pgdata;
|
2017-04-05 19:48:55 +03:00
|
|
|
|
2017-05-29 18:52:43 +03:00
|
|
|
system_id_pgdata = get_system_identifier(pgdata);
|
2017-12-11 15:44:54 +03:00
|
|
|
system_id_conn = get_remote_system_identifier(backup_conn);
|
2017-04-18 11:41:02 +03:00
|
|
|
|
2017-12-11 15:44:54 +03:00
|
|
|
if (system_id_conn != system_identifier)
|
|
|
|
elog(ERROR, "Backup data directory was initialized for system id %ld, but connected instance system id is %ld",
|
|
|
|
system_identifier, system_id_conn);
|
2017-04-18 11:41:02 +03:00
|
|
|
if (system_id_pgdata != system_identifier)
|
|
|
|
elog(ERROR, "Backup data directory was initialized for system id %ld, but target backup directory system id is %ld",
|
|
|
|
system_identifier, system_id_pgdata);
|
2017-04-05 19:48:55 +03:00
|
|
|
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/*
 * Ensure that target backup database is initialized with
 * compatible settings. Currently check BLCKSZ and XLOG_BLCKSZ.
 *
 * name   - GUC name to query ("block_size" or "wal_block_size")
 * blcksz - the value this binary was compiled with; must match the server's.
 */
static void
confirm_block_size(const char *name, int blcksz)
{
	PGresult   *res;
	char	   *endp;
	int			block_size;

	res = pgut_execute(backup_conn, "SELECT pg_catalog.current_setting($1)", 1, &name, true);
	if (PQntuples(res) != 1 || PQnfields(res) != 1)
		elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(backup_conn));

	block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10);
	PQclear(res);

	/* Reject both unparsable values (trailing garbage) and mismatches */
	if ((endp && *endp) || block_size != blcksz)
		elog(ERROR,
			 "%s(%d) is not compatible(%d expected)",
			 name, block_size, blcksz);
}
|
|
|
|
|
|
|
|
/*
 * Notify start of backup to PostgreSQL server.
 *
 * label  - backup label passed to pg_start_backup()
 * smooth - if true, request a smooth (non-immediate) checkpoint
 * backup - receives the start LSN reported by the server
 *
 * Chooses the exclusive or non-exclusive server API depending on
 * exclusive_backup, and waits until the start LSN becomes available in
 * the WAL archive (not needed in stream mode).
 */
static void
pg_start_backup(const char *label, bool smooth, pgBackup *backup)
{
	PGresult   *res;
	const char *params[2];
	uint32		xlogid;
	uint32		xrecoff;
	PGconn	   *conn;

	params[0] = label;

	/* For replica we call pg_start_backup() on master */
	conn = (from_replica) ? master_conn : backup_conn;

	/* 2nd argument is 'fast'*/
	params[1] = smooth ? "false" : "true";
	if (!exclusive_backup)
		/* Third argument 'false' selects the non-exclusive backup mode */
		res = pgut_execute(conn,
						   "SELECT pg_catalog.pg_start_backup($1, $2, false)",
						   2,
						   params,
						   true);
	else
		res = pgut_execute(conn,
						   "SELECT pg_catalog.pg_start_backup($1, $2)",
						   2,
						   params,
						   true);

	/* Extract timeline and LSN from results of pg_start_backup() */
	XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
	/* Calculate LSN */
	backup->start_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;

	PQclear(res);

	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
		/*
		 * Switch to a new WAL segment. It is necessary to get archived WAL
		 * segment, which includes start LSN of current backup.
		 */
		pg_switch_wal(conn);

	if (!stream_wal)
	{
		/*
		 * Do not wait start_lsn for stream backup.
		 * Because WAL streaming will start after pg_start_backup() in stream
		 * mode.
		 */
		/* In PAGE mode wait for current segment... */
		if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
			wait_wal_lsn(backup->start_lsn, false);
		/* ...for others wait for previous segment */
		else
			wait_wal_lsn(backup->start_lsn, true);
	}

	/* Wait for start_lsn to be replayed by replica */
	if (from_replica)
		wait_replica_wal_lsn(backup->start_lsn, true);

	/*
	 * Set flag that pg_start_backup() was called. If an error will happen it
	 * is necessary to call pg_stop_backup() in backup_cleanup().
	 */
	backup_in_progress = true;
}
|
|
|
|
|
2017-07-13 16:16:05 +03:00
|
|
|
/*
|
|
|
|
* Switch to a new WAL segment. It should be called only for master.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pg_switch_wal(PGconn *conn)
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
/* Remove annoying NOTICE messages generated by backend */
|
2017-11-29 23:03:12 +03:00
|
|
|
res = pgut_execute(conn, "SET client_min_messages = warning;", 0, NULL, true);
|
2017-07-13 16:16:05 +03:00
|
|
|
PQclear(res);
|
|
|
|
|
|
|
|
if (server_version >= 100000)
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_wal()", 0, NULL, true);
|
2017-07-13 16:16:05 +03:00
|
|
|
else
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(conn, "SELECT * FROM pg_catalog.pg_switch_xlog()", 0, NULL, true);
|
2017-07-13 16:16:05 +03:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/*
 * Check if the instance supports ptrack
 * TODO Maybe we should rather check ptrack_version()?
 *
 * Returns true only when the ptrack_version() function exists AND reports
 * exactly version "1.5"; any other version produces a WARNING and false.
 */
static bool
pg_ptrack_support(void)
{
	PGresult   *res_db;

	/* First make sure the function exists at all, to avoid a query error */
	res_db = pgut_execute(backup_conn,
						  "SELECT proname FROM pg_proc WHERE proname='ptrack_version'",
						  0, NULL, true);
	if (PQntuples(res_db) == 0)
	{
		PQclear(res_db);
		return false;
	}
	PQclear(res_db);

	res_db = pgut_execute(backup_conn,
						  "SELECT pg_catalog.ptrack_version()",
						  0, NULL, true);
	if (PQntuples(res_db) == 0)
	{
		PQclear(res_db);
		return false;
	}

	/* Now we support only ptrack version 1.5 */
	if (strcmp(PQgetvalue(res_db, 0, 0), "1.5") != 0)
	{
		elog(WARNING, "Update your ptrack to the version 1.5. Current version is %s", PQgetvalue(res_db, 0, 0));
		PQclear(res_db);
		return false;
	}

	PQclear(res_db);
	return true;
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/* Check if ptrack is enabled in target instance */
|
2016-11-22 16:07:54 +03:00
|
|
|
static bool
|
|
|
|
pg_ptrack_enable(void)
|
|
|
|
{
|
2017-03-21 11:54:49 +03:00
|
|
|
PGresult *res_db;
|
|
|
|
|
2017-11-29 23:03:12 +03:00
|
|
|
res_db = pgut_execute(backup_conn, "show ptrack_enable", 0, NULL, true);
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2016-11-15 16:23:53 +03:00
|
|
|
if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
|
|
|
|
{
|
|
|
|
PQclear(res_db);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
PQclear(res_db);
|
2016-10-18 16:25:13 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-01-15 17:58:44 +03:00
|
|
|
/*
 * Check if data checksums are enabled in target instance.
 * (The previous comment said "ptrack" — that was a copy-paste error;
 * this function queries the data_checksums GUC.)
 */
static bool
pg_checksum_enable(void)
{
	PGresult   *res_db;

	res_db = pgut_execute(backup_conn, "show data_checksums", 0, NULL, true);

	/* GUC is reported as the literal string "on" when checksums are active */
	if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
	{
		PQclear(res_db);
		return false;
	}
	PQclear(res_db);
	return true;
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/* Check if target instance is replica */
|
2016-10-25 14:38:51 +03:00
|
|
|
static bool
|
|
|
|
pg_is_in_recovery(void)
|
|
|
|
{
|
2017-03-21 11:54:49 +03:00
|
|
|
PGresult *res_db;
|
|
|
|
|
2018-03-02 19:20:40 +03:00
|
|
|
res_db = pgut_execute(backup_conn, "SELECT pg_catalog.pg_is_in_recovery()", 0, NULL, true);
|
2016-10-25 14:38:51 +03:00
|
|
|
|
|
|
|
if (PQgetvalue(res_db, 0, 0)[0] == 't')
|
|
|
|
{
|
|
|
|
PQclear(res_db);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
PQclear(res_db);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/* Clear ptrack files in all databases of the instance we connected to */
|
2016-02-27 21:07:55 +03:00
|
|
|
static void
|
|
|
|
pg_ptrack_clear(void)
|
|
|
|
{
|
2017-03-21 11:54:49 +03:00
|
|
|
PGresult *res_db,
|
|
|
|
*res;
|
|
|
|
const char *dbname;
|
|
|
|
int i;
|
2017-09-29 20:25:11 +03:00
|
|
|
Oid dbOid, tblspcOid;
|
|
|
|
char *params[2];
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-09-29 20:25:11 +03:00
|
|
|
params[0] = palloc(64);
|
|
|
|
params[1] = palloc(64);
|
|
|
|
res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database",
|
2017-11-29 23:03:12 +03:00
|
|
|
0, NULL, true);
|
2016-02-27 21:07:55 +03:00
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
for(i = 0; i < PQntuples(res_db); i++)
|
2016-04-15 15:58:58 +03:00
|
|
|
{
|
2017-03-21 11:54:49 +03:00
|
|
|
PGconn *tmp_conn;
|
|
|
|
|
|
|
|
dbname = PQgetvalue(res_db, i, 0);
|
2017-10-02 21:05:24 +03:00
|
|
|
if (strcmp(dbname, "template0") == 0)
|
2016-04-15 15:58:58 +03:00
|
|
|
continue;
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-09-29 20:25:11 +03:00
|
|
|
dbOid = atoi(PQgetvalue(res_db, i, 1));
|
|
|
|
tblspcOid = atoi(PQgetvalue(res_db, i, 2));
|
|
|
|
|
2017-03-21 11:54:49 +03:00
|
|
|
tmp_conn = pgut_connect(dbname);
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()", 0, NULL, true);
|
2017-09-29 20:25:11 +03:00
|
|
|
|
|
|
|
sprintf(params[0], "%i", dbOid);
|
|
|
|
sprintf(params[1], "%i", tblspcOid);
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
|
2017-11-29 23:03:12 +03:00
|
|
|
2, (const char **)params, true);
|
2016-04-15 15:58:58 +03:00
|
|
|
PQclear(res);
|
2017-03-21 11:54:49 +03:00
|
|
|
|
|
|
|
pgut_disconnect(tmp_conn);
|
2016-04-15 15:58:58 +03:00
|
|
|
}
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-09-29 20:25:11 +03:00
|
|
|
pfree(params[0]);
|
|
|
|
pfree(params[1]);
|
2016-04-15 15:58:58 +03:00
|
|
|
PQclear(res_db);
|
2016-02-27 21:07:55 +03:00
|
|
|
}
|
|
|
|
|
2017-09-29 20:25:11 +03:00
|
|
|
static bool
|
|
|
|
pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid)
|
|
|
|
{
|
2017-11-01 12:47:28 +03:00
|
|
|
char *params[2];
|
|
|
|
char *dbname;
|
|
|
|
PGresult *res_db;
|
|
|
|
PGresult *res;
|
|
|
|
char *result;
|
2017-09-29 20:25:11 +03:00
|
|
|
|
|
|
|
params[0] = palloc(64);
|
|
|
|
params[1] = palloc(64);
|
|
|
|
|
2017-10-03 17:57:48 +03:00
|
|
|
sprintf(params[0], "%i", dbOid);
|
|
|
|
res_db = pgut_execute(backup_conn,
|
|
|
|
"SELECT datname FROM pg_database WHERE oid=$1",
|
2017-11-29 23:03:12 +03:00
|
|
|
1, (const char **) params, true);
|
2017-10-03 17:57:48 +03:00
|
|
|
/*
|
|
|
|
* If database is not found, it's not an error.
|
|
|
|
* It could have been deleted since previous backup.
|
|
|
|
*/
|
|
|
|
if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
|
|
|
|
return false;
|
|
|
|
|
2017-11-01 12:47:28 +03:00
|
|
|
dbname = PQgetvalue(res_db, 0, 0);
|
2017-10-03 17:57:48 +03:00
|
|
|
|
|
|
|
/* Always backup all files from template0 database */
|
|
|
|
if (strcmp(dbname, "template0") == 0)
|
|
|
|
{
|
2017-11-01 12:47:28 +03:00
|
|
|
PQclear(res_db);
|
2017-10-03 17:57:48 +03:00
|
|
|
return true;
|
|
|
|
}
|
2017-11-01 12:47:28 +03:00
|
|
|
PQclear(res_db);
|
2017-10-03 17:57:48 +03:00
|
|
|
|
2017-09-29 20:25:11 +03:00
|
|
|
sprintf(params[0], "%i", dbOid);
|
|
|
|
sprintf(params[1], "%i", tblspcOid);
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
|
2017-11-29 23:03:12 +03:00
|
|
|
2, (const char **)params, true);
|
2017-09-29 20:25:11 +03:00
|
|
|
|
|
|
|
if (PQnfields(res) != 1)
|
|
|
|
elog(ERROR, "cannot perform pg_ptrack_get_and_clear_db()");
|
|
|
|
|
2017-10-02 00:57:38 +03:00
|
|
|
result = PQgetvalue(res, 0, 0);
|
2017-09-29 20:25:11 +03:00
|
|
|
PQclear(res);
|
|
|
|
pfree(params[0]);
|
|
|
|
pfree(params[1]);
|
|
|
|
|
2017-10-02 00:57:38 +03:00
|
|
|
return (strcmp(result, "t") == 0);
|
2017-09-29 20:25:11 +03:00
|
|
|
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/* Read and clear ptrack files of the target relation.
|
|
|
|
* Result is a bytea ptrack map of all segments of the target relation.
|
2017-09-26 20:50:06 +03:00
|
|
|
* case 1: we know a tablespace_oid, db_oid, and rel_filenode
|
|
|
|
* case 2: we know db_oid and rel_filenode (no tablespace_oid, because file in pg_default)
|
|
|
|
* case 3: we know only rel_filenode (because file in pg_global)
|
2017-04-18 11:41:02 +03:00
|
|
|
*/
|
2016-05-11 19:35:14 +03:00
|
|
|
static char *
|
2017-09-26 20:50:06 +03:00
|
|
|
pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
|
2017-03-21 11:54:49 +03:00
|
|
|
size_t *result_size)
|
2016-05-11 19:35:14 +03:00
|
|
|
{
|
2017-03-21 11:54:49 +03:00
|
|
|
PGconn *tmp_conn;
|
|
|
|
PGresult *res_db,
|
|
|
|
*res;
|
|
|
|
char *params[2];
|
|
|
|
char *result;
|
2017-11-09 12:47:37 +03:00
|
|
|
char *val;
|
2016-05-11 19:35:14 +03:00
|
|
|
|
|
|
|
params[0] = palloc(64);
|
|
|
|
params[1] = palloc(64);
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-11-01 12:47:28 +03:00
|
|
|
/* regular file (not in directory 'global') */
|
2017-09-26 20:50:06 +03:00
|
|
|
if (db_oid != 0)
|
|
|
|
{
|
2017-11-01 12:47:28 +03:00
|
|
|
char *dbname;
|
|
|
|
|
2017-09-26 20:50:06 +03:00
|
|
|
sprintf(params[0], "%i", db_oid);
|
|
|
|
res_db = pgut_execute(backup_conn,
|
|
|
|
"SELECT datname FROM pg_database WHERE oid=$1",
|
2017-11-29 23:03:12 +03:00
|
|
|
1, (const char **) params, true);
|
2017-09-26 20:50:06 +03:00
|
|
|
/*
|
|
|
|
* If database is not found, it's not an error.
|
|
|
|
* It could have been deleted since previous backup.
|
|
|
|
*/
|
|
|
|
if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
|
|
|
|
return NULL;
|
2017-09-01 13:04:30 +03:00
|
|
|
|
2017-11-01 12:47:28 +03:00
|
|
|
dbname = PQgetvalue(res_db, 0, 0);
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-10-02 00:57:38 +03:00
|
|
|
if (strcmp(dbname, "template0") == 0)
|
|
|
|
{
|
2017-11-01 12:47:28 +03:00
|
|
|
PQclear(res_db);
|
2017-10-02 00:57:38 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-09-26 20:50:06 +03:00
|
|
|
tmp_conn = pgut_connect(dbname);
|
|
|
|
sprintf(params[0], "%i", tablespace_oid);
|
|
|
|
sprintf(params[1], "%i", rel_filenode);
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
|
2017-11-29 23:03:12 +03:00
|
|
|
2, (const char **)params, true);
|
2017-09-26 20:50:06 +03:00
|
|
|
|
|
|
|
if (PQnfields(res) != 1)
|
|
|
|
elog(ERROR, "cannot get ptrack file from database \"%s\" by tablespace oid %u and relation oid %u",
|
2017-11-01 12:47:28 +03:00
|
|
|
dbname, tablespace_oid, rel_filenode);
|
|
|
|
PQclear(res_db);
|
2017-09-26 20:50:06 +03:00
|
|
|
pgut_disconnect(tmp_conn);
|
|
|
|
}
|
|
|
|
/* file in directory 'global' */
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
2017-09-29 20:25:11 +03:00
|
|
|
* execute ptrack_get_and_clear for relation in pg_global
|
|
|
|
* Use backup_conn, cause we can do it from any database.
|
2017-09-26 20:50:06 +03:00
|
|
|
*/
|
2017-09-29 20:25:11 +03:00
|
|
|
sprintf(params[0], "%i", tablespace_oid);
|
2017-09-26 20:50:06 +03:00
|
|
|
sprintf(params[1], "%i", rel_filenode);
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
|
2017-11-29 23:03:12 +03:00
|
|
|
2, (const char **)params, true);
|
2017-09-26 20:50:06 +03:00
|
|
|
|
|
|
|
if (PQnfields(res) != 1)
|
2017-11-10 14:13:44 +03:00
|
|
|
elog(ERROR, "cannot get ptrack file from pg_global tablespace and relation oid %u",
|
2017-09-26 20:50:06 +03:00
|
|
|
rel_filenode);
|
|
|
|
}
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-11-09 12:47:37 +03:00
|
|
|
val = PQgetvalue(res, 0, 0);
|
|
|
|
|
2017-11-10 14:13:44 +03:00
|
|
|
/* TODO Now pg_ptrack_get_and_clear() returns bytea ending with \x.
|
|
|
|
* It should be fixed in future ptrack releases, but till then we
|
|
|
|
* can parse it.
|
|
|
|
*/
|
2017-11-09 12:47:37 +03:00
|
|
|
if (strcmp("x", val+1) == 0)
|
|
|
|
{
|
2017-11-10 14:13:44 +03:00
|
|
|
/* Ptrack file is missing */
|
|
|
|
return NULL;
|
2017-11-09 12:47:37 +03:00
|
|
|
}
|
|
|
|
|
2017-03-21 11:54:49 +03:00
|
|
|
result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
|
|
|
|
result_size);
|
2016-05-11 19:35:14 +03:00
|
|
|
PQclear(res);
|
|
|
|
pfree(params[0]);
|
|
|
|
pfree(params[1]);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
/*
|
2017-05-29 18:53:48 +03:00
|
|
|
* Wait for target 'lsn'.
|
|
|
|
*
|
|
|
|
* If current backup started in archive mode wait for 'lsn' to be archived in
|
|
|
|
* archive 'wal' directory with WAL segment file.
|
|
|
|
* If current backup started in stream mode wait for 'lsn' to be streamed in
|
2017-08-07 16:23:37 +03:00
|
|
|
* 'pg_wal' directory.
|
2017-07-13 09:39:00 +03:00
|
|
|
*
|
|
|
|
* If 'wait_prev_segment' wait for previous segment.
|
2017-04-18 11:41:02 +03:00
|
|
|
*/
|
2016-11-25 16:54:24 +03:00
|
|
|
static void
|
2017-07-13 09:39:00 +03:00
|
|
|
wait_wal_lsn(XLogRecPtr lsn, bool wait_prev_segment)
|
2016-11-25 16:54:24 +03:00
|
|
|
{
|
|
|
|
TimeLineID tli;
|
|
|
|
XLogSegNo targetSegNo;
|
2017-05-24 15:17:47 +03:00
|
|
|
char wal_dir[MAXPGPATH],
|
2017-12-12 15:07:13 +03:00
|
|
|
wal_segment_path[MAXPGPATH];
|
2017-05-24 15:17:47 +03:00
|
|
|
char wal_segment[MAXFNAMELEN];
|
2017-12-12 15:07:13 +03:00
|
|
|
bool file_exists = false;
|
2017-05-25 14:05:48 +03:00
|
|
|
uint32 try_count = 0,
|
|
|
|
timeout;
|
2016-11-25 16:54:24 +03:00
|
|
|
|
2017-12-12 15:07:13 +03:00
|
|
|
#ifdef HAVE_LIBZ
|
|
|
|
char gz_wal_segment_path[MAXPGPATH];
|
|
|
|
#endif
|
|
|
|
|
2016-11-25 16:54:24 +03:00
|
|
|
tli = get_current_timeline(false);
|
|
|
|
|
2017-04-20 12:38:51 +03:00
|
|
|
/* Compute the name of the WAL file containig requested LSN */
|
2016-11-25 16:54:24 +03:00
|
|
|
XLByteToSeg(lsn, targetSegNo);
|
2017-07-13 09:39:00 +03:00
|
|
|
if (wait_prev_segment)
|
|
|
|
targetSegNo--;
|
2017-05-24 15:17:47 +03:00
|
|
|
XLogFileName(wal_segment, tli, targetSegNo);
|
2016-11-25 16:54:24 +03:00
|
|
|
|
2017-05-24 15:17:47 +03:00
|
|
|
if (stream_wal)
|
|
|
|
{
|
|
|
|
pgBackupGetPath2(¤t, wal_dir, lengthof(wal_dir),
|
|
|
|
DATABASE_DIR, PG_XLOG_DIR);
|
2017-12-12 15:07:13 +03:00
|
|
|
join_path_components(wal_segment_path, wal_dir, wal_segment);
|
2017-05-25 14:05:48 +03:00
|
|
|
|
2017-05-29 18:53:48 +03:00
|
|
|
timeout = (uint32) checkpoint_timeout();
|
|
|
|
timeout = timeout + timeout * 0.1;
|
2017-05-24 15:17:47 +03:00
|
|
|
}
|
|
|
|
else
|
2017-05-25 14:05:48 +03:00
|
|
|
{
|
2017-12-12 15:07:13 +03:00
|
|
|
join_path_components(wal_segment_path, arclog_path, wal_segment);
|
2017-05-25 14:05:48 +03:00
|
|
|
timeout = archive_timeout;
|
|
|
|
}
|
2017-03-21 11:54:49 +03:00
|
|
|
|
2017-11-22 17:04:46 +03:00
|
|
|
if (wait_prev_segment)
|
|
|
|
elog(LOG, "Looking for segment: %s", wal_segment);
|
|
|
|
else
|
|
|
|
elog(LOG, "Looking for LSN: %X/%X in segment: %s", (uint32) (lsn >> 32), (uint32) lsn, wal_segment);
|
|
|
|
|
2017-12-12 15:07:13 +03:00
|
|
|
#ifdef HAVE_LIBZ
|
|
|
|
snprintf(gz_wal_segment_path, sizeof(gz_wal_segment_path), "%s.gz",
|
|
|
|
wal_segment_path);
|
|
|
|
#endif
|
|
|
|
|
2017-05-29 18:53:48 +03:00
|
|
|
/* Wait until target LSN is archived or streamed */
|
2017-05-25 16:38:21 +03:00
|
|
|
while (true)
|
2016-11-25 16:54:24 +03:00
|
|
|
{
|
2017-12-12 15:07:13 +03:00
|
|
|
if (!file_exists)
|
|
|
|
{
|
|
|
|
file_exists = fileExists(wal_segment_path);
|
|
|
|
|
|
|
|
/* Try to find compressed WAL file */
|
|
|
|
if (!file_exists)
|
|
|
|
{
|
|
|
|
#ifdef HAVE_LIBZ
|
|
|
|
file_exists = fileExists(gz_wal_segment_path);
|
|
|
|
if (file_exists)
|
|
|
|
elog(LOG, "Found compressed WAL segment: %s", wal_segment_path);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
else
|
|
|
|
elog(LOG, "Found WAL segment: %s", wal_segment_path);
|
|
|
|
}
|
2017-05-25 16:38:21 +03:00
|
|
|
|
|
|
|
if (file_exists)
|
|
|
|
{
|
2017-07-13 13:37:44 +03:00
|
|
|
/* Do not check LSN for previous WAL segment */
|
|
|
|
if (wait_prev_segment)
|
|
|
|
return;
|
|
|
|
|
2017-05-25 16:38:21 +03:00
|
|
|
/*
|
2017-05-29 18:53:48 +03:00
|
|
|
* A WAL segment found. Check LSN on it.
|
2017-05-25 16:38:21 +03:00
|
|
|
*/
|
|
|
|
if ((stream_wal && wal_contains_lsn(wal_dir, lsn, tli)) ||
|
|
|
|
(!stream_wal && wal_contains_lsn(arclog_path, lsn, tli)))
|
|
|
|
/* Target LSN was found */
|
2017-11-22 17:04:46 +03:00
|
|
|
{
|
|
|
|
elog(LOG, "Found LSN: %X/%X", (uint32) (lsn >> 32), (uint32) lsn);
|
2017-05-25 16:38:21 +03:00
|
|
|
return;
|
2017-11-22 17:04:46 +03:00
|
|
|
}
|
2017-05-25 16:38:21 +03:00
|
|
|
}
|
|
|
|
|
2016-11-25 16:54:24 +03:00
|
|
|
sleep(1);
|
|
|
|
if (interrupted)
|
2017-12-12 15:07:13 +03:00
|
|
|
elog(ERROR, "Interrupted during waiting for WAL archiving");
|
2016-11-25 16:54:24 +03:00
|
|
|
try_count++;
|
2017-03-24 14:07:03 +03:00
|
|
|
|
|
|
|
/* Inform user if WAL segment is absent in first attempt */
|
|
|
|
if (try_count == 1)
|
2017-07-13 13:37:44 +03:00
|
|
|
{
|
|
|
|
if (wait_prev_segment)
|
2017-12-12 15:07:13 +03:00
|
|
|
elog(INFO, "Wait for WAL segment %s to be archived",
|
|
|
|
wal_segment_path);
|
2017-07-13 13:37:44 +03:00
|
|
|
else
|
2017-12-12 15:07:13 +03:00
|
|
|
elog(INFO, "Wait for LSN %X/%X in archived WAL segment %s",
|
|
|
|
(uint32) (lsn >> 32), (uint32) lsn, wal_segment_path);
|
2017-07-13 13:37:44 +03:00
|
|
|
}
|
2017-03-24 14:07:03 +03:00
|
|
|
|
2017-05-25 14:05:48 +03:00
|
|
|
if (timeout > 0 && try_count > timeout)
|
2017-05-25 16:38:21 +03:00
|
|
|
{
|
|
|
|
if (file_exists)
|
|
|
|
elog(ERROR, "WAL segment %s was archived, "
|
|
|
|
"but target LSN %X/%X could not be archived in %d seconds",
|
|
|
|
wal_segment, (uint32) (lsn >> 32), (uint32) lsn, timeout);
|
2017-07-13 13:37:44 +03:00
|
|
|
/* If WAL segment doesn't exist or we wait for previous segment */
|
2017-05-25 16:38:21 +03:00
|
|
|
else
|
|
|
|
elog(ERROR,
|
2017-12-12 15:07:13 +03:00
|
|
|
"Switched WAL segment %s could not be archived in %d seconds",
|
2017-05-25 16:38:21 +03:00
|
|
|
wal_segment, timeout);
|
|
|
|
}
|
2016-11-25 16:54:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-11 17:41:52 +03:00
|
|
|
/*
|
2017-07-12 12:08:14 +03:00
|
|
|
* Wait for target 'lsn' on replica instance from master.
|
2017-07-11 17:41:52 +03:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
wait_replica_wal_lsn(XLogRecPtr lsn, bool is_start_backup)
|
|
|
|
{
|
|
|
|
uint32 try_count = 0;
|
|
|
|
|
|
|
|
Assert(from_replica);
|
|
|
|
|
|
|
|
while (true)
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
uint32 xlogid;
|
|
|
|
uint32 xrecoff;
|
|
|
|
XLogRecPtr replica_lsn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For lsn from pg_start_backup() we need it to be replayed on replica's
|
|
|
|
* data.
|
|
|
|
*/
|
|
|
|
if (is_start_backup)
|
2017-08-11 18:53:57 +03:00
|
|
|
{
|
|
|
|
if (server_version >= 100000)
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_replay_lsn()",
|
2017-11-29 23:03:12 +03:00
|
|
|
0, NULL, true);
|
2017-08-11 18:53:57 +03:00
|
|
|
else
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_replay_location()",
|
2017-11-29 23:03:12 +03:00
|
|
|
0, NULL, true);
|
2017-08-11 18:53:57 +03:00
|
|
|
}
|
2017-07-11 17:41:52 +03:00
|
|
|
/*
|
|
|
|
* For lsn from pg_stop_backup() we need it only to be received by
|
|
|
|
* replica and fsync()'ed on WAL segment.
|
|
|
|
*/
|
|
|
|
else
|
2017-08-11 18:53:57 +03:00
|
|
|
{
|
|
|
|
if (server_version >= 100000)
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_wal_receive_lsn()",
|
2017-11-29 23:03:12 +03:00
|
|
|
0, NULL, true);
|
2017-08-11 18:53:57 +03:00
|
|
|
else
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_last_xlog_receive_location()",
|
2017-11-29 23:03:12 +03:00
|
|
|
0, NULL, true);
|
2017-08-11 18:53:57 +03:00
|
|
|
}
|
2017-07-11 17:41:52 +03:00
|
|
|
|
|
|
|
/* Extract timeline and LSN from result */
|
|
|
|
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
|
|
|
|
/* Calculate LSN */
|
|
|
|
replica_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
|
|
|
|
PQclear(res);
|
|
|
|
|
|
|
|
/* target lsn was replicated */
|
|
|
|
if (replica_lsn >= lsn)
|
|
|
|
break;
|
|
|
|
|
|
|
|
sleep(1);
|
|
|
|
if (interrupted)
|
|
|
|
elog(ERROR, "Interrupted during waiting for target LSN");
|
|
|
|
try_count++;
|
|
|
|
|
|
|
|
/* Inform user if target lsn is absent in first attempt */
|
|
|
|
if (try_count == 1)
|
|
|
|
elog(INFO, "Wait for target LSN %X/%X to be received by replica",
|
|
|
|
(uint32) (lsn >> 32), (uint32) lsn);
|
|
|
|
|
|
|
|
if (replica_timeout > 0 && try_count > replica_timeout)
|
|
|
|
elog(ERROR, "Target LSN %X/%X could not be recevied by replica "
|
|
|
|
"in %d seconds",
|
|
|
|
(uint32) (lsn >> 32), (uint32) lsn,
|
|
|
|
replica_timeout);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-18 08:54:36 +00:00
|
|
|
/*
 * Notify end of backup to PostgreSQL server.
 *
 * Runs pg_stop_backup() on the backup connection (or on the master when
 * backing up from a replica), waits for its result, extracts stop_lsn,
 * recovery time and xid, writes backup_label/tablespace_map for
 * non-exclusive backups, and waits for stop_lsn to be archived/streamed.
 *
 * 'backup' may be NULL: backup_cleanup() calls us that way just to terminate
 * the server-side backup, in which case the result fields are not filled in.
 */
static void
pg_stop_backup(pgBackup *backup)
{
	PGconn	   *conn;
	PGresult   *res;
	PGresult   *tablespace_map_content = NULL;	/* NOTE(review): never assigned — looks like dead state; confirm */
	uint32		xlogid;
	uint32		xrecoff;
	XLogRecPtr	restore_lsn = InvalidXLogRecPtr;
	int			pg_stop_backup_timeout = 0;
	char		path[MAXPGPATH];
	char		backup_label[MAXPGPATH];
	FILE	   *fp;
	pgFile	   *file;
	size_t		len;
	char	   *val = NULL;
	char	   *stop_backup_query = NULL;

	/*
	 * We will use this values if there are no transactions between start_lsn
	 * and stop_lsn.
	 */
	time_t		recovery_time;
	TransactionId recovery_xid;

	if (!backup_in_progress)
		elog(FATAL, "backup is not in progress");

	/* For replica we call pg_stop_backup() on master */
	conn = (from_replica) ? master_conn : backup_conn;

	/* Remove annoying NOTICE messages generated by backend */
	res = pgut_execute(conn, "SET client_min_messages = warning;",
					   0, NULL, true);
	PQclear(res);

	/* Create restore point named after this backup's id */
	if (backup != NULL)
	{
		const char *params[1];
		char		name[1024];

		if (!from_replica)
			snprintf(name, lengthof(name), "pg_probackup, backup_id %s",
					 base36enc(backup->start_time));
		else
			snprintf(name, lengthof(name), "pg_probackup, backup_id %s. Replica Backup",
					 base36enc(backup->start_time));
		params[0] = name;

		/*
		 * NOTE(review): the restore point's LSN is discarded here, yet
		 * restore_lsn is used as a fallback for stop_backup_lsn below —
		 * confirm whether the result was meant to be captured.
		 */
		res = pgut_execute(conn, "SELECT pg_catalog.pg_create_restore_point($1)",
						   1, params, true);
		PQclear(res);
	}

	/*
	 * send pg_stop_backup asynchronously because we could came
	 * here from backup_cleanup() after some error caused by
	 * postgres archive_command problem and in this case we will
	 * wait for pg_stop_backup() forever.
	 */

	if (!pg_stop_backup_is_sent)
	{
		bool		sent = false;

		if (!exclusive_backup)
		{
			/*
			 * Stop the non-exclusive backup. Besides stop_lsn it returns from
			 * pg_stop_backup(false) copy of the backup label and tablespace map
			 * so they can be written to disk by the caller.
			 */
			stop_backup_query = "SELECT"
				" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
				" current_timestamp(0)::timestamptz,"
				" lsn,"
				" labelfile,"
				" spcmapfile"
				" FROM pg_catalog.pg_stop_backup(false)";

		}
		else
		{
			/* Exclusive backup: pg_stop_backup() returns only the stop LSN */
			stop_backup_query = "SELECT"
				" pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot()),"
				" current_timestamp(0)::timestamptz,"
				" pg_catalog.pg_stop_backup() as lsn";
		}

		sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING);
		pg_stop_backup_is_sent = true;
		if (!sent)
			elog(ERROR, "Failed to send pg_stop_backup query");
	}

	/*
	 * Wait for the result of pg_stop_backup(),
	 * but no longer than PG_STOP_BACKUP_TIMEOUT seconds
	 */
	if (pg_stop_backup_is_sent && !in_cleanup)
	{
		while (1)
		{
			/* Busy: poll libpq once per second until the result is ready */
			if (!PQconsumeInput(conn) || PQisBusy(conn))
			{
				pg_stop_backup_timeout++;
				sleep(1);

				if (interrupted)
				{
					pgut_cancel(conn);
					elog(ERROR, "interrupted during waiting for pg_stop_backup");
				}

				if (pg_stop_backup_timeout == 1)
					elog(INFO, "wait for pg_stop_backup()");

				/*
				 * If postgres haven't answered in PG_STOP_BACKUP_TIMEOUT seconds,
				 * send an interrupt.
				 */
				if (pg_stop_backup_timeout > PG_STOP_BACKUP_TIMEOUT)
				{
					pgut_cancel(conn);
					elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it",
						 PG_STOP_BACKUP_TIMEOUT);
				}
			}
			else
			{
				res = PQgetResult(conn);
				break;
			}
		}

		/* Check successfull execution of pg_stop_backup() */
		if (!res)
			elog(ERROR, "pg_stop backup() failed");
		else
		{
			switch (PQresultStatus(res))
			{
				case PGRES_TUPLES_OK:
				case PGRES_COMMAND_OK:
					break;
				default:
					elog(ERROR, "query failed: %s query was: %s",
						 PQerrorMessage(conn), stop_backup_query);
			}
			elog(INFO, "pg_stop backup() successfully executed");
		}

		backup_in_progress = false;

		/* Extract timeline and LSN from results of pg_stop_backup() */
		XLogDataFromLSN(PQgetvalue(res, 0, 2), &xlogid, &xrecoff);
		/* Calculate LSN */
		stop_backup_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;

		if (!XRecOffIsValid(stop_backup_lsn))
		{
			/*
			 * NOTE(review): restore_lsn is still InvalidXLogRecPtr at this
			 * point (it is never assigned above), so this fallback cannot
			 * succeed and the error below fires — verify intent.
			 */
			stop_backup_lsn = restore_lsn;
		}

		if (!XRecOffIsValid(stop_backup_lsn))
			elog(ERROR, "Invalid stop_backup_lsn value %X/%X",
				 (uint32) (stop_backup_lsn >> 32), (uint32) (stop_backup_lsn));

		/* Write backup_label and tablespace_map */
		if (!exclusive_backup)
		{
			Assert(PQnfields(res) >= 4);
			pgBackupGetPath(&current, path, lengthof(path), DATABASE_DIR);

			/* Write backup_label (column 3 = labelfile) */
			join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE);
			fp = fopen(backup_label, "w");
			if (fp == NULL)
				elog(ERROR, "can't open backup label file \"%s\": %s",
					 backup_label, strerror(errno));

			len = strlen(PQgetvalue(res, 0, 3));
			if (fwrite(PQgetvalue(res, 0, 3), 1, len, fp) != len ||
				fflush(fp) != 0 ||
				fsync(fileno(fp)) != 0 ||
				fclose(fp))
				elog(ERROR, "can't write backup label file \"%s\": %s",
					 backup_label, strerror(errno));

			/*
			 * It's vital to check if backup_files_list is initialized,
			 * because we could get here because the backup was interrupted
			 */
			if (backup_files_list)
			{
				/* Register backup_label in the backup's file list */
				file = pgFileNew(backup_label, true);
				calc_file_checksum(file);
				free(file->path);
				file->path = strdup(PG_BACKUP_LABEL_FILE);
				parray_append(backup_files_list, file);
			}
		}

		/* Column 0 = txid_snapshot_xmax, column 1 = current_timestamp */
		if (sscanf(PQgetvalue(res, 0, 0), XID_FMT, &recovery_xid) != 1)
			elog(ERROR,
				 "result of txid_snapshot_xmax() is invalid: %s",
				 PQerrorMessage(conn));
		if (!parse_time(PQgetvalue(res, 0, 1), &recovery_time))
			elog(ERROR,
				 "result of current_timestamp is invalid: %s",
				 PQerrorMessage(conn));

		/* Get content for tablespace_map from stop_backup results
		 * in case of non-exclusive backup
		 */
		if (!exclusive_backup)
			val = PQgetvalue(res, 0, 4);

		/* Write tablespace_map only when the server returned a non-empty map */
		if (!exclusive_backup && val && strlen(val) > 0)
		{
			char		tablespace_map[MAXPGPATH];

			join_path_components(tablespace_map, path, PG_TABLESPACE_MAP_FILE);
			fp = fopen(tablespace_map, "w");
			if (fp == NULL)
				elog(ERROR, "can't open tablespace map file \"%s\": %s",
					 tablespace_map, strerror(errno));

			len = strlen(val);
			if (fwrite(val, 1, len, fp) != len ||
				fflush(fp) != 0 ||
				fsync(fileno(fp)) != 0 ||
				fclose(fp))
				elog(ERROR, "can't write tablespace map file \"%s\": %s",
					 tablespace_map, strerror(errno));

			if (backup_files_list)
			{
				/* Register tablespace_map in the backup's file list */
				file = pgFileNew(tablespace_map, true);
				if (S_ISREG(file->mode))
					calc_file_checksum(file);
				free(file->path);
				file->path = strdup(PG_TABLESPACE_MAP_FILE);
				parray_append(backup_files_list, file);
			}
		}

		if (tablespace_map_content)
			PQclear(tablespace_map_content);
		PQclear(res);

		if (stream_wal)
		{
			/* Wait for the completion of stream */
			pthread_join(stream_thread, NULL);
			if (stream_thread_arg.ret == 1)
				elog(ERROR, "WAL streaming failed");
		}
	}

	/* Fill in fields if that is the correct end of backup. */
	if (backup != NULL)
	{
		char	   *xlog_path,
					stream_xlog_path[MAXPGPATH];

		/* Wait for stop_lsn to be received by replica */
		if (from_replica)
			wait_replica_wal_lsn(stop_backup_lsn, false);
		/*
		 * Wait for stop_lsn to be archived or streamed.
		 * We wait for stop_lsn in stream mode just in case.
		 */
		wait_wal_lsn(stop_backup_lsn, false);

		if (stream_wal)
		{
			pgBackupGetPath2(backup, stream_xlog_path,
							 lengthof(stream_xlog_path),
							 DATABASE_DIR, PG_XLOG_DIR);
			xlog_path = stream_xlog_path;
		}
		else
			xlog_path = arclog_path;

		backup->tli = get_current_timeline(false);
		backup->stop_lsn = stop_backup_lsn;

		elog(LOG, "Getting the Recovery Time from WAL");

		/* Fall back to the values captured from pg_stop_backup() if the WAL
		 * between start_lsn and stop_lsn contains no commit records. */
		if (!read_recovery_info(xlog_path, backup->tli,
								backup->start_lsn, backup->stop_lsn,
								&backup->recovery_time, &backup->recovery_xid))
		{
			backup->recovery_time = recovery_time;
			backup->recovery_xid = recovery_xid;
		}
	}
}
|
|
|
|
|
2017-05-29 18:53:48 +03:00
|
|
|
/*
|
|
|
|
* Retreive checkpoint_timeout GUC value in seconds.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
checkpoint_timeout(void)
|
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
const char *val;
|
|
|
|
const char *hintmsg;
|
|
|
|
int val_int;
|
|
|
|
|
2017-11-29 23:03:12 +03:00
|
|
|
res = pgut_execute(backup_conn, "show checkpoint_timeout", 0, NULL, true);
|
2017-05-29 18:53:48 +03:00
|
|
|
val = PQgetvalue(res, 0, 0);
|
|
|
|
|
|
|
|
if (!parse_int(val, &val_int, OPTION_UNIT_S, &hintmsg))
|
|
|
|
{
|
2017-11-01 12:47:28 +03:00
|
|
|
PQclear(res);
|
2017-05-29 18:53:48 +03:00
|
|
|
if (hintmsg)
|
|
|
|
elog(ERROR, "Invalid value of checkout_timeout %s: %s", val,
|
|
|
|
hintmsg);
|
|
|
|
else
|
|
|
|
elog(ERROR, "Invalid value of checkout_timeout %s", val);
|
|
|
|
}
|
|
|
|
|
2017-11-01 12:47:28 +03:00
|
|
|
PQclear(res);
|
|
|
|
|
2017-05-29 18:53:48 +03:00
|
|
|
return val_int;
|
|
|
|
}
|
|
|
|
|
2012-05-18 08:54:36 +00:00
|
|
|
/*
|
|
|
|
* Notify end of backup to server when "backup_label" is in the root directory
|
|
|
|
* of the DB cluster.
|
|
|
|
* Also update backup status to ERROR when the backup is not finished.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
backup_cleanup(bool fatal, void *userdata)
|
|
|
|
{
|
|
|
|
/*
|
2017-04-19 12:01:10 +03:00
|
|
|
* Update status of backup in BACKUP_CONTROL_FILE to ERROR.
|
2012-05-18 08:54:36 +00:00
|
|
|
* end_time != 0 means backup finished
|
|
|
|
*/
|
|
|
|
if (current.status == BACKUP_STATUS_RUNNING && current.end_time == 0)
|
|
|
|
{
|
2018-03-26 19:50:49 +03:00
|
|
|
elog(WARNING, "Backup %s is running, setting its status to ERROR",
|
2017-12-20 17:57:01 +03:00
|
|
|
base36enc(current.start_time));
|
2012-05-18 08:54:36 +00:00
|
|
|
current.end_time = time(NULL);
|
|
|
|
current.status = BACKUP_STATUS_ERROR;
|
2017-04-19 12:01:10 +03:00
|
|
|
pgBackupWriteBackupControlFile(¤t);
|
2012-05-18 08:54:36 +00:00
|
|
|
}
|
2017-05-19 12:08:16 +03:00
|
|
|
|
|
|
|
/*
|
2017-05-22 14:22:20 +03:00
|
|
|
* If backup is in progress, notify stop of backup to PostgreSQL
|
2017-05-19 12:08:16 +03:00
|
|
|
*/
|
2017-05-22 14:22:20 +03:00
|
|
|
if (backup_in_progress)
|
2017-05-19 12:08:16 +03:00
|
|
|
{
|
2018-03-26 19:50:49 +03:00
|
|
|
elog(WARNING, "backup in progress, stop backup");
|
2017-05-19 12:08:16 +03:00
|
|
|
pg_stop_backup(NULL); /* don't care stop_lsn on error case */
|
|
|
|
}
|
2012-05-18 08:54:36 +00:00
|
|
|
}
|
|
|
|
|
2017-03-21 11:54:49 +03:00
|
|
|
/*
|
|
|
|
* Disconnect backup connection during quit pg_probackup.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
backup_disconnect(bool fatal, void *userdata)
|
|
|
|
{
|
|
|
|
pgut_disconnect(backup_conn);
|
2017-06-07 16:50:33 +03:00
|
|
|
if (master_conn)
|
|
|
|
pgut_disconnect(master_conn);
|
2017-03-21 11:54:49 +03:00
|
|
|
}
|
|
|
|
|
2014-01-30 15:58:55 +09:00
|
|
|
/*
 * Take a backup of the PGDATA at a file level.
 * Copy all directories and files listed in backup_files_list.
 * If the file is 'datafile' (regular relation's main fork), read it page by page,
 * verify checksum and copy.
 * In incremental backup mode, copy only files or datafiles' pages changed after
 * previous backup.
 *
 * Thread worker: 'arg' is a backup_files_args*.  Multiple workers share one
 * file list and claim entries via the per-file 'lock' flag, so each file is
 * copied by exactly one thread.  On clean completion arguments->ret is set
 * to 0; elog(ERROR) aborts the worker otherwise.
 */
static void
backup_files(void *arg)
{
	int			i;
	backup_files_args *arguments = (backup_files_args *) arg;
	int			n_backup_files_list = parray_num(arguments->backup_files_list);

	/* backup a file */
	for (i = 0; i < n_backup_files_list; i++)
	{
		int			ret;
		struct stat buf;

		pgFile	   *file = (pgFile *) parray_get(arguments->backup_files_list, i);
		elog(VERBOSE, "Copying file: \"%s\" ", file->path);
		/* Atomically claim the file; skip it if another thread got it first */
		if (__sync_lock_test_and_set(&file->lock, 1) != 0)
			continue;

		/* check for interrupt */
		if (interrupted)
			elog(ERROR, "interrupted during backup");

		if (progress)
			elog(LOG, "Progress: (%d/%d). Process file \"%s\"",
				 i + 1, n_backup_files_list, file->path);

		/* stat file to check its current state */
		ret = stat(file->path, &buf);
		if (ret == -1)
		{
			if (errno == ENOENT)
			{
				/*
				 * If file is not found, this is not en error.
				 * It could have been deleted by concurrent postgres transaction.
				 */
				file->write_size = BYTES_INVALID;
				elog(LOG, "File \"%s\" is not found", file->path);
				continue;
			}
			else
			{
				elog(ERROR,
					 "can't stat file to backup \"%s\": %s",
					 file->path, strerror(errno));
			}
		}

		/* We have already copied all directories */
		if (S_ISDIR(buf.st_mode))
			continue;

		if (S_ISREG(buf.st_mode))
		{
			/* Check that file exist in previous backup */
			if (current.backup_mode != BACKUP_MODE_FULL)
			{
				/* Linear scan of the previous backup's file list;
				 * paths there are stored relative to from_root. */
				int			p;
				char	   *relative;
				int			n_prev_backup_files_list = parray_num(arguments->prev_backup_filelist);
				relative = GetRelativePath(file->path, arguments->from_root);
				for (p = 0; p < n_prev_backup_files_list; p++)
				{
					pgFile	   *prev_file = (pgFile *) parray_get(arguments->prev_backup_filelist, p);
					if (strcmp(relative, prev_file->path) == 0)
					{
						/* File exists in previous backup */
						file->exists_in_prev = true;
						// elog(VERBOSE, "File exists at the time of previous backup %s", relative);
						break;
					}
				}
			}
			/* copy the file into backup */
			if (file->is_datafile && !file->is_cfs)
			{
				/* backup block by block if datafile AND not compressed by cfs*/
				if (!backup_data_file(arguments,
									  arguments->from_root,
									  arguments->to_root, file,
									  arguments->prev_backup_start_lsn,
									  current.backup_mode))
				{
					/* Nothing to copy (e.g. unchanged since prev backup) */
					file->write_size = BYTES_INVALID;
					elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
					continue;
				}
			}
			else
				/* TODO:
				 * Check if file exists in previous backup
				 * If exists:
				 *	if mtime > start_backup_time of parent backup,
				 *		copy file to backup
				 *	if mtime < start_backup_time
				 *		calculate crc, compare crc to old file
				 *		if crc is the same -> skip file
				 */
				if (!copy_file(arguments->from_root,
							   arguments->to_root,
							   file))
				{
					file->write_size = BYTES_INVALID;
					elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
					continue;
				}

			elog(VERBOSE, "File \"%s\". Copied %lu bytes",
				 file->path, (unsigned long) file->write_size);
		}
		else
			elog(LOG, "unexpected file type %d", buf.st_mode);
	}

	/* Close connection */
	if (arguments->thread_backup_conn)
		pgut_disconnect(arguments->thread_backup_conn);

	/* Data files transferring is successful */
	arguments->ret = 0;
}
|
|
|
|
|
|
|
|
/*
|
2017-09-28 15:12:12 +03:00
|
|
|
* Extract information about files in backup_list parsing their names:
|
|
|
|
* - remove temp tables from the list
|
2017-11-13 18:52:09 +03:00
|
|
|
* - remove unlogged tables from the list (leave the _init fork)
|
2017-10-02 00:57:38 +03:00
|
|
|
* - set flags for database directories
|
2017-09-28 15:12:12 +03:00
|
|
|
* - set flags for datafiles
|
2012-05-18 08:54:36 +00:00
|
|
|
*/
|
|
|
|
static void
|
2017-09-29 20:25:11 +03:00
|
|
|
parse_backup_filelist_filenames(parray *files, const char *root)
|
2012-05-18 08:54:36 +00:00
|
|
|
{
|
2018-04-24 17:45:30 +03:00
|
|
|
size_t i = 0;
|
|
|
|
Oid unlogged_file_reloid = 0;
|
2012-05-18 08:54:36 +00:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
while (i < parray_num(files))
|
2012-05-18 08:54:36 +00:00
|
|
|
{
|
2017-02-25 15:12:07 +03:00
|
|
|
pgFile *file = (pgFile *) parray_get(files, i);
|
2017-02-16 17:23:43 +03:00
|
|
|
char *relative;
|
2017-09-29 20:25:11 +03:00
|
|
|
int sscanf_result;
|
2012-05-18 08:54:36 +00:00
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
relative = GetRelativePath(file->path, root);
|
2012-05-18 08:54:36 +00:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
if (S_ISREG(file->mode) &&
|
|
|
|
path_is_prefix_of_path(PG_TBLSPC_DIR, relative))
|
2017-09-29 20:25:11 +03:00
|
|
|
{
|
2018-04-24 17:45:30 +03:00
|
|
|
/*
|
|
|
|
* Found file in pg_tblspc/tblsOid/TABLESPACE_VERSION_DIRECTORY
|
|
|
|
* Legal only in case of 'pg_compression'
|
|
|
|
*/
|
|
|
|
if (strcmp(file->name, "pg_compression") == 0)
|
2017-09-29 20:25:11 +03:00
|
|
|
{
|
2018-04-24 17:45:30 +03:00
|
|
|
Oid tblspcOid;
|
|
|
|
Oid dbOid;
|
|
|
|
char tmp_rel_path[MAXPGPATH];
|
|
|
|
/*
|
|
|
|
* Check that the file is located under
|
|
|
|
* TABLESPACE_VERSION_DIRECTORY
|
|
|
|
*/
|
|
|
|
sscanf_result = sscanf(relative, PG_TBLSPC_DIR "/%u/%s/%u",
|
|
|
|
&tblspcOid, tmp_rel_path, &dbOid);
|
2017-10-19 13:33:31 +03:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
/* Yes, it is */
|
|
|
|
if (sscanf_result == 2 &&
|
|
|
|
strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
|
|
|
|
set_cfs_datafiles(files, root, relative, i);
|
2016-02-27 21:07:55 +03:00
|
|
|
}
|
2017-09-29 20:25:11 +03:00
|
|
|
}
|
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
if (S_ISREG(file->mode) && file->tblspcOid != 0 &&
|
|
|
|
file->name && file->name[0])
|
2016-09-06 16:30:54 +03:00
|
|
|
{
|
2018-04-24 17:45:30 +03:00
|
|
|
if (strcmp(file->forkName, "init") == 0)
|
2017-09-29 20:25:11 +03:00
|
|
|
{
|
2017-10-02 18:31:46 +03:00
|
|
|
/*
|
2018-04-24 17:45:30 +03:00
|
|
|
* Do not backup files of unlogged relations.
|
|
|
|
* scan filelist backward and exclude these files.
|
2017-10-02 18:31:46 +03:00
|
|
|
*/
|
2018-04-24 17:45:30 +03:00
|
|
|
int unlogged_file_num = i - 1;
|
|
|
|
pgFile *unlogged_file = (pgFile *) parray_get(files,
|
|
|
|
unlogged_file_num);
|
2017-02-13 11:44:53 +03:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
unlogged_file_reloid = file->relOid;
|
2017-11-13 18:52:09 +03:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
pgFileFree(file);
|
|
|
|
parray_remove(files, i);
|
2017-02-13 11:44:53 +03:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
while (unlogged_file_num >= 0 &&
|
|
|
|
(unlogged_file_reloid != 0) &&
|
|
|
|
(unlogged_file->relOid == unlogged_file_reloid))
|
2017-02-16 17:23:43 +03:00
|
|
|
{
|
2018-04-24 17:45:30 +03:00
|
|
|
pgFileFree(unlogged_file);
|
|
|
|
parray_remove(files, unlogged_file_num);
|
2017-11-13 18:52:09 +03:00
|
|
|
|
2018-04-24 17:45:30 +03:00
|
|
|
unlogged_file_num--;
|
2017-11-13 18:52:09 +03:00
|
|
|
i--;
|
2018-04-24 17:45:30 +03:00
|
|
|
|
|
|
|
unlogged_file = (pgFile *) parray_get(files,
|
|
|
|
unlogged_file_num);
|
2017-11-13 18:52:09 +03:00
|
|
|
}
|
2018-04-24 17:45:30 +03:00
|
|
|
|
|
|
|
continue;
|
2017-09-29 20:25:11 +03:00
|
|
|
}
|
2017-02-16 17:23:43 +03:00
|
|
|
}
|
2018-04-24 17:45:30 +03:00
|
|
|
|
|
|
|
i++;
|
2017-10-19 13:33:31 +03:00
|
|
|
}
|
2012-05-18 08:54:36 +00:00
|
|
|
}
|
|
|
|
|
2017-10-19 13:33:31 +03:00
|
|
|
/*
 * If the file is pg_compression, then we consider this tablespace as
 * cfs-compressed and should mark every file in this tablespace as a cfs-file.
 * Setting is_cfs is done by walking backwards through 'files' and marking
 * every file whose path lies under the cfs tablespace directory as 'is_cfs'.
 * Walking backwards through the array 'files' is valid because of the current
 * sort rules:
 * tblspcOid/TABLESPACE_VERSION_DIRECTORY
 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid
 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1
 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1.cfm
 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/pg_compression
 * i.e. pg_compression sorts after every data file of its tablespace, so the
 * scan can stop at the first path that falls outside the tablespace.
 *
 * 'relative' is the path of the pg_compression file relative to 'root';
 * 'i' is its index in 'files'.
 */
static void
set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i)
{
	int			len;
	int			p;
	pgFile	   *prev_file;
	char	   *cfs_tblspc_path;
	char	   *relative_prev_file;

	cfs_tblspc_path = strdup(relative);
	if(!cfs_tblspc_path)
		elog(ERROR, "Out of memory");
	/* Strip the trailing "/pg_compression" to get the tablespace directory */
	len = strlen("/pg_compression");
	cfs_tblspc_path[strlen(cfs_tblspc_path) - len] = 0;
	elog(VERBOSE, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative);

	/* Walk backwards from the pg_compression entry over its tablespace */
	for (p = (int) i; p >= 0; p--)
	{
		prev_file = (pgFile *) parray_get(files, (size_t) p);
		relative_prev_file = GetRelativePath(prev_file->path, root);

		elog(VERBOSE, "Checking file in cfs tablespace %s", relative_prev_file);

		/*
		 * NOTE(review): strstr matches the directory anywhere in the path,
		 * not only as a prefix; given the sort order described above this
		 * is presumably equivalent to a prefix test here — confirm.
		 */
		if (strstr(relative_prev_file, cfs_tblspc_path) != NULL)
		{
			/* Only regular data files get the cfs flag */
			if (S_ISREG(prev_file->mode) && prev_file->is_datafile)
			{
				elog(VERBOSE, "Setting 'is_cfs' on file %s, name %s",
					relative_prev_file, prev_file->name);
				prev_file->is_cfs = true;
			}
		}
		else
		{
			/* Left the tablespace directory: all its files were seen */
			elog(VERBOSE, "Breaking on %s", relative_prev_file);
			break;
		}
	}
	free(cfs_tblspc_path);
}
|
|
|
|
|
|
|
|
|
2012-05-18 08:54:36 +00:00
|
|
|
/*
|
2017-04-18 11:41:02 +03:00
|
|
|
* Output the list of files to backup catalog DATABASE_FILE_LIST
|
2012-05-18 08:54:36 +00:00
|
|
|
*/
|
|
|
|
static void
|
2017-04-18 11:41:02 +03:00
|
|
|
write_backup_file_list(parray *files, const char *root)
|
2012-05-18 08:54:36 +00:00
|
|
|
{
|
2017-02-25 15:12:07 +03:00
|
|
|
FILE *fp;
|
|
|
|
char path[MAXPGPATH];
|
2012-05-18 08:54:36 +00:00
|
|
|
|
2017-04-12 17:39:20 +03:00
|
|
|
pgBackupGetPath(¤t, path, lengthof(path), DATABASE_FILE_LIST);
|
2017-02-25 15:12:07 +03:00
|
|
|
|
2017-04-18 11:41:02 +03:00
|
|
|
fp = fopen(path, "wt");
|
2017-04-12 17:39:20 +03:00
|
|
|
if (fp == NULL)
|
|
|
|
elog(ERROR, "cannot open file list \"%s\": %s", path,
|
|
|
|
strerror(errno));
|
2017-04-18 11:41:02 +03:00
|
|
|
|
2017-04-12 17:39:20 +03:00
|
|
|
print_file_list(fp, files, root);
|
2017-04-18 11:41:02 +03:00
|
|
|
|
2017-07-06 22:59:47 +03:00
|
|
|
if (fflush(fp) != 0 ||
|
|
|
|
fsync(fileno(fp)) != 0 ||
|
|
|
|
fclose(fp))
|
|
|
|
elog(ERROR, "cannot write file list \"%s\": %s", path, strerror(errno));
|
2012-05-18 08:54:36 +00:00
|
|
|
}
|
2016-01-15 23:47:38 +09:00
|
|
|
|
|
|
|
/*
|
|
|
|
* A helper function to create the path of a relation file and segment.
|
|
|
|
* The returned path is palloc'd
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
datasegpath(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
|
|
|
|
{
|
|
|
|
char *path;
|
|
|
|
char *segpath;
|
|
|
|
|
|
|
|
path = relpathperm(rnode, forknum);
|
|
|
|
if (segno > 0)
|
|
|
|
{
|
|
|
|
segpath = psprintf("%s.%u", path, segno);
|
|
|
|
pfree(path);
|
|
|
|
return segpath;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return path;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2017-04-18 11:41:02 +03:00
|
|
|
* Find pgfile by given rnode in the backup_files_list
|
|
|
|
* and add given blkno to its pagemap.
|
2016-01-15 23:47:38 +09:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
|
|
|
|
{
|
|
|
|
char *path;
|
|
|
|
char *rel_path;
|
|
|
|
BlockNumber blkno_inseg;
|
|
|
|
int segno;
|
|
|
|
pgFile *file_item = NULL;
|
|
|
|
int j;
|
|
|
|
|
|
|
|
segno = blkno / RELSEG_SIZE;
|
|
|
|
blkno_inseg = blkno % RELSEG_SIZE;
|
|
|
|
|
|
|
|
rel_path = datasegpath(rnode, forknum, segno);
|
|
|
|
path = pg_malloc(strlen(rel_path) + strlen(pgdata) + 2);
|
|
|
|
sprintf(path, "%s/%s", pgdata, rel_path);
|
|
|
|
|
|
|
|
for (j = 0; j < parray_num(backup_files_list); j++)
|
|
|
|
{
|
|
|
|
pgFile *p = (pgFile *) parray_get(backup_files_list, j);
|
|
|
|
|
|
|
|
if (strcmp(p->path, path) == 0)
|
|
|
|
{
|
|
|
|
file_item = p;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we don't have any record of this file in the file map, it means
|
|
|
|
* that it's a relation that did not have much activity since the last
|
|
|
|
* backup. We can safely ignore it. If it is a new relation file, the
|
|
|
|
* backup would simply copy it as-is.
|
|
|
|
*/
|
|
|
|
if (file_item)
|
|
|
|
datapagemap_add(&file_item->pagemap, blkno_inseg);
|
|
|
|
|
|
|
|
pg_free(path);
|
|
|
|
pg_free(rel_path);
|
|
|
|
}
|
2016-02-27 21:07:55 +03:00
|
|
|
|
2017-06-07 16:28:22 +03:00
|
|
|
/*
 * Given a list of files in the instance to backup, build a pagemap for each
 * data file that has ptrack. Result is saved in the pagemap field of pgFile.
 * NOTE we rely on the fact that provided parray is sorted by file->path.
 */
static void
make_pagemap_from_ptrack(parray *files)
{
	size_t		i;
	/* db/tablespace whose ptrack_init was seen: its files get no pagemap */
	Oid			dbOid_with_ptrack_init = 0;
	Oid			tblspcOid_with_ptrack_init = 0;
	/* raw ptrack bitmap for the current relation, shared by all its segments */
	char	   *ptrack_nonparsed = NULL;
	size_t		ptrack_nonparsed_size = 0;

	elog(LOG, "Compiling pagemap");
	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, i);
		size_t		start_addr;

		/*
		 * If there is a ptrack_init file in the database,
		 * we must backup all its files, ignoring ptrack files for relations.
		 */
		if (file->is_database)
		{
			char	   *filename = strrchr(file->path, '/');

			Assert(filename != NULL);
			filename++;

			/*
			 * The function pg_ptrack_get_and_clear_db returns true
			 * if there was a ptrack_init file.
			 * Also ignore ptrack files for global tablespace,
			 * to avoid any possible specific errors.
			 */
			if ((file->tblspcOid == GLOBALTABLESPACE_OID) ||
				pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid))
			{
				/* remember which db/tablespace to skip below */
				dbOid_with_ptrack_init = file->dbOid;
				tblspcOid_with_ptrack_init = file->tblspcOid;
			}
		}

		if (file->is_datafile)
		{
			if (file->tblspcOid == tblspcOid_with_ptrack_init
				&& file->dbOid == dbOid_with_ptrack_init)
			{
				/* ignore ptrack if ptrack_init exists */
				elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->path);
				file->pagemap.bitmapsize = PageBitmapIsAbsent;
				continue;
			}

			/* get ptrack bitmap once for all segments of the file */
			if (file->segno == 0)
			{
				/* release previous value */
				pg_free(ptrack_nonparsed);
				ptrack_nonparsed_size = 0;

				ptrack_nonparsed = pg_ptrack_get_and_clear(file->tblspcOid, file->dbOid,
											   file->relOid, &ptrack_nonparsed_size);
			}

			if (ptrack_nonparsed != NULL)
			{
				/*
				 * pg_ptrack_get_and_clear() returns ptrack with VARHDR cutted out.
				 * Compute the beginning of the ptrack map related to this segment
				 *
				 * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
				 * RELSEG_SIZE. Number of Pages per segment: 131072
				 * RELSEG_SIZE/HEAPBLOCKS_PER_BYTE. number of bytes in ptrack file needed
				 * to keep track on one relsegment: 16384
				 */
				start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno;

				/*
				 * Last (partial) segment: only the remaining tail of the map.
				 * NOTE(review): if start_addr ever exceeded ptrack_nonparsed_size
				 * the subtraction below would wrap — presumably segments never
				 * outrun the map; confirm.
				 */
				if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
				{
					file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
					elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
				}
				else
				{
					/* full segment: a fixed-size slice of the map */
					file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
					elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
				}

				/* copy this segment's slice of the relation-wide bitmap */
				file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
				memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
			}
			else
			{
				/*
				 * If ptrack file is missing, try to copy the entire file.
				 * It can happen in two cases:
				 * - files were created by commands that bypass buffer manager
				 * and, correspondingly, ptrack mechanism.
				 * i.e. CREATE DATABASE
				 * - target relation was deleted.
				 */
				elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
				file->pagemap.bitmapsize = PageBitmapIsAbsent;
			}
		}
	}
	/*
	 * NOTE(review): the last relation's ptrack_nonparsed buffer is not freed
	 * here — looks like a small one-shot leak; confirm whether intentional.
	 */
	elog(LOG, "Pagemap compiled");
}
|
2016-05-26 15:56:32 +03:00
|
|
|
|
|
|
|
|
2017-04-21 14:54:33 +03:00
|
|
|
/*
 * Stop WAL streaming if current 'xlogpos' exceeds 'stop_backup_lsn', which is
 * set by pg_stop_backup().
 *
 * Callback for ReceiveXlogStream(): returning true ends streaming. Keeps
 * the previous position/timeline in static locals, and records the position
 * at which streaming stopped in the file-global stop_stream_lsn. If
 * stop_backup_lsn is set but not yet reached, arms a timeout
 * (checkpoint_timeout + 10%) and errors out when it expires.
 */
static bool
stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
{
	static uint32 prevtimeline = 0;
	static XLogRecPtr prevpos = InvalidXLogRecPtr;

	/* we assume that we get called once at the end of each segment */
	if (segment_finished)
		elog(VERBOSE, _("finished segment at %X/%X (timeline %u)"),
			 (uint32) (xlogpos >> 32), (uint32) xlogpos, timeline);

	/*
	 * Note that we report the previous, not current, position here. After a
	 * timeline switch, xlogpos points to the beginning of the segment because
	 * that's where we always begin streaming. Reporting the end of previous
	 * timeline isn't totally accurate, because the next timeline can begin
	 * slightly before the end of the WAL that we received on the previous
	 * timeline, but it's close enough for reporting purposes.
	 */
	if (prevtimeline != 0 && prevtimeline != timeline)
		elog(LOG, _("switched to timeline %u at %X/%X\n"),
			 timeline, (uint32) (prevpos >> 32), (uint32) prevpos);

	if (!XLogRecPtrIsInvalid(stop_backup_lsn))
	{
		/* target LSN reached: remember where we stopped and end streaming */
		if (xlogpos > stop_backup_lsn)
		{
			stop_stream_lsn = xlogpos;
			return true;
		}

		/* pg_stop_backup() was executed, wait for the completion of stream */
		if (stream_stop_timeout == 0)
		{
			elog(INFO, "Wait for LSN %X/%X to be streamed",
				 (uint32) (stop_backup_lsn >> 32), (uint32) stop_backup_lsn);

			/* arm the timeout: checkpoint_timeout plus a 10% margin */
			stream_stop_timeout = checkpoint_timeout();
			stream_stop_timeout = stream_stop_timeout + stream_stop_timeout * 0.1;

			stream_stop_begin = time(NULL);
		}

		if (time(NULL) - stream_stop_begin > stream_stop_timeout)
			elog(ERROR, "Target LSN %X/%X could not be streamed in %d seconds",
				 (uint32) (stop_backup_lsn >> 32), (uint32) stop_backup_lsn,
				 stream_stop_timeout);
	}

	prevtimeline = timeline;
	prevpos = xlogpos;

	return false;
}
|
|
|
|
|
|
|
|
/*
 * Start the log streaming
 *
 * Thread entry point: streams WAL from stream_arg->conn into
 * stream_arg->basedir, starting at the segment containing current.start_lsn
 * on timeline current.tli, until stop_streaming() reports completion.
 * On success sets stream_arg->ret = 0 and closes the connection; any
 * failure is fatal via elog(ERROR).
 */
static void
StreamLog(void *arg)
{
	XLogRecPtr	startpos;
	TimeLineID	starttli;
	StreamThreadArg *stream_arg = (StreamThreadArg *) arg;

	/*
	 * We must use startpos as start_lsn from start_backup
	 */
	startpos = current.start_lsn;
	starttli = current.tli;

	/*
	 * Always start streaming at the beginning of a segment
	 */
	startpos -= startpos % XLOG_SEG_SIZE;

	/* Initialize timeout */
	stream_stop_timeout = 0;
	stream_stop_begin = 0;

	/*
	 * Start the replication
	 */
	elog(LOG, _("started streaming WAL at %X/%X (timeline %u)"),
		 (uint32) (startpos >> 32), (uint32) startpos, starttli);

#if PG_VERSION_NUM >= 90600
	{
		/* 9.6+ API: options are passed through a StreamCtl struct */
		StreamCtl	ctl;

		MemSet(&ctl, 0, sizeof(ctl));

		ctl.startpos = startpos;
		ctl.timeline = starttli;
		ctl.sysidentifier = NULL;

#if PG_VERSION_NUM >= 100000
		/* 10+ writes through a WalWriteMethod instead of a raw basedir */
		ctl.walmethod = CreateWalDirectoryMethod(stream_arg->basedir, 0, true);
		ctl.replication_slot = replication_slot;
		ctl.stop_socket = PGINVALID_SOCKET;
#else
		ctl.basedir = (char *) stream_arg->basedir;
#endif

		ctl.stream_stop = stop_streaming;
		ctl.standby_message_timeout = standby_message_timeout;
		ctl.partial_suffix = NULL;
		ctl.synchronous = false;
		ctl.mark_done = false;

		if(ReceiveXlogStream(stream_arg->conn, &ctl) == false)
			elog(ERROR, "Problem in receivexlog");

#if PG_VERSION_NUM >= 100000
		/* finish() flushes/closes the WAL directory method */
		if (!ctl.walmethod->finish())
			elog(ERROR, "Could not finish writing WAL files: %s",
				 strerror(errno));
#endif
	}
#else
	/* pre-9.6 API: all options passed positionally */
	if(ReceiveXlogStream(stream_arg->conn, startpos, starttli, NULL, basedir,
					  stop_streaming, standby_message_timeout, NULL,
					  false, false) == false)
		elog(ERROR, "Problem in receivexlog");
#endif

	/* stop_stream_lsn was recorded by stop_streaming() */
	elog(LOG, _("finished streaming WAL at %X/%X (timeline %u)"),
		 (uint32) (stop_stream_lsn >> 32), (uint32) stop_stream_lsn, starttli);
	stream_arg->ret = 0;

	PQfinish(stream_arg->conn);
	stream_arg->conn = NULL;
}
|
2017-10-02 18:31:46 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get lsn of the moment when ptrack was enabled the last time.
|
|
|
|
*/
|
|
|
|
static XLogRecPtr
|
|
|
|
get_last_ptrack_lsn(void)
|
|
|
|
|
|
|
|
{
|
|
|
|
PGresult *res;
|
2017-10-02 21:05:24 +03:00
|
|
|
uint32 xlogid;
|
|
|
|
uint32 xrecoff;
|
2017-10-02 18:31:46 +03:00
|
|
|
XLogRecPtr lsn;
|
|
|
|
|
2018-03-02 19:20:40 +03:00
|
|
|
res = pgut_execute(backup_conn, "select pg_catalog.pg_ptrack_control_lsn()", 0, NULL, true);
|
2017-10-02 18:31:46 +03:00
|
|
|
|
2017-10-02 21:05:24 +03:00
|
|
|
/* Extract timeline and LSN from results of pg_start_backup() */
|
|
|
|
XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
|
|
|
|
/* Calculate LSN */
|
|
|
|
lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
|
2017-10-02 18:31:46 +03:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
return lsn;
|
|
|
|
}
|
2017-12-14 17:46:30 +03:00
|
|
|
|
|
|
|
char *
|
2018-01-16 14:16:50 +03:00
|
|
|
pg_ptrack_get_block(backup_files_args *arguments,
|
|
|
|
Oid dbOid,
|
2017-12-28 15:09:10 +03:00
|
|
|
Oid tblsOid,
|
2017-12-27 18:49:30 +03:00
|
|
|
Oid relOid,
|
2017-12-14 17:46:30 +03:00
|
|
|
BlockNumber blknum,
|
|
|
|
size_t *result_size)
|
|
|
|
{
|
|
|
|
PGresult *res;
|
2017-12-28 16:28:38 +03:00
|
|
|
char *params[4];
|
2017-12-14 17:46:30 +03:00
|
|
|
char *result;
|
|
|
|
|
|
|
|
params[0] = palloc(64);
|
|
|
|
params[1] = palloc(64);
|
2017-12-27 18:49:30 +03:00
|
|
|
params[2] = palloc(64);
|
2017-12-28 16:28:38 +03:00
|
|
|
params[3] = palloc(64);
|
|
|
|
|
2017-12-14 17:46:30 +03:00
|
|
|
/*
|
2018-01-16 11:54:15 +03:00
|
|
|
* Use tmp_conn, since we may work in parallel threads.
|
|
|
|
* We can connect to any database.
|
2017-12-14 17:46:30 +03:00
|
|
|
*/
|
2017-12-27 18:49:30 +03:00
|
|
|
sprintf(params[0], "%i", tblsOid);
|
2017-12-28 16:28:38 +03:00
|
|
|
sprintf(params[1], "%i", dbOid);
|
|
|
|
sprintf(params[2], "%i", relOid);
|
|
|
|
sprintf(params[3], "%u", blknum);
|
2017-12-27 18:49:30 +03:00
|
|
|
|
2018-01-16 14:16:50 +03:00
|
|
|
if (arguments->thread_backup_conn == NULL)
|
2018-01-16 15:19:48 +03:00
|
|
|
{
|
2018-01-16 14:16:50 +03:00
|
|
|
arguments->thread_backup_conn = pgut_connect(pgut_dbname);
|
2018-01-16 15:19:48 +03:00
|
|
|
}
|
2018-01-23 13:39:49 +03:00
|
|
|
|
|
|
|
if (arguments->thread_cancel_conn == NULL)
|
|
|
|
arguments->thread_cancel_conn = PQgetCancel(arguments->thread_backup_conn);
|
2018-01-16 11:54:15 +03:00
|
|
|
|
2017-12-28 16:28:38 +03:00
|
|
|
//elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum);
|
2018-01-16 14:16:50 +03:00
|
|
|
res = pgut_execute_parallel(arguments->thread_backup_conn,
|
|
|
|
arguments->thread_cancel_conn,
|
2018-03-02 19:20:40 +03:00
|
|
|
"SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
|
2017-12-28 16:28:38 +03:00
|
|
|
4, (const char **)params, true);
|
2017-12-14 17:46:30 +03:00
|
|
|
|
|
|
|
if (PQnfields(res) != 1)
|
2017-12-28 16:28:38 +03:00
|
|
|
{
|
2018-01-16 11:54:15 +03:00
|
|
|
elog(VERBOSE, "cannot get file block for relation oid %u",
|
2017-12-28 16:28:38 +03:00
|
|
|
relOid);
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-12-14 17:46:30 +03:00
|
|
|
|
2018-01-12 21:11:09 +03:00
|
|
|
if (PQgetisnull(res, 0, 0))
|
|
|
|
{
|
2018-01-16 11:54:15 +03:00
|
|
|
elog(VERBOSE, "cannot get file block for relation oid %u",
|
2018-01-12 21:11:09 +03:00
|
|
|
relOid);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-12-14 17:46:30 +03:00
|
|
|
result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
|
|
|
|
result_size);
|
2017-12-28 15:09:10 +03:00
|
|
|
|
2017-12-14 17:46:30 +03:00
|
|
|
PQclear(res);
|
2018-01-16 11:54:15 +03:00
|
|
|
|
2017-12-14 17:46:30 +03:00
|
|
|
pfree(params[0]);
|
|
|
|
pfree(params[1]);
|
2017-12-27 18:49:30 +03:00
|
|
|
pfree(params[2]);
|
2017-12-28 16:28:38 +03:00
|
|
|
pfree(params[3]);
|
2017-12-14 17:46:30 +03:00
|
|
|
|
|
|
|
return result;
|
2018-02-09 13:22:01 +03:00
|
|
|
}
|