/*-------------------------------------------------------------------------
 *
 * backup.c: backup DB cluster, archived WAL
 *
 * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
 * Portions Copyright (c) 2015-2017, Postgres Professional
 *
 *-------------------------------------------------------------------------
 */

#include "pg_probackup.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <dirent.h>
#include <time.h>
#include <pthread.h>

#include "libpq/pqsignal.h"
#include "storage/bufpage.h"
#include "datapagemap.h"
#include "streamutil.h"
#include "receivelog.h"

static const char *backupModes[] = {"", "PAGE", "PTRACK", "FULL"};
static int	standby_message_timeout = 10 * 1000;	/* 10 sec = default */
static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
const char *progname = "pg_probackup";

/* list of files contained in backup */
static parray *backup_files_list = NULL;
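
/*
 * Synchronizes the main thread with the WAL streaming thread: the main
 * thread locks this mutex before spawning StreamLog and locks it again
 * right after.  The second lock is expected to block until the streaming
 * thread releases the mutex once its replication connection attempt has
 * finished (see do_backup_database).
 */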
static pthread_mutex_t check_stream_mut = PTHREAD_MUTEX_INITIALIZER;

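/*
 * Set in do_backup() when the connected instance has ptrack enabled.
 * Used to decide whether ptrack maps must be cleared before FULL and
 * PAGE backups.
 */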
static int	is_ptrack_enable = false;

/* Backup connection */
static PGconn *backup_conn = NULL;

typedef struct
{
	const char *from_root;
	const char *to_root;

	parray	   *backup_files_list;
	parray	   *prev_backup_filelist;
	const XLogRecPtr *prev_backup_start_lsn;
} backup_files_args;
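
/*
 * Each worker thread started by do_backup_database() receives one
 * backup_files_args instance.  All workers share the same file list and
 * claim individual entries through the per-file 'lock' flag (see
 * backup_files()).
 */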

/*
 * Backup routines
 */
static void backup_cleanup(bool fatal, void *userdata);
static void backup_disconnect(bool fatal, void *userdata);

static void backup_files(void *arg);
static void do_backup_database(parray *backup_list);

static void pg_start_backup(const char *label, bool smooth, pgBackup *backup);
static void pg_stop_backup(pgBackup *backup);

static void add_pgdata_files(parray *files, const char *root);
static void write_backup_file_list(parray *files, const char *root);
static void wait_archive_lsn(XLogRecPtr lsn, bool prev_segno);
static void make_pagemap_from_ptrack(parray *files);
static void StreamLog(void *arg);

/* Ptrack functions */
static void pg_ptrack_clear(void);
static bool pg_ptrack_support(void);
static bool pg_ptrack_enable(void);
static bool pg_is_in_recovery(void);
static char *pg_ptrack_get_and_clear(Oid tablespace_oid,
									 Oid db_oid,
									 Oid rel_oid,
									 size_t *result_size);

/* Check functions */
static void check_server_version(void);
static void check_system_identifiers(void);
static void confirm_block_size(const char *name, int blcksz);

#define disconnect_and_exit(code)				\
	{											\
	if (conn != NULL) PQfinish(conn);			\
	exit(code);									\
	}

/*
 * Take a backup of database.
 */
static void
do_backup_database(parray *backup_list)
{
	size_t		i;
	char		database_path[MAXPGPATH];
	char		dst_backup_path[MAXPGPATH];
	char		label[1024];
	XLogRecPtr *prev_backup_start_lsn = NULL;

	pthread_t	backup_threads[num_threads];
	pthread_t	stream_thread;
	backup_files_args *backup_threads_args[num_threads];

	pgBackup   *prev_backup = NULL;
	char		prev_backup_filelist_path[MAXPGPATH];
	parray	   *prev_backup_filelist = NULL;

	elog(LOG, "Database backup start");

	/* Initialize size summary */
	current.data_bytes = 0;

	/* Obtain current timeline from PGDATA control file */
	current.tli = get_current_timeline(false);

	/*
	 * In incremental backup mode ensure that an already-validated
	 * backup on the current timeline exists.
	 */
	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE ||
		current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
	{
		prev_backup = catalog_get_last_data_backup(backup_list, current.tli);
		if (prev_backup == NULL)
			elog(ERROR, "Valid backup on current timeline is not found. "
				 "Create new FULL backup before an incremental one.");
	}

	/* Clear ptrack files for FULL and PAGE backup */
	if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && is_ptrack_enable)
		pg_ptrack_clear();

	/* notify start of backup to PostgreSQL server */
	time2iso(label, lengthof(label), current.start_time);
	strncat(label, " with pg_probackup", lengthof(label) - strlen(label) - 1);
	pg_start_backup(label, smooth_checkpoint, &current);

	pgBackupGetPath(&current, database_path, lengthof(database_path),
					DATABASE_DIR);

	/* start stream replication */
	if (stream_wal)
	{
		join_path_components(dst_backup_path, database_path, PG_XLOG_DIR);
		dir_create_dir(dst_backup_path, DIR_PERMISSION);

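		/*
		 * The second lock below is expected to block until StreamLog
		 * releases check_stream_mut once its replication connection attempt
		 * has finished, so a broken stream connection is detected here
		 * before any data files are copied.
		 */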
		pthread_mutex_lock(&check_stream_mut);
		pthread_create(&stream_thread, NULL, (void *(*)(void *)) StreamLog, dst_backup_path);
		pthread_mutex_lock(&check_stream_mut);
		if (conn == NULL)
			elog(ERROR, "Cannot continue backup because stream connection has failed.");

		pthread_mutex_unlock(&check_stream_mut);
	}

	/*
	 * If backup_label does not exist in $PGDATA, stop taking backup.
	 * NOTE. We can check it only on master, though.
	 */
	if (!from_replica)
	{
		char		label_path[MAXPGPATH];

		join_path_components(label_path, pgdata, PG_BACKUP_LABEL_FILE);

		/* Leave if no backup file */
		if (!fileExists(label_path))
		{
			elog(LOG, "%s does not exist, stopping backup", PG_BACKUP_LABEL_FILE);
			pg_stop_backup(NULL);
			elog(ERROR, "%s does not exist in PGDATA", PG_BACKUP_LABEL_FILE);
		}
	}

	/*
	 * To take an incremental backup, get the file list of the last completed
	 * backup.
	 */
	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE ||
		current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
	{
		Assert(prev_backup);
		pgBackupGetPath(prev_backup, prev_backup_filelist_path, lengthof(prev_backup_filelist_path),
						DATABASE_FILE_LIST);
		prev_backup_filelist = dir_read_file_list(pgdata, prev_backup_filelist_path);

		/* If lsn is not NULL, only pages with higher lsn will be copied. */
		prev_backup_start_lsn = &prev_backup->start_lsn;

		current.parent_backup = prev_backup->start_time;
		pgBackupWriteBackupControlFile(&current);
	}

	/* initialize backup list */
	backup_files_list = parray_new();

	/* list files with the logical path. omit $PGDATA */
	add_pgdata_files(backup_files_list, pgdata);

	if (current.backup_mode != BACKUP_MODE_FULL)
	{
		elog(LOG, "current_tli:%X", current.tli);
		elog(LOG, "prev_backup->start_lsn: %X/%X",
			 (uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn));
		elog(LOG, "current.start_lsn: %X/%X",
			 (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));

		/*
		 * TODO for some reason we sort the list for both incremental modes.
		 * Is it necessary?
		 */
		parray_qsort(backup_files_list, pgFileComparePathDesc);
	}

	/*
	 * Build page mapping in incremental mode.
	 */
	if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
	{
		/*
		 * Build the page map. Obtain information about changed pages
		 * reading WAL segments present in archives up to the point
		 * where this backup has started.
		 */
		extractPageMap(arclog_path, prev_backup->start_lsn, current.tli,
					   current.start_lsn,
					   /*
						* For backup from master wait for previous segment.
						* For backup from replica wait for current segment.
						*/
					   !from_replica);
	}
	else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
	{
		XLogRecPtr	ptrack_lsn = get_last_ptrack_lsn();

		if (ptrack_lsn > prev_backup->stop_lsn)
		{
			elog(ERROR, "LSN from ptrack_control %lx differs from LSN of previous ptrack backup %lx.\n"
				 "Create new full backup before an incremental one.",
				 ptrack_lsn, prev_backup->start_lsn);
		}
		make_pagemap_from_ptrack(backup_files_list);
	}

	/* Sort pathnames in ascending order. TODO What for? */
	parray_qsort(backup_files_list, pgFileComparePath);

	/*
	 * Make directories before backup
	 * and set up threads at the same time.
	 */
	for (i = 0; i < parray_num(backup_files_list); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(backup_files_list, i);

		/* if the entry was a directory, create it in the backup */
		if (S_ISDIR(file->mode))
		{
			char		dirpath[MAXPGPATH];
			char	   *dir_name = GetRelativePath(file->path, pgdata);

			if (verbose)
				elog(LOG, "Create directory \"%s\"", dir_name);

			join_path_components(dirpath, database_path, dir_name);
			dir_create_dir(dirpath, DIR_PERMISSION);
		}

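		/* Reset the per-file lock so that worker threads can claim this entry. */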
		__sync_lock_release(&file->lock);
	}

	/* sort by size for load balancing */
	parray_qsort(backup_files_list, pgFileCompareSize);

	/* init thread args with own file lists */
	for (i = 0; i < num_threads; i++)
	{
		backup_files_args *arg = pg_malloc(sizeof(backup_files_args));

		arg->from_root = pgdata;
		arg->to_root = database_path;
		arg->backup_files_list = backup_files_list;
		arg->prev_backup_filelist = prev_backup_filelist;
		arg->prev_backup_start_lsn = prev_backup_start_lsn;
		backup_threads_args[i] = arg;
	}

	/* Run threads */
	for (i = 0; i < num_threads; i++)
	{
		if (verbose)
			elog(WARNING, "Start thread num:%li", parray_num(backup_threads_args[i]->backup_files_list));
		pthread_create(&backup_threads[i], NULL, (void *(*)(void *)) backup_files, backup_threads_args[i]);
	}

	/* Wait for threads to finish */
	for (i = 0; i < num_threads; i++)
	{
		pthread_join(backup_threads[i], NULL);
		pg_free(backup_threads_args[i]);
	}

	/* clean previous backup file list */
	if (prev_backup_filelist)
	{
		parray_walk(prev_backup_filelist, pgFileFree);
		parray_free(prev_backup_filelist);
	}

	/* Notify end of backup */
	pg_stop_backup(&current);

	/* Add archived xlog files into the list of files of this backup */
	if (stream_wal)
	{
		parray	   *xlog_files_list;
		char		pg_xlog_path[MAXPGPATH];

		/* Wait for the completion of stream */
		pthread_join(stream_thread, NULL);

		/* Scan backup PG_XLOG_DIR */
		xlog_files_list = parray_new();
		join_path_components(pg_xlog_path, database_path, PG_XLOG_DIR);
		dir_list_file(xlog_files_list, pg_xlog_path, false, true, false);

		for (i = 0; i < parray_num(xlog_files_list); i++)
		{
			pgFile	   *file = (pgFile *) parray_get(xlog_files_list, i);

			calc_file_checksum(file);
			/* Remove file path root prefix */
			if (strstr(file->path, database_path) == file->path)
			{
				char	   *ptr = file->path;

				file->path = pstrdup(GetRelativePath(ptr, database_path));
				free(ptr);
			}
		}

		/* Add xlog files into the list of backed up files */
		parray_concat(backup_files_list, xlog_files_list);
		parray_free(xlog_files_list);
	}

	/* Print the list of files to backup catalog */
	write_backup_file_list(backup_files_list, pgdata);

	/* Compute summary of size of regular files in the backup */
	for (i = 0; i < parray_num(backup_files_list); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(backup_files_list, i);

		if (!S_ISREG(file->mode))
			continue;

		/* Count the amount of the data actually copied */
		current.data_bytes += file->write_size;
	}

	if (backup_files_list)
		parray_walk(backup_files_list, pgFileFree);
	parray_free(backup_files_list);
}

/*
 * Entry point of pg_probackup BACKUP subcommand.
 */
int
do_backup(void)
{
	parray	   *backup_list;
	bool		is_ptrack_support;

	/* PGDATA and BACKUP_MODE are always required */
	if (pgdata == NULL)
		elog(ERROR, "required parameter not specified: PGDATA "
			 "(-D, --pgdata)");
	if (current.backup_mode == BACKUP_MODE_INVALID)
		elog(ERROR, "required parameter not specified: BACKUP_MODE "
			 "(-b, --backup-mode)");

	/* Create connection for PostgreSQL */
	backup_conn = pgut_connect(pgut_dbname);
	pgut_atexit_push(backup_disconnect, NULL);

	/* Confirm that this server version is supported */
	check_server_version();
	/* Confirm data block size and xlog block size are compatible */
	confirm_block_size("block_size", BLCKSZ);
	confirm_block_size("wal_block_size", XLOG_BLCKSZ);

	from_replica = pg_is_in_recovery();
	current.checksum_version = get_data_checksum_version(true);
	current.stream = stream_wal;

	/* ptrack backup checks */
	is_ptrack_support = pg_ptrack_support();
	if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK && !is_ptrack_support)
		elog(ERROR, "This PostgreSQL instance does not support ptrack");

	if (is_ptrack_support)
	{
		is_ptrack_enable = pg_ptrack_enable();
		if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK && !is_ptrack_enable)
			elog(ERROR, "Ptrack is disabled");
	}

	/* Get exclusive lock of backup catalog */
	catalog_lock();

	/*
	 * Ensure that backup directory was initialized for the same PostgreSQL
	 * instance we opened connection to. And that target backup database PGDATA
	 * belongs to the same instance.
	 */
	check_system_identifiers();

	elog(LOG, "Backup start. backup-mode = %s+%s",
		 backupModes[current.backup_mode], current.stream ? "STREAM" : "ARCHIVE");

	/* Start backup. Update backup status. */
	current.status = BACKUP_STATUS_RUNNING;
	current.start_time = time(NULL);

	/* Create backup directory and BACKUP_CONTROL_FILE */
	if (pgBackupCreateDir(&current))
		elog(ERROR, "cannot create backup directory");
	pgBackupWriteBackupControlFile(&current);

	elog(LOG, "Backup destination is initialized");

	/* get list of backups already taken */
	backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
	if (backup_list == NULL)
		elog(ERROR, "Failed to get backup list.");

	/* set the error processing function for the backup process */
	pgut_atexit_push(backup_cleanup, NULL);

	/* backup data */
	do_backup_database(backup_list);
	pgut_atexit_pop(backup_cleanup, NULL);

	/* Backup is done. Update backup status */
	current.end_time = time(NULL);
	current.status = BACKUP_STATUS_DONE;
	pgBackupWriteBackupControlFile(&current);

	elog(LOG, "Backup completed. Total bytes : " INT64_FORMAT "",
		 current.data_bytes);

	pgBackupValidate(&current);

	return 0;
}

/*
 * Confirm that this server version is supported
 */
static void
check_server_version(void)
{
	static int	server_version = 0;

	/* confirm server version */
	server_version = PQserverVersion(backup_conn);

	if (server_version < 90500)
		elog(ERROR,
			 "server version is %d.%d.%d, must be %s or higher",
			 server_version / 10000,
			 (server_version / 100) % 100,
			 server_version % 100, "9.5");

	if (from_replica && server_version < 90600)
		elog(ERROR,
			 "server version is %d.%d.%d, must be %s or higher for backup from replica",
			 server_version / 10000,
			 (server_version / 100) % 100,
			 server_version % 100, "9.6");
}

/*
 * Ensure that backup directory was initialized for the same PostgreSQL
 * instance we opened connection to. And that target backup database PGDATA
 * belongs to the same instance.
 * All system identifiers must be equal.
 */
static void
check_system_identifiers(void)
{
	PGresult   *res;
	uint64		system_id_conn;
	uint64		system_id_pgdata;
	char	   *val;

	system_id_pgdata = get_system_identifier();

	res = pgut_execute(backup_conn,
					   "SELECT system_identifier FROM pg_control_system()",
					   0, NULL);
	val = PQgetvalue(res, 0, 0);

	if (!parse_uint64(val, &system_id_conn))
		elog(ERROR, "%s is not system_identifier", val);
	PQclear(res);

	if (system_id_conn != system_identifier)
		elog(ERROR, "Backup data directory was initialized for system id %ld, but connected instance system id is %ld",
			 system_identifier, system_id_conn);

	if (system_id_pgdata != system_identifier)
		elog(ERROR, "Backup data directory was initialized for system id %ld, but target backup directory system id is %ld",
			 system_identifier, system_id_pgdata);
}

/*
 * Ensure that target backup database is initialized with
 * compatible settings. Currently check BLCKSZ and XLOG_BLCKSZ.
 */
static void
confirm_block_size(const char *name, int blcksz)
{
	PGresult   *res;
	char	   *endp;
	int			block_size;

	res = pgut_execute(backup_conn, "SELECT current_setting($1)", 1, &name);
	if (PQntuples(res) != 1 || PQnfields(res) != 1)
		elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(backup_conn));

	block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10);
	if ((endp && *endp) || block_size != blcksz)
		elog(ERROR,
			 "%s(%d) is not compatible(%d expected)",
			 name, block_size, blcksz);
	PQclear(res);
}

/*
 * Notify start of backup to PostgreSQL server.
 */
static void
pg_start_backup(const char *label, bool smooth, pgBackup *backup)
{
	PGresult   *res;
	const char *params[2];
	uint32		xlogid;
	uint32		xrecoff;

	params[0] = label;

	/* 2nd argument is 'fast' */
	params[1] = smooth ? "false" : "true";
	if (from_replica)
		res = pgut_execute(backup_conn,
						   "SELECT pg_start_backup($1, $2, false)",
						   2,
						   params);
	else
		res = pgut_execute(backup_conn,
						   "SELECT pg_start_backup($1, $2)",
						   2,
						   params);

	/* Extract timeline and LSN from results of pg_start_backup() */
	XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
	/* Calculate LSN */
	backup->start_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;

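	/*
	 * In ARCHIVE mode make sure the WAL segment that contains the backup
	 * start point has already reached the archive before any data is copied.
	 */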
	if (!stream_wal)
		wait_archive_lsn(backup->start_lsn,
						 /*
						  * For backup from master wait for previous segment.
						  * For backup from replica wait for current segment.
						  */
						 !from_replica);

	PQclear(res);
}

/*
 * Check if the instance supports ptrack
 * TODO Implement check of ptrack_version() instead of existing one
 */
static bool
pg_ptrack_support(void)
{
	PGresult   *res_db;

	res_db = pgut_execute(backup_conn,
						  "SELECT proname FROM pg_proc WHERE proname='pg_ptrack_clear'",
						  0, NULL);

	if (PQntuples(res_db) == 0)
	{
		PQclear(res_db);
		return false;
	}
	PQclear(res_db);

	return true;
}

/* Check if ptrack is enabled in target instance */
static bool
pg_ptrack_enable(void)
{
	PGresult   *res_db;

	res_db = pgut_execute(backup_conn, "show ptrack_enable", 0, NULL);

	if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
	{
		PQclear(res_db);
		return false;
	}
	PQclear(res_db);
	return true;
}

/* Check if target instance is replica */
static bool
pg_is_in_recovery(void)
{
	PGresult   *res_db;

	res_db = pgut_execute(backup_conn, "SELECT pg_is_in_recovery()", 0, NULL);

	if (PQgetvalue(res_db, 0, 0)[0] == 't')
	{
		PQclear(res_db);
		return true;
	}
	PQclear(res_db);
	return false;
}

/* Clear ptrack files in all databases of the instance we connected to */
static void
pg_ptrack_clear(void)
{
	PGresult   *res_db,
			   *res;
	const char *dbname;
	int			i;

	res_db = pgut_execute(backup_conn, "SELECT datname FROM pg_database",
						  0, NULL);

	for (i = 0; i < PQntuples(res_db); i++)
	{
		PGconn	   *tmp_conn;

		dbname = PQgetvalue(res_db, i, 0);
		if (!strcmp(dbname, "template0"))
			continue;

		tmp_conn = pgut_connect(dbname);
		res = pgut_execute(tmp_conn, "SELECT pg_ptrack_clear()", 0, NULL);
		PQclear(res);

		pgut_disconnect(tmp_conn);
	}

	PQclear(res_db);
}

/*
 * Read and clear ptrack files of the target relation.
 * Result is a bytea ptrack map of all segments of the target relation.
 */
static char *
pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_oid,
						size_t *result_size)
{
	PGconn	   *tmp_conn;
	PGresult   *res_db,
			   *res;
	char	   *dbname;
	char	   *params[2];
	char	   *result;

	params[0] = palloc(64);
	params[1] = palloc(64);
	sprintf(params[0], "%i", db_oid);

	res_db = pgut_execute(backup_conn,
						  "SELECT datname FROM pg_database WHERE oid=$1",
						  1, (const char **) params);

	dbname = pstrdup(PQgetvalue(res_db, 0, 0));
	PQclear(res_db);

	tmp_conn = pgut_connect(dbname);
	sprintf(params[0], "%i", tablespace_oid);
	sprintf(params[1], "%i", rel_oid);

	res = pgut_execute(tmp_conn, "SELECT pg_ptrack_get_and_clear($1, $2)",
					   2, (const char **) params);
	result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
									  result_size);
	PQclear(res);

	pgut_disconnect(tmp_conn);

	pfree(params[0]);
	pfree(params[1]);
	pfree(dbname);

	return result;
}

/*
 * Wait for the target 'lsn' to be archived as a WAL segment file in the
 * WAL archive directory.
 */
static void
wait_archive_lsn(XLogRecPtr lsn, bool prev_segno)
{
	TimeLineID	tli;
	XLogSegNo	targetSegNo;
	char		wal_path[MAXPGPATH];
	char		wal_file[MAXFNAMELEN];
	uint32		try_count = 0;

	Assert(!stream_wal);

	tli = get_current_timeline(false);

	/* Compute the name of the WAL file containing the requested LSN */
	XLByteToSeg(lsn, targetSegNo);
	if (prev_segno)
		targetSegNo--;
	XLogFileName(wal_file, tli, targetSegNo);

	join_path_components(wal_path, arclog_path, wal_file);

	/* Wait until switched WAL is archived */
	while (!fileExists(wal_path))
	{
		sleep(1);
		if (interrupted)
			elog(ERROR, "interrupted during waiting for WAL archiving");
		try_count++;

		/* Inform user if WAL segment is absent in first attempt */
		if (try_count == 1)
			elog(INFO, "wait for LSN %X/%X in archived WAL segment %s",
				 (uint32) (lsn >> 32), (uint32) lsn, wal_path);

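		/*
		 * try_count is incremented once per one-second sleep, so
		 * archive_timeout is effectively a limit in seconds.
		 */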
		if (archive_timeout > 0 && try_count > archive_timeout)
			elog(ERROR,
				 "switched WAL segment %s could not be archived in %d seconds",
				 wal_file, archive_timeout);
	}

	/*
	 * WAL segment was archived. Check LSN on it if we waited current WAL
	 * segment, not previous.
	 */
	if (!prev_segno && !wal_contains_lsn(arclog_path, lsn, tli))
		elog(ERROR, "WAL segment %s doesn't contain target LSN %X/%X",
			 wal_file, (uint32) (lsn >> 32), (uint32) lsn);
}

/*
 * Notify end of backup to PostgreSQL server.
 */
static void
pg_stop_backup(pgBackup *backup)
{
	PGresult   *res;
	uint32		xlogid;
	uint32		xrecoff;

	/*
	 * We will use these values if there are no transactions between start_lsn
	 * and stop_lsn.
	 */
	time_t		recovery_time;
	TransactionId recovery_xid;

	/* Remove annoying NOTICE messages generated by backend */
	res = pgut_execute(backup_conn, "SET client_min_messages = warning;",
					   0, NULL);
	PQclear(res);

	if (from_replica)
		/*
		 * Stop the non-exclusive backup. Besides stop_lsn, pg_stop_backup(false)
		 * returns a copy of the backup label and tablespace map so they can be
		 * written to disk by the caller.
		 */
		res = pgut_execute(backup_conn,
						   "SELECT *, txid_snapshot_xmax(txid_current_snapshot()),"
						   " current_timestamp(0)::timestamp"
						   " FROM pg_stop_backup(false)",
						   0, NULL);
	else
		res = pgut_execute(backup_conn,
						   "SELECT *, txid_snapshot_xmax(txid_current_snapshot()),"
						   " current_timestamp(0)::timestamp"
						   " FROM pg_stop_backup()",
						   0, NULL);

	/* Extract timeline and LSN from results of pg_stop_backup() */
	XLogDataFromLSN(PQgetvalue(res, 0, 0), &xlogid, &xrecoff);
	/* Calculate LSN */
	stop_backup_lsn = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff;
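
	/*
	 * For a replica backup the result row is (lsn, labelfile, spcmapfile)
	 * plus the two columns appended in the query above, so field 1 holds the
	 * backup_label contents, field 2 the tablespace_map contents, and fields
	 * 3 and 4 the snapshot xmax and the timestamp parsed below.
	 */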

	/* Write backup_label and tablespace_map for backup from replica */
	if (from_replica)
	{
		char		path[MAXPGPATH];
		char		backup_label[MAXPGPATH];
		FILE	   *fp;
		pgFile	   *file;

		Assert(PQnfields(res) >= 5);

		pgBackupGetPath(&current, path, lengthof(path), DATABASE_DIR);

		/* Write backup_label */
		join_path_components(backup_label, path, PG_BACKUP_LABEL_FILE);
		fp = fopen(backup_label, "w");
		if (fp == NULL)
			elog(ERROR, "can't open backup label file \"%s\": %s",
				 backup_label, strerror(errno));

		fwrite(PQgetvalue(res, 0, 1), 1, strlen(PQgetvalue(res, 0, 1)), fp);
		fclose(fp);

		file = pgFileNew(backup_label, true);
		calc_file_checksum(file);
		free(file->path);
		file->path = strdup(PG_BACKUP_LABEL_FILE);
		parray_append(backup_files_list, file);

		/* Write tablespace_map */
		if (strlen(PQgetvalue(res, 0, 2)) > 0)
		{
			char		tablespace_map[MAXPGPATH];

			join_path_components(tablespace_map, path, PG_TABLESPACE_MAP_FILE);
			fp = fopen(tablespace_map, "w");
			if (fp == NULL)
				elog(ERROR, "can't open tablespace map file \"%s\": %s",
					 tablespace_map, strerror(errno));

			fwrite(PQgetvalue(res, 0, 2), 1, strlen(PQgetvalue(res, 0, 2)), fp);
			fclose(fp);

			file = pgFileNew(tablespace_map, true);
			calc_file_checksum(file);
			free(file->path);
			file->path = strdup(PG_TABLESPACE_MAP_FILE);
			parray_append(backup_files_list, file);
		}

		if (sscanf(PQgetvalue(res, 0, 3), XID_FMT, &recovery_xid) != 1)
			elog(ERROR,
				 "result of txid_snapshot_xmax() is invalid: %s",
				 PQerrorMessage(backup_conn));
		if (!parse_time(PQgetvalue(res, 0, 4), &recovery_time))
			elog(ERROR,
				 "result of current_timestamp is invalid: %s",
				 PQerrorMessage(backup_conn));
	}
	else
	{
		if (sscanf(PQgetvalue(res, 0, 1), XID_FMT, &recovery_xid) != 1)
			elog(ERROR,
				 "result of txid_snapshot_xmax() is invalid: %s",
				 PQerrorMessage(backup_conn));
		if (!parse_time(PQgetvalue(res, 0, 2), &recovery_time))
			elog(ERROR,
				 "result of current_timestamp is invalid: %s",
				 PQerrorMessage(backup_conn));
	}

	PQclear(res);

	if (!stream_wal)
		wait_archive_lsn(stop_backup_lsn, false);

	/* Fill in fields if that is the correct end of backup. */
	if (backup != NULL)
	{
		char	   *xlog_path,
					stream_xlog_path[MAXPGPATH];

		if (stream_wal)
		{
			join_path_components(stream_xlog_path, pgdata, PG_XLOG_DIR);
			xlog_path = stream_xlog_path;
		}
		else
			xlog_path = arclog_path;

		backup->tli = get_current_timeline(false);
		backup->stop_lsn = stop_backup_lsn;

		if (!read_recovery_info(xlog_path, backup->tli,
								backup->start_lsn, backup->stop_lsn,
								&backup->recovery_time, &backup->recovery_xid))
		{
			backup->recovery_time = recovery_time;
			backup->recovery_xid = recovery_xid;
		}
	}
}

/*
 * Return true if the path is an existing regular file.
 */
bool
fileExists(const char *path)
{
	struct stat buf;

	if (stat(path, &buf) == -1 && errno == ENOENT)
		return false;
	else if (!S_ISREG(buf.st_mode))
		return false;
	else
		return true;
}

/*
 * Notify end of backup to server when "backup_label" is in the root directory
 * of the DB cluster.
 * Also update backup status to ERROR when the backup is not finished.
 */
static void
backup_cleanup(bool fatal, void *userdata)
{
	char		path[MAXPGPATH];

	/* If backup_label exists in $PGDATA, notify stop of backup to PostgreSQL */
	join_path_components(path, pgdata, PG_BACKUP_LABEL_FILE);
	if (fileExists(path))
	{
		elog(LOG, "%s exists, stop backup", PG_BACKUP_LABEL_FILE);
		pg_stop_backup(NULL);	/* don't care stop_lsn on error case */
	}

	/*
	 * Update status of backup in BACKUP_CONTROL_FILE to ERROR.
	 * end_time != 0 means backup finished
	 */
	if (current.status == BACKUP_STATUS_RUNNING && current.end_time == 0)
	{
		elog(LOG, "Backup is running, update its status to ERROR");
		current.end_time = time(NULL);
		current.status = BACKUP_STATUS_ERROR;
		pgBackupWriteBackupControlFile(&current);
	}
}

/*
 * Disconnect backup connection when pg_probackup quits.
 */
static void
backup_disconnect(bool fatal, void *userdata)
{
	pgut_disconnect(backup_conn);
}

/* Count bytes in file */
static long
file_size(const char *file_path)
{
	long		r;
	FILE	   *f = fopen(file_path, "r");

	if (!f)
	{
		elog(ERROR, "%s: cannot open file \"%s\" for reading: %s\n",
			 PROGRAM_NAME, file_path, strerror(errno));
		return -1;
	}
	fseek(f, 0, SEEK_END);
	r = ftell(f);
	fclose(f);
	return r;
}

/*
 * Find corresponding file in previous backup.
 * Compare generations and return true if we don't need full copy
 * of the file, but just part of it.
 *
 * skip_size - size of the file in previous backup. We can skip it
 * and copy just remaining part of the file.
 */
bool
backup_compressed_file_partially(pgFile *file, void *arg, size_t *skip_size)
{
	bool		result = false;
	pgFile	   *prev_file = NULL;
	size_t		current_file_size;
	backup_files_args *arguments = (backup_files_args *) arg;

	if (arguments->prev_backup_filelist)
	{
		pgFile	  **p = (pgFile **) parray_bsearch(arguments->prev_backup_filelist,
												   file, pgFileComparePath);
		if (p)
			prev_file = *p;

		/*
		 * If the file's generation has changed since the last backup, it is
		 * copied in full; otherwise only the part beyond the previously
		 * copied size needs to be copied.
		 */
		if (prev_file && prev_file->generation == file->generation)
		{
			current_file_size = file_size(file->path);

			if (prev_file->write_size == BYTES_INVALID)
				return false;

			*skip_size = prev_file->write_size;

			if (current_file_size >= prev_file->write_size)
			{
				elog(LOG, "Backup file %s partially: prev_size %lu, current_size %lu",
					 file->path, prev_file->write_size, current_file_size);
				result = true;
			}
			else
				elog(ERROR, "Something is wrong with %s. current_file_size %lu, prev %lu",
					 file->path, current_file_size, prev_file->write_size);
		}
		else
			elog(LOG, "Copy full %s.", file->path);
	}

	return result;
}

/*
 * Take a backup of the PGDATA at a file level.
 * Copy all directories and files listed in backup_files_list.
 * If the file is 'datafile' (regular relation's main fork), read it page by page,
 * verify checksum and copy.
 * In incremental backup mode, copy only files or datafiles' pages changed after
 * previous backup.
 * TODO review
 */
static void
backup_files(void *arg)
{
	int			i;
	backup_files_args *arguments = (backup_files_args *) arg;
	int			n_backup_files_list = parray_num(arguments->backup_files_list);

	/* backup a file or create a directory */
	for (i = 0; i < n_backup_files_list; i++)
	{
		int			ret;
		struct stat buf;

		pgFile	   *file = (pgFile *) parray_get(arguments->backup_files_list, i);
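		/*
		 * The file list is shared between all worker threads; the first
		 * thread to set file->lock processes the entry, the rest skip it.
		 */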
		if (__sync_lock_test_and_set(&file->lock, 1) != 0)
			continue;

		/* check for interrupt */
		if (interrupted)
			elog(ERROR, "interrupted during backup");

		if (progress)
			elog(LOG, "Progress: (%d/%d). Process file \"%s\"",
				 i + 1, n_backup_files_list, file->path);

		/* stat file to check its current state */
		ret = stat(file->path, &buf);
		if (ret == -1)
		{
			if (errno == ENOENT)
			{
				/*
				 * If file is not found, this is not an error.
				 * It could have been deleted by concurrent postgres transaction.
				 */
				file->write_size = BYTES_INVALID;
				elog(LOG, "File \"%s\" is not found", file->path);
				continue;
			}
			else
			{
				elog(ERROR,
					 "can't stat file to backup \"%s\": %s",
					 file->path, strerror(errno));
			}
		}

		/* We have already copied all directories */
		if (S_ISDIR(buf.st_mode))
			continue;

		if (S_ISREG(buf.st_mode))
		{
			/* skip files which have not been modified since last backup */
			/* TODO Implement: compare oldfile and newfile checksum. Now it's just a stub */
			if (arguments->prev_backup_filelist)
			{
				pgFile	   *prev_file = NULL;
				pgFile	  **p = (pgFile **) parray_bsearch(arguments->prev_backup_filelist,
														   file, pgFileComparePath);
				if (p)
					prev_file = *p;

				if (prev_file && false)
				{
					file->write_size = BYTES_INVALID;
					if (verbose)
						elog(LOG, "File \"%s\" has not changed since previous backup",
							 file->path);
					continue;
				}
			}

			/* copy the file into backup */
			if (file->is_datafile)
			{
				if (is_compressed_data_file(file))
				{
					/* TODO review */
					size_t		skip_size = 0;

					if (backup_compressed_file_partially(file, arguments, &skip_size))
					{
						/* backup cfs segment partly */
						if (!copy_file_partly(arguments->from_root,
											  arguments->to_root,
											  file, skip_size))
						{
							/* record as skipped file in file_xxx.txt */
							file->write_size = BYTES_INVALID;
							elog(LOG, "skip");
							continue;
						}
					}
					else if (!copy_file(arguments->from_root,
										arguments->to_root,
										file))
					{
						/* record as skipped file in file_xxx.txt */
						file->write_size = BYTES_INVALID;
						elog(LOG, "skip");
						continue;
					}
				}
				else if (!backup_data_file(arguments->from_root,
										   arguments->to_root, file,
										   arguments->prev_backup_start_lsn))
				{
					file->write_size = BYTES_INVALID;
					elog(LOG, "File \"%s\" was not copied to backup", file->path);
					continue;
				}
			}
			else if (!copy_file(arguments->from_root,
								arguments->to_root,
								file))
			{
				file->write_size = BYTES_INVALID;
				elog(LOG, "File \"%s\" was not copied to backup", file->path);
				continue;
			}

			elog(LOG, "File \"%s\". Copied %lu bytes",
				 file->path, (unsigned long) file->write_size);
		}
		else
			elog(LOG, "unexpected file type %d", buf.st_mode);
	}
}

/*
 * Append files to the backup list array.
 * TODO review
 */
static void
add_pgdata_files(parray *files, const char *root)
{
	size_t		i;

	/* list files with the logical path. omit $PGDATA */
	dir_list_file(files, root, true, true, false);

	/* mark files that are possible datafile as 'datafile' */
	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, i);
		char	   *relative;
		char	   *fname;
		size_t		path_len;

		/* data file must be a regular file */
		if (!S_ISREG(file->mode))
			continue;

		/* data files are under "base", "global", or "pg_tblspc" */
		relative = GetRelativePath(file->path, root);
		if (!path_is_prefix_of_path("base", relative) &&
			/*!path_is_prefix_of_path("global", relative) &&*/ //TODO What's wrong with this line?
			!path_is_prefix_of_path(PG_TBLSPC_DIR, relative))
			continue;

		/* Get file name from path */
		fname = last_dir_separator(relative);
		if (fname == NULL)
			fname = relative;
		else
			fname++;

		/* Remove temp tables from the list */
		if (fname[0] == 't' && isdigit(fname[1]))
		{
			pgFileFree(file);
			parray_remove(files, i);
			i--;
			continue;
		}

		path_len = strlen(file->path);
		/* Link the ptrack map file to its relation segments */
		if (path_len > 6 &&
			strncmp(file->path + (path_len - 6), "ptrack", 6) == 0)
		{
			pgFile	   *search_file;
			pgFile	  **pre_search_file;
			int			segno = 0;

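			/*
			 * Strip the "_ptrack" suffix (appending ".N" for segments with
			 * segno > 0) to obtain the path of the relation segment this
			 * ptrack map belongs to, and look it up in the file list.
			 */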
while (true)
|
|
|
|
{
|
|
|
|
pgFile tmp_file;
|
|
|
|
|
2016-05-01 21:05:18 +02:00
|
|
|
tmp_file.path = pg_strdup(file->path);
|
2017-02-13 10:44:53 +02:00
|
|
|
|
|
|
|
/* Segno fits into 6 digits since it is not more than 4000 */
|
2016-05-01 21:05:18 +02:00
|
|
|
if (segno > 0)
|
2017-02-16 16:23:43 +02:00
|
|
|
sprintf(tmp_file.path + path_len - 7, ".%d", segno);
|
2016-05-01 21:05:18 +02:00
|
|
|
else
|
2017-02-16 16:23:43 +02:00
|
|
|
tmp_file.path[path_len - 7] = '\0';
|
2017-02-13 10:44:53 +02:00
|
|
|
|
2017-02-25 14:12:07 +02:00
|
|
|
pre_search_file = (pgFile **) parray_bsearch(files,
|
2017-02-16 16:23:43 +02:00
|
|
|
&tmp_file,
|
|
|
|
pgFileComparePath);
|
2017-02-13 10:44:53 +02:00
|
|
|
|
2016-05-01 21:05:18 +02:00
|
|
|
if (pre_search_file != NULL)
|
|
|
|
{
|
|
|
|
search_file = *pre_search_file;
|
|
|
|
search_file->ptrack_path = pg_strdup(file->path);
|
|
|
|
search_file->segno = segno;
|
2017-02-16 16:23:43 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2016-05-01 21:05:18 +02:00
|
|
|
pg_free(tmp_file.path);
|
|
|
|
break;
|
|
|
|
}
|
2017-02-16 16:23:43 +02:00
|
|
|
|
2016-05-01 21:05:18 +02:00
|
|
|
pg_free(tmp_file.path);
|
|
|
|
segno++;
|
2016-02-27 20:07:55 +02:00
|
|
|
}
|
2016-05-01 21:05:18 +02:00
|
|
|
|
2017-02-13 10:44:53 +02:00
|
|
|
/* Remove ptrack file itself from backup list */
|
2016-02-27 20:07:55 +02:00
|
|
|
pgFileFree(file);
|
2017-02-25 14:12:07 +02:00
|
|
|
parray_remove(files, i);
|
2016-02-27 20:07:55 +02:00
|
|
|
i--;
|
|
|
|
}
		/* a compressed map (.cfm) file is not a data file */
		else if (path_len > 4 &&
				 strncmp(file->path + (path_len - 4), ".cfm", 4) == 0)
		{
			pgFile	  **pre_search_file;
			pgFile		tmp_file;

			tmp_file.path = pg_strdup(file->path);
			tmp_file.path[path_len - 4] = '\0';
			pre_search_file = (pgFile **) parray_bsearch(files,
														 &tmp_file,
														 pgFileComparePath);
			if (pre_search_file != NULL)
			{
				FileMap	   *map;
				int			md = open(file->path, O_RDWR|PG_BINARY, 0);

				if (md < 0)
					elog(ERROR, "cannot open cfm file '%s'", file->path);

				map = cfs_mmap(md);
				if (map == MAP_FAILED)
				{
					elog(LOG, "cfs_mmap() failed to map file %s: %m", file->path);
					close(md);
					break;
				}

				(*pre_search_file)->generation = map->generation;

				if (cfs_munmap(map) < 0)
					elog(LOG, "CFS failed to unmap file %s: %m",
						 file->path);
				if (close(md) < 0)
					elog(LOG, "CFS failed to close file %s: %m",
						 file->path);
			}
			else
				elog(ERROR, "corresponding segment '%s' was not found",
					 tmp_file.path);

			pg_free(tmp_file.path);
		}
		/* Names of data files start with a digit */
		else if (isdigit(fname[0]))
		{
			int			find_dot;
			int			check_digit;
			char	   *text_segno;

			file->is_datafile = true;

			/*
			 * Find the segment number suffix, if any.
			 */
			for (find_dot = (int) path_len - 1;
				 find_dot >= 0 && file->path[find_dot] != '.';
				 find_dot--);
			/* There is no segment number */
			if (find_dot <= 0)
				continue;

			text_segno = file->path + find_dot + 1;
			for (check_digit = 0; text_segno[check_digit] != '\0'; check_digit++)
				if (!isdigit(text_segno[check_digit]))
				{
					check_digit = -1;
					break;
				}

			if (check_digit != -1)
				file->segno = (int) strtol(text_segno, NULL, 10);
		}
	}
}
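
/*
 * Illustrative examples of how add_pgdata_files() classifies entries.
 * The paths are hypothetical and shown relative to $PGDATA; they are not
 * taken from any particular cluster:
 *
 *	base/16384/16385         -> is_datafile = true, segno = 0
 *	base/16384/16385.2       -> is_datafile = true, segno = 2
 *	base/16384/t3_16390      -> removed from the list (temp table)
 *	base/16384/16385.ptrack  -> removed; ptrack_path/segno set on 16385, 16385.1, ...
 *	base/16384/16385.cfm     -> generation copied to the 16385 entry, file kept
 *	postgresql.conf          -> kept as an ordinary (non-data) file
 */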

/*
 * Write the list of backed up files to the backup catalog file
 * DATABASE_FILE_LIST.
 */
static void
write_backup_file_list(parray *files, const char *root)
{
	FILE	   *fp;
	char		path[MAXPGPATH];

	pgBackupGetPath(&current, path, lengthof(path), DATABASE_FILE_LIST);

	fp = fopen(path, "wt");
	if (fp == NULL)
		elog(ERROR, "cannot open file list \"%s\": %s", path,
			 strerror(errno));

	print_file_list(fp, files, root);

	fclose(fp);
}

/*
 * A helper function to create the path of a relation file and segment.
 * The returned path is palloc'd.
 */
static char *
datasegpath(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
	char	   *path;
	char	   *segpath;

	path = relpathperm(rnode, forknum);
	if (segno > 0)
	{
		segpath = psprintf("%s.%u", path, segno);
		pfree(path);
		return segpath;
	}
	else
		return path;
}
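
/*
 * Illustrative example (the OIDs are hypothetical): for a relation in the
 * default tablespace with dbNode = 16384 and relNode = 16385,
 * relpathperm(rnode, MAIN_FORKNUM) yields "base/16384/16385", and
 * datasegpath() with segno = 2 then returns "base/16384/16385.2".
 */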

/*
 * Find the pgFile with the given rnode in the backup_files_list
 * and add the given blkno to its pagemap.
 */
void
process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
{
	char	   *path;
	char	   *rel_path;
	BlockNumber	blkno_inseg;
	int			segno;
	pgFile	   *file_item = NULL;
	int			j;

	segno = blkno / RELSEG_SIZE;
	blkno_inseg = blkno % RELSEG_SIZE;

	rel_path = datasegpath(rnode, forknum, segno);
	path = pg_malloc(strlen(rel_path) + strlen(pgdata) + 2);
	sprintf(path, "%s/%s", pgdata, rel_path);

	for (j = 0; j < parray_num(backup_files_list); j++)
	{
		pgFile	   *p = (pgFile *) parray_get(backup_files_list, j);

		if (strcmp(p->path, path) == 0)
		{
			file_item = p;
			break;
		}
	}

	/*
	 * If we don't have any record of this file in the file map, it means
	 * that it's a relation that did not have much activity since the last
	 * backup. We can safely ignore it. If it is a new relation file, the
	 * backup would simply copy it as-is.
	 */
	if (file_item)
		datapagemap_add(&file_item->pagemap, blkno_inseg);

	pg_free(path);
	pg_free(rel_path);
}
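
/*
 * Worked example of the arithmetic above (hypothetical values, default
 * RELSEG_SIZE of 131072 blocks): blkno = 131073 gives segno = 1 and
 * blkno_inseg = 1, so the bit is set in the pagemap of the entry whose
 * path is "<pgdata>/base/<db_oid>/<rel_oid>.1".
 */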

/* TODO review it */
static void
make_pagemap_from_ptrack(parray *files)
{
	int			i;

	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *p = (pgFile *) parray_get(files, i);

		if (p->ptrack_path != NULL)
		{
			char	   *tablespace;
			Oid			db_oid,
						rel_oid,
						tablespace_oid = 0;
			int			sep_iter,
						sep_count = 0;
			char	   *ptrack_nonparsed;
			size_t		ptrack_nonparsed_size = 0;
			size_t		start_addr;

			tablespace = strstr(p->ptrack_path, PG_TBLSPC_DIR);

			if (tablespace)
				sscanf(tablespace + strlen(PG_TBLSPC_DIR) + 1, "%i/", &tablespace_oid);

			/*
			 * Path has format:
			 * pg_tblspc/tablespace_oid/tablespace_version_subdir/db_oid/rel_oid
			 * base/db_oid/rel_oid
			 * Walk backwards over the last two directory separators to find
			 * the "db_oid/rel_oid" suffix.
			 */
			sep_iter = (int) strlen(p->path) - 1;
			while (sep_iter >= 0)
			{
				if (IS_DIR_SEP(p->path[sep_iter]))
				{
					sep_count++;
					if (sep_count == 2)
						break;
				}
				sep_iter--;
			}

			if (sep_iter <= 0)
				elog(ERROR, "path \"%s\" is not a relation file path", p->path);

			sscanf(p->path + sep_iter + 1, "%u/%u", &db_oid, &rel_oid);
			elog(LOG, "make_pagemap_from_ptrack: %s, db_oid/rel_oid suffix: %s",
				 p->path, p->path + sep_iter + 1);

			ptrack_nonparsed = pg_ptrack_get_and_clear(tablespace_oid, db_oid,
													   rel_oid, &ptrack_nonparsed_size);

			/*
			 * The ptrack map stores one status bit per heap page, so one
			 * relation segment of RELSEG_SIZE pages occupies RELSEG_SIZE / 8
			 * bytes of the map.
			 */
			start_addr = (RELSEG_SIZE/8) * p->segno;
			if (start_addr + RELSEG_SIZE/8 > ptrack_nonparsed_size)
				p->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
			else
				p->pagemap.bitmapsize = RELSEG_SIZE/8;

			p->pagemap.bitmap = pg_malloc(p->pagemap.bitmapsize);
			memcpy(p->pagemap.bitmap, ptrack_nonparsed + start_addr, p->pagemap.bitmapsize);
			pg_free(ptrack_nonparsed);
		}
	}
}
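
/*
 * Worked example of the slicing above (sizes are hypothetical, default
 * RELSEG_SIZE of 131072 blocks): each segment corresponds to
 * 131072 / 8 = 16384 bytes of the ptrack map, so segment 2 of a relation
 * copies bytes [32768, 49152) of the map returned by
 * pg_ptrack_get_and_clear().
 */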

/*
 * Callback for ReceiveXlogStream(): decide when WAL streaming should stop.
 * Returns true once the streamed position passes stop_backup_lsn, which is
 * set when the backup is stopped.
 */
static bool
stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
{
	static uint32 prevtimeline = 0;
	static XLogRecPtr prevpos = InvalidXLogRecPtr;

	/* we assume that we get called once at the end of each segment */
	if (verbose && segment_finished)
		fprintf(stderr, _("%s: finished segment at %X/%X (timeline %u)\n"),
				PROGRAM_NAME, (uint32) (xlogpos >> 32), (uint32) xlogpos,
				timeline);

	/*
	 * Note that we report the previous, not current, position here. After a
	 * timeline switch, xlogpos points to the beginning of the segment because
	 * that's where we always begin streaming. Reporting the end of previous
	 * timeline isn't totally accurate, because the next timeline can begin
	 * slightly before the end of the WAL that we received on the previous
	 * timeline, but it's close enough for reporting purposes.
	 */
	if (prevtimeline != 0 && prevtimeline != timeline)
		fprintf(stderr, _("%s: switched to timeline %u at %X/%X\n"),
				PROGRAM_NAME, timeline,
				(uint32) (prevpos >> 32), (uint32) prevpos);

	if (stop_backup_lsn != InvalidXLogRecPtr && xlogpos > stop_backup_lsn)
		return true;

	prevtimeline = timeline;
	prevpos = xlogpos;

	return false;
}

/*
 * Start the log streaming
 */
static void
StreamLog(void *arg)
{
	XLogRecPtr	startpos;
	TimeLineID	starttli;
	char	   *basedir = (char *) arg;

	/*
	 * Connect in replication mode to the server
	 */
	if (conn == NULL)
		conn = GetConnection();
	if (!conn)
	{
		pthread_mutex_unlock(&check_stream_mut);
		/* Error message already written in GetConnection() */
		return;
	}

	if (!CheckServerVersionForStreaming(conn))
	{
		/*
		 * Error message already written in CheckServerVersionForStreaming().
		 * There's no hope of recovering from a version mismatch, so don't
		 * retry.
		 */
		disconnect_and_exit(1);
	}

	/*
	 * Identify server, obtaining start LSN position and current timeline ID
	 * at the same time, needed if no valid data can be found in the
	 * existing output directory.
	 */
	if (!RunIdentifySystem(conn, NULL, &starttli, &startpos, NULL))
		disconnect_and_exit(1);

	/* OK, the streaming connection is established; the main process can continue */
	pthread_mutex_unlock(&check_stream_mut);

	/*
	 * Stream from the start_lsn reported by pg_start_backup(), not from the
	 * position returned by IDENTIFY_SYSTEM.
	 */
	startpos = current.start_lsn;

	/*
	 * Always start streaming at the beginning of a segment
	 */
	startpos -= startpos % XLOG_SEG_SIZE;

	/*
	 * Start the replication
	 */
	if (verbose)
		fprintf(stderr,
				_("%s: starting log streaming at %X/%X (timeline %u)\n"),
				PROGRAM_NAME, (uint32) (startpos >> 32), (uint32) startpos,
				starttli);

#if PG_VERSION_NUM >= 90600
	{
		StreamCtl	ctl;

		ctl.startpos = startpos;
		ctl.timeline = starttli;
		ctl.sysidentifier = NULL;
		ctl.basedir = basedir;
		ctl.stream_stop = stop_streaming;
		ctl.standby_message_timeout = standby_message_timeout;
		ctl.partial_suffix = NULL;
		ctl.synchronous = false;
		ctl.mark_done = false;
		if (ReceiveXlogStream(conn, &ctl) == false)
			elog(ERROR, "Problem in receivexlog");
	}
#else
	if (ReceiveXlogStream(conn, startpos, starttli, NULL, basedir,
						  stop_streaming, standby_message_timeout, NULL,
						  false, false) == false)
		elog(ERROR, "Problem in receivexlog");
#endif

	PQfinish(conn);
	conn = NULL;
}
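
/*
 * A minimal sketch of how StreamLog() is expected to be launched, presumably
 * from do_backup_database(). The variable names below are hypothetical; only
 * check_stream_mut and StreamLog() come from this file:
 *
 *	pthread_t	stream_thread;
 *	char		dst_wal_path[MAXPGPATH];	// e.g. <backup>/database/pg_xlog
 *
 *	pthread_mutex_lock(&check_stream_mut);
 *	pthread_create(&stream_thread, NULL,
 *				   (void *(*)(void *)) StreamLog, dst_wal_path);
 *	pthread_mutex_lock(&check_stream_mut);	// wait until StreamLog() connects
 *	pthread_mutex_unlock(&check_stream_mut);
 *	... copy the data files ...
 *	pthread_join(stream_thread, NULL);
 */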

/*
 * cfs_mmap() and cfs_munmap() mirror the definitions in cfs.h, but do not
 * use atomic variables, since those are not allowed in frontend code.
 * TODO Is it so?
 * Since we cannot take an atomic lock on files compressed by CFS, the
 * server must take care not to change these files while the backup is
 * running.
 */
FileMap* cfs_mmap(int md)
{
	FileMap*	map;
#ifdef WIN32
	HANDLE		mh = CreateFileMapping(_get_osfhandle(md), NULL, PAGE_READWRITE,
									   0, (DWORD) sizeof(FileMap), NULL);

	if (mh == NULL)
		return (FileMap*) MAP_FAILED;

	map = (FileMap*) MapViewOfFile(mh, FILE_MAP_ALL_ACCESS, 0, 0, 0);
	CloseHandle(mh);
	if (map == NULL)
		return (FileMap*) MAP_FAILED;

#else
	map = (FileMap*) mmap(NULL, sizeof(FileMap),
						  PROT_WRITE | PROT_READ, MAP_SHARED, md, 0);
#endif
	return map;
}

int cfs_munmap(FileMap* map)
{
#ifdef WIN32
	return UnmapViewOfFile(map) ? 0 : -1;
#else
	return munmap(map, sizeof(FileMap));
#endif
}