Mirror of https://github.com/postgrespro/pg_probackup.git (synced 2024-11-24 08:52:38 +02:00)

Commit 64d35c5022 (parent d507c8ab0d): Added support for ptrack 2.0
Makefile (2 changed lines)

@@ -6,7 +6,7 @@ OBJS = src/utils/configuration.o src/utils/json.o src/utils/logger.o \
 
 OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o src/data.o \
 	src/delete.o src/dir.o src/fetch.o src/help.o src/init.o src/merge.o \
-	src/parsexlog.o src/pg_probackup.o src/restore.o src/show.o src/util.o \
+	src/parsexlog.o src/ptrack.o src/pg_probackup.o src/restore.o src/show.o src/util.o \
 	src/validate.o
 
 # borrowed files
README.md

@@ -155,11 +155,18 @@ Once you have `pg_probackup` installed, complete [the setup](https://github.com/
 
 ## Building from source
 ### Linux
 
-To compile `pg_probackup`, you must have a PostgreSQL installation and raw source tree. To install `pg_probackup`, execute this in the module's directory:
+To compile `pg_probackup`, you must have a PostgreSQL installation and raw source tree. Execute this in the module's directory:
 
 ```shell
 make USE_PGXS=1 PG_CONFIG=<path_to_pg_config> top_srcdir=<path_to_PostgreSQL_source_tree>
 ```
 
+The alternative way, without using the PGXS infrastructure, is to place the `pg_probackup` source directory into the `contrib` directory and build it there. Example:
+
+```shell
+cd <path_to_PostgreSQL_source_tree> && git clone https://github.com/postgrespro/pg_probackup contrib/pg_probackup && cd contrib/pg_probackup && make
+```
+
 ### Windows
 
+Currently pg_probackup can be built using only MSVC 2013.
src/backup.c (585 changed lines)
@@ -25,14 +25,6 @@
 #include "utils/thread.h"
 #include "utils/file.h"
 
-
-/*
- * Macro needed to parse ptrack.
- * NOTE Keep those values synchronized with definitions in ptrack.h
- */
-#define PTRACK_BITS_PER_HEAPBLOCK 1
-#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
-
 static int standby_message_timeout = 10 * 1000;    /* 10 sec = default */
 static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
 static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr;
@@ -74,8 +66,6 @@ typedef struct
 static pthread_t stream_thread;
 static StreamThreadArg stream_thread_arg = {"", NULL, 1};
 
-static int is_ptrack_enable = false;
-bool is_ptrack_support = false;
 bool exclusive_backup = false;
 
 /* Is pg_start_backup() was executed */
@@ -102,7 +92,6 @@ static XLogRecPtr wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, TimeLineID tli
                bool in_prev_segment, bool segment_only,
                int timeout_elevel, bool in_stream_dir);
 
-static void make_pagemap_from_ptrack(parray* files, PGconn* backup_conn);
 static void *StreamLog(void *arg);
 static void IdentifySystem(StreamThreadArg *stream_thread_arg);
 
@@ -113,19 +102,6 @@ static parray *get_database_map(PGconn *pg_startbackup_conn);
 /* pgpro specific functions */
 static bool pgpro_support(PGconn *conn);
 
-/* Ptrack functions */
-static void pg_ptrack_clear(PGconn *backup_conn);
-static bool pg_ptrack_support(PGconn *backup_conn);
-static bool pg_ptrack_enable(PGconn *backup_conn);
-static bool pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid,
-                                       PGconn *backup_conn);
-static char *pg_ptrack_get_and_clear(Oid tablespace_oid,
-                                     Oid db_oid,
-                                     Oid rel_oid,
-                                     size_t *result_size,
-                                     PGconn *backup_conn);
-static XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn);
-
 /* Check functions */
 static bool pg_checksum_enable(PGconn *conn);
 static bool pg_is_in_recovery(PGconn *conn);
@@ -177,6 +153,9 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
     PGconn     *master_conn = NULL;
     PGconn     *pg_startbackup_conn = NULL;
 
+    /* for fancy reporting */
+    time_t      start_time, end_time;
+
     elog(LOG, "Database backup start");
     if(current.external_dir_str)
     {
@@ -229,7 +208,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
      */
     if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
     {
-        XLogRecPtr  ptrack_lsn = get_last_ptrack_lsn(backup_conn);
+        XLogRecPtr  ptrack_lsn = get_last_ptrack_lsn(backup_conn, nodeInfo);
 
         if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr)
         {
@@ -242,8 +221,8 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
     }
 
     /* Clear ptrack files for FULL and PAGE backup */
-    if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && is_ptrack_enable)
-        pg_ptrack_clear(backup_conn);
+    if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && nodeInfo->is_ptrack_enable)
+        pg_ptrack_clear(backup_conn, nodeInfo->ptrack_version_num);
 
     /* notify start of backup to PostgreSQL server */
     time2iso(label, lengthof(label), current.start_time);
@@ -379,22 +358,41 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
     /*
      * Build page mapping in incremental mode.
     */
-    if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
+
+    if (current.backup_mode == BACKUP_MODE_DIFF_PAGE ||
+        current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
     {
-        /*
-         * Build the page map. Obtain information about changed pages
-         * reading WAL segments present in archives up to the point
-         * where this backup has started.
-         */
-        extractPageMap(arclog_path, current.tli, instance_config.xlog_seg_size,
-                       prev_backup->start_lsn, current.start_lsn);
-    }
-    else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
-    {
-        /*
-         * Build the page map from ptrack information.
-         */
-        make_pagemap_from_ptrack(backup_files_list, backup_conn);
+        elog(INFO, "Compiling pagemap of changed blocks");
+        time(&start_time);
+
+        if (current.backup_mode == BACKUP_MODE_DIFF_PAGE)
+        {
+            /*
+             * Build the page map. Obtain information about changed pages
+             * reading WAL segments present in archives up to the point
+             * where this backup has started.
+             */
+            extractPageMap(arclog_path, current.tli, instance_config.xlog_seg_size,
+                           prev_backup->start_lsn, current.start_lsn);
+        }
+        else if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
+        {
+            /*
+             * Build the page map from ptrack information.
+             */
+            if (nodeInfo->ptrack_version_num == 20)
+                make_pagemap_from_ptrack_2(backup_files_list, backup_conn,
+                                           nodeInfo->ptrack_schema,
+                                           prev_backup_start_lsn);
+            else if (nodeInfo->ptrack_version_num == 15 ||
+                     nodeInfo->ptrack_version_num == 16 ||
+                     nodeInfo->ptrack_version_num == 17)
+                make_pagemap_from_ptrack_1(backup_files_list, backup_conn);
+        }
+
+        time(&end_time);
+        elog(INFO, "Pagemap compiled, time elapsed %.0f sec",
+             difftime(end_time, start_time));
     }
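In the new code path above, ptrack 2.0 no longer reads per-relation ptrack fork files; make_pagemap_from_ptrack_2() obtains the whole set of changed files in a single server call. As a hedged sketch, the underlying query looks roughly like this when run by hand — the schema name `ptrack` and the LSN are illustrative placeholders (the real code takes them from nodeInfo->ptrack_schema and the previous backup's start LSN):

```sql
-- Sketch of the server-side call behind make_pagemap_from_ptrack_2();
-- schema "ptrack" and LSN '0/2C8E2F0' are assumed, illustrative values.
SELECT path, pagemap
FROM ptrack.pg_ptrack_get_pagemapset('0/2C8E2F0')
ORDER BY 1;
```

Each returned row pairs a relation file path with a bytea bitmap of its changed pages, which the backup then attaches to the matching pgFile entry.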
@@ -454,6 +452,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
     {
         backup_files_arg *arg = &(threads_args[i]);
 
+        arg->nodeInfo = nodeInfo;
         arg->from_root = instance_config.pgdata;
         arg->to_root = database_path;
         arg->external_prefix = external_prefix;
@@ -769,19 +768,24 @@ do_backup(time_t start_time, bool no_validate,
         elog(ERROR, "Failed to retrieve wal_segment_size");
 #endif
 
-    is_ptrack_support = pg_ptrack_support(backup_conn);
-    if (is_ptrack_support)
-        is_ptrack_enable = pg_ptrack_enable(backup_conn);
+    get_ptrack_version(backup_conn, &nodeInfo);
+    //  elog(WARNING, "ptrack_version_num %d", ptrack_version_num);
+
+    if (nodeInfo.ptrack_version_num > 0)
+    {
+        if (nodeInfo.ptrack_version_num >= 20)
+            nodeInfo.is_ptrack_enable = pg_ptrack_enable2(backup_conn);
+        else
+            nodeInfo.is_ptrack_enable = pg_ptrack_enable(backup_conn);
+    }
 
     if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
     {
-        if (!is_ptrack_support)
+        if (nodeInfo.ptrack_version_num == 0)
             elog(ERROR, "This PostgreSQL instance does not support ptrack");
         else
        {
-            if(!is_ptrack_enable)
+            if (!nodeInfo.is_ptrack_enable)
                 elog(ERROR, "Ptrack is disabled");
         }
     }
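Whether ptrack is usable is now probed differently per generation. Roughly — a sketch based only on the queries visible in this commit — the two checks come down to:

```sql
-- ptrack 1.x (pg_ptrack_enable()): a boolean GUC, enabled when "on"
SHOW ptrack_enable;

-- ptrack 2.x (pg_ptrack_enable2()): enabled unless the map size is "0"
SHOW ptrack_map_size;
```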
@@ -926,8 +930,7 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo)
     PQclear(res);
 
     /* Do exclusive backup only for PostgreSQL 9.5 */
-    exclusive_backup = nodeInfo->server_version < 90600 ||
-        current.backup_mode == BACKUP_MODE_DIFF_PTRACK;
+    exclusive_backup = nodeInfo->server_version < 90600;
 }
 
 /*
@@ -1084,48 +1087,6 @@ pg_switch_wal(PGconn *conn)
     PQclear(res);
 }
 
-/*
- * Check if the instance supports ptrack
- * TODO Maybe we should rather check ptrack_version()?
- */
-static bool
-pg_ptrack_support(PGconn *backup_conn)
-{
-    PGresult   *res_db;
-
-    res_db = pgut_execute(backup_conn,
-                          "SELECT proname FROM pg_proc WHERE proname='ptrack_version'",
-                          0, NULL);
-    if (PQntuples(res_db) == 0)
-    {
-        PQclear(res_db);
-        return false;
-    }
-    PQclear(res_db);
-
-    res_db = pgut_execute(backup_conn,
-                          "SELECT pg_catalog.ptrack_version()",
-                          0, NULL);
-    if (PQntuples(res_db) == 0)
-    {
-        PQclear(res_db);
-        return false;
-    }
-
-    /* Now we support only ptrack versions upper than 1.5 */
-    if (strcmp(PQgetvalue(res_db, 0, 0), "1.5") != 0 &&
-        strcmp(PQgetvalue(res_db, 0, 0), "1.6") != 0 &&
-        strcmp(PQgetvalue(res_db, 0, 0), "1.7") != 0)
-    {
-        elog(WARNING, "Update your ptrack to the version 1.5 or upper. Current version is %s", PQgetvalue(res_db, 0, 0));
-        PQclear(res_db);
-        return false;
-    }
-
-    PQclear(res_db);
-    return true;
-}
-
 /*
  * Check if the instance is PostgresPro fork.
  */
@@ -1210,23 +1171,6 @@ get_database_map(PGconn *conn)
     return database_map;
 }
 
-/* Check if ptrack is enabled in target instance */
-static bool
-pg_ptrack_enable(PGconn *backup_conn)
-{
-    PGresult   *res_db;
-
-    res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL);
-
-    if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
-    {
-        PQclear(res_db);
-        return false;
-    }
-    PQclear(res_db);
-    return true;
-}
-
 /* Check if ptrack is enabled in target instance */
 static bool
 pg_checksum_enable(PGconn *conn)
@@ -1279,204 +1223,6 @@ pg_is_superuser(PGconn *conn)
     return false;
 }
 
-/* Clear ptrack files in all databases of the instance we connected to */
-static void
-pg_ptrack_clear(PGconn *backup_conn)
-{
-    PGresult   *res_db,
-               *res;
-    const char *dbname;
-    int         i;
-    Oid dbOid, tblspcOid;
-    char       *params[2];
-
-    params[0] = palloc(64);
-    params[1] = palloc(64);
-    res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database",
-                          0, NULL);
-
-    for(i = 0; i < PQntuples(res_db); i++)
-    {
-        PGconn     *tmp_conn;
-
-        dbname = PQgetvalue(res_db, i, 0);
-        if (strcmp(dbname, "template0") == 0)
-            continue;
-
-        dbOid = atoi(PQgetvalue(res_db, i, 1));
-        tblspcOid = atoi(PQgetvalue(res_db, i, 2));
-
-        tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
-                                dbname,
-                                instance_config.conn_opt.pguser);
-
-        res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()",
-                           0, NULL);
-        PQclear(res);
-
-        sprintf(params[0], "%i", dbOid);
-        sprintf(params[1], "%i", tblspcOid);
-        res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
-                           2, (const char **)params);
-        PQclear(res);
-
-        pgut_disconnect(tmp_conn);
-    }
-
-    pfree(params[0]);
-    pfree(params[1]);
-    PQclear(res_db);
-}
-
-static bool
-pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn)
-{
-    char       *params[2];
-    char       *dbname;
-    PGresult   *res_db;
-    PGresult   *res;
-    bool        result;
-
-    params[0] = palloc(64);
-    params[1] = palloc(64);
-
-    sprintf(params[0], "%i", dbOid);
-    res_db = pgut_execute(backup_conn,
-                          "SELECT datname FROM pg_database WHERE oid=$1",
-                          1, (const char **) params);
-    /*
-     * If database is not found, it's not an error.
-     * It could have been deleted since previous backup.
-     */
-    if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
-        return false;
-
-    dbname = PQgetvalue(res_db, 0, 0);
-
-    /* Always backup all files from template0 database */
-    if (strcmp(dbname, "template0") == 0)
-    {
-        PQclear(res_db);
-        return true;
-    }
-    PQclear(res_db);
-
-    sprintf(params[0], "%i", dbOid);
-    sprintf(params[1], "%i", tblspcOid);
-    res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
-                       2, (const char **)params);
-
-    if (PQnfields(res) != 1)
-        elog(ERROR, "cannot perform pg_ptrack_get_and_clear_db()");
-
-    if (!parse_bool(PQgetvalue(res, 0, 0), &result))
-        elog(ERROR,
-             "result of pg_ptrack_get_and_clear_db() is invalid: %s",
-             PQgetvalue(res, 0, 0));
-
-    PQclear(res);
-    pfree(params[0]);
-    pfree(params[1]);
-
-    return result;
-}
-
-/* Read and clear ptrack files of the target relation.
- * Result is a bytea ptrack map of all segments of the target relation.
- * case 1: we know a tablespace_oid, db_oid, and rel_filenode
- * case 2: we know db_oid and rel_filenode (no tablespace_oid, because file in pg_default)
- * case 3: we know only rel_filenode (because file in pg_global)
- */
-static char *
-pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
-                        size_t *result_size, PGconn *backup_conn)
-{
-    PGconn     *tmp_conn;
-    PGresult   *res_db,
-               *res;
-    char       *params[2];
-    char       *result;
-    char       *val;
-
-    params[0] = palloc(64);
-    params[1] = palloc(64);
-
-    /* regular file (not in directory 'global') */
-    if (db_oid != 0)
-    {
-        char       *dbname;
-
-        sprintf(params[0], "%i", db_oid);
-        res_db = pgut_execute(backup_conn,
-                              "SELECT datname FROM pg_database WHERE oid=$1",
-                              1, (const char **) params);
-        /*
-         * If database is not found, it's not an error.
-         * It could have been deleted since previous backup.
-         */
-        if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
-            return NULL;
-
-        dbname = PQgetvalue(res_db, 0, 0);
-
-        if (strcmp(dbname, "template0") == 0)
-        {
-            PQclear(res_db);
-            return NULL;
-        }
-
-        tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
-                                dbname,
-                                instance_config.conn_opt.pguser);
-        sprintf(params[0], "%i", tablespace_oid);
-        sprintf(params[1], "%i", rel_filenode);
-        res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
-                           2, (const char **)params);
-
-        if (PQnfields(res) != 1)
-            elog(ERROR, "cannot get ptrack file from database \"%s\" by tablespace oid %u and relation oid %u",
-                 dbname, tablespace_oid, rel_filenode);
-        PQclear(res_db);
-        pgut_disconnect(tmp_conn);
-    }
-    /* file in directory 'global' */
-    else
-    {
-        /*
-         * execute ptrack_get_and_clear for relation in pg_global
-         * Use backup_conn, cause we can do it from any database.
-         */
-        sprintf(params[0], "%i", tablespace_oid);
-        sprintf(params[1], "%i", rel_filenode);
-        res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
-                           2, (const char **)params);
-
-        if (PQnfields(res) != 1)
-            elog(ERROR, "cannot get ptrack file from pg_global tablespace and relation oid %u",
-                 rel_filenode);
-    }
-
-    val = PQgetvalue(res, 0, 0);
-
-    /* TODO Now pg_ptrack_get_and_clear() returns bytea ending with \x.
-     * It should be fixed in future ptrack releases, but till then we
-     * can parse it.
-     */
-    if (strcmp("x", val+1) == 0)
-    {
-        /* Ptrack file is missing */
-        return NULL;
-    }
-
-    result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
-                                      result_size);
-    PQclear(res);
-    pfree(params[0]);
-    pfree(params[1]);
-
-    return result;
-}
-
 /*
  * Wait for target LSN or WAL segment, containing target LSN.
 *
@@ -2212,7 +1958,7 @@ backup_files(void *arg)
 
         if (!pg_atomic_test_set_flag(&file->lock))
             continue;
-        elog(VERBOSE, "Copying file: \"%s\" ", file->path);
+        elog(VERBOSE, "Copying file: \"%s\"", file->path);
 
         /* check for interrupt */
         if (interrupted || thread_interrupted)
@@ -2289,6 +2035,9 @@ backup_files(void *arg)
                          current.backup_mode,
                          instance_config.compress_alg,
                          instance_config.compress_level,
+                         arguments->nodeInfo->checksum_version,
+                         arguments->nodeInfo->ptrack_version_num,
+                         arguments->nodeInfo->ptrack_schema,
                          true))
             {
                 /* disappeared file not to be confused with 'not changed' */
@@ -2549,132 +2298,6 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
     pg_free(path);
 }
 
-/*
- * Given a list of files in the instance to backup, build a pagemap for each
- * data file that has ptrack. Result is saved in the pagemap field of pgFile.
- * NOTE we rely on the fact that provided parray is sorted by file->path.
- */
-static void
-make_pagemap_from_ptrack(parray *files, PGconn *backup_conn)
-{
-    size_t      i;
-    Oid dbOid_with_ptrack_init = 0;
-    Oid tblspcOid_with_ptrack_init = 0;
-    char       *ptrack_nonparsed = NULL;
-    size_t      ptrack_nonparsed_size = 0;
-
-    elog(LOG, "Compiling pagemap");
-    for (i = 0; i < parray_num(files); i++)
-    {
-        pgFile     *file = (pgFile *) parray_get(files, i);
-        size_t      start_addr;
-
-        /*
-         * If there is a ptrack_init file in the database,
-         * we must backup all its files, ignoring ptrack files for relations.
-         */
-        if (file->is_database)
-        {
-            char *filename = strrchr(file->path, '/');
-
-            Assert(filename != NULL);
-            filename++;
-
-            /*
-             * The function pg_ptrack_get_and_clear_db returns true
-             * if there was a ptrack_init file.
-             * Also ignore ptrack files for global tablespace,
-             * to avoid any possible specific errors.
-             */
-            if ((file->tblspcOid == GLOBALTABLESPACE_OID) ||
-                pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid, backup_conn))
-            {
-                dbOid_with_ptrack_init = file->dbOid;
-                tblspcOid_with_ptrack_init = file->tblspcOid;
-            }
-        }
-
-        if (file->is_datafile)
-        {
-            if (file->tblspcOid == tblspcOid_with_ptrack_init &&
-                file->dbOid == dbOid_with_ptrack_init)
-            {
-                /* ignore ptrack if ptrack_init exists */
-                elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->path);
-                file->pagemap_isabsent = true;
-                continue;
-            }
-
-            /* get ptrack bitmap once for all segments of the file */
-            if (file->segno == 0)
-            {
-                /* release previous value */
-                pg_free(ptrack_nonparsed);
-                ptrack_nonparsed_size = 0;
-
-                ptrack_nonparsed = pg_ptrack_get_and_clear(file->tblspcOid, file->dbOid,
-                                               file->relOid, &ptrack_nonparsed_size, backup_conn);
-            }
-
-            if (ptrack_nonparsed != NULL)
-            {
-                /*
-                 * pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out.
-                 * Compute the beginning of the ptrack map related to this segment
-                 *
-                 * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
-                 * RELSEG_SIZE. Number of Pages per segment: 131072
-                 * RELSEG_SIZE/HEAPBLOCKS_PER_BYTE. number of bytes in ptrack file needed
-                 * to keep track on one relsegment: 16384
-                 */
-                start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno;
-
-                /*
-                 * If file segment was created after we have read ptrack,
-                 * we won't have a bitmap for this segment.
-                 */
-                if (start_addr > ptrack_nonparsed_size)
-                {
-                    elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
-                    file->pagemap_isabsent = true;
-                }
-                else
-                {
-
-                    if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
-                    {
-                        file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
-                        elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
-                    }
-                    else
-                    {
-                        file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
-                        elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
-                    }
-
-                    file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
-                    memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
-                }
-            }
-            else
-            {
-                /*
-                 * If ptrack file is missing, try to copy the entire file.
-                 * It can happen in two cases:
-                 * - files were created by commands that bypass buffer manager
-                 * and, correspondingly, ptrack mechanism.
-                 * i.e. CREATE DATABASE
-                 * - target relation was deleted.
-                 */
-                elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
-                file->pagemap_isabsent = true;
-            }
-        }
-    }
-    elog(LOG, "Pagemap compiled");
-}
-
 /*
  * Stop WAL streaming if current 'xlogpos' exceeds 'stop_backup_lsn', which is
  * set by pg_stop_backup().
@@ -2687,7 +2310,7 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
 
     /* check for interrupt */
     if (interrupted || thread_interrupted)
-        elog(ERROR, "Interrupted during backup stop_streaming");
+        elog(ERROR, "Interrupted during WAL streaming");
 
     /* we assume that we get called once at the end of each segment */
     if (segment_finished)
@@ -2825,100 +2448,6 @@ StreamLog(void *arg)
     return NULL;
 }
 
-/*
- * Get lsn of the moment when ptrack was enabled the last time.
- */
-static XLogRecPtr
-get_last_ptrack_lsn(PGconn *backup_conn)
-
-{
-    PGresult   *res;
-    uint32      lsn_hi;
-    uint32      lsn_lo;
-    XLogRecPtr  lsn;
-
-    res = pgut_execute(backup_conn, "select pg_catalog.pg_ptrack_control_lsn()",
-                       0, NULL);
-
-    /* Extract timeline and LSN from results of pg_start_backup() */
-    XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
-    /* Calculate LSN */
-    lsn = ((uint64) lsn_hi) << 32 | lsn_lo;
-
-    PQclear(res);
-    return lsn;
-}
-
-char *
-pg_ptrack_get_block(ConnectionArgs *arguments,
-                    Oid dbOid,
-                    Oid tblsOid,
-                    Oid relOid,
-                    BlockNumber blknum,
-                    size_t *result_size)
-{
-    PGresult   *res;
-    char       *params[4];
-    char       *result;
-
-    params[0] = palloc(64);
-    params[1] = palloc(64);
-    params[2] = palloc(64);
-    params[3] = palloc(64);
-
-    /*
-     * Use tmp_conn, since we may work in parallel threads.
-     * We can connect to any database.
-     */
-    sprintf(params[0], "%i", tblsOid);
-    sprintf(params[1], "%i", dbOid);
-    sprintf(params[2], "%i", relOid);
-    sprintf(params[3], "%u", blknum);
-
-    if (arguments->conn == NULL)
-    {
-        arguments->conn = pgut_connect(instance_config.conn_opt.pghost,
-                                       instance_config.conn_opt.pgport,
-                                       instance_config.conn_opt.pgdatabase,
-                                       instance_config.conn_opt.pguser);
-    }
-
-    if (arguments->cancel_conn == NULL)
-        arguments->cancel_conn = PQgetCancel(arguments->conn);
-
-    //elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum);
-    res = pgut_execute_parallel(arguments->conn,
-                                arguments->cancel_conn,
-                                "SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
-                                4, (const char **)params, true, false, false);
-
-    if (PQnfields(res) != 1)
-    {
-        elog(VERBOSE, "cannot get file block for relation oid %u",
-             relOid);
-        return NULL;
-    }
-
-    if (PQgetisnull(res, 0, 0))
-    {
-        elog(VERBOSE, "cannot get file block for relation oid %u",
-             relOid);
-        return NULL;
-    }
-
-    result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
-                                      result_size);
-
-    PQclear(res);
-
-    pfree(params[0]);
-    pfree(params[1]);
-    pfree(params[2]);
-    pfree(params[3]);
-
-    return result;
-}
-
 static void
 check_external_for_tablespaces(parray *external_list, PGconn *backup_conn)
 {
src/catalog.c

@@ -1940,6 +1940,10 @@ pgNodeInit(PGNodeInfo *node)
 
     node->server_version = 0;
     node->server_version_str[0] = '\0';
 
+    node->ptrack_version_num = 0;
+    node->is_ptrack_enable = false;
+    node->ptrack_schema = NULL;
 }
 
 /*
src/data.c (47 changed lines)
@@ -300,9 +300,10 @@ prepare_page(ConnectionArgs *arguments,
              BlockNumber blknum, BlockNumber nblocks,
              FILE *in, BlockNumber *n_skipped,
              BackupMode backup_mode,
-             Page page,
-             bool strict,
-             uint32 checksum_version)
+             Page page, bool strict,
+             uint32 checksum_version,
+             int ptrack_version_num,
+             const char *ptrack_schema)
 {
     XLogRecPtr  page_lsn = 0;
     int         try_again = 100;
@@ -319,7 +320,7 @@ prepare_page(ConnectionArgs *arguments,
      * Under high write load it's possible that we've read partly
      * flushed page, so try several times before throwing an error.
      */
-    if (backup_mode != BACKUP_MODE_DIFF_PTRACK)
+    if (backup_mode != BACKUP_MODE_DIFF_PTRACK || ptrack_version_num >= 20)
     {
         while(!page_is_valid && try_again)
         {
@@ -345,9 +346,9 @@ prepare_page(ConnectionArgs *arguments,
          */
         //elog(WARNING, "Checksum_Version: %i", checksum_version ? 1 : 0);
 
-        if (result == -1 && is_ptrack_support && strict)
+        if (result == -1 && strict && ptrack_version_num > 0)
         {
-            elog(WARNING, "File \"%s\", block %u, try to fetch via SQL",
+            elog(WARNING, "File \"%s\", block %u, try to fetch via shared buffer",
                  file->path, blknum);
             break;
         }
@@ -358,16 +359,16 @@ prepare_page(ConnectionArgs *arguments,
     */
 
     if (!page_is_valid &&
-        ((strict && !is_ptrack_support) || !strict))
+        ((strict && ptrack_version_num == 0) || !strict))
     {
-        /* show this message for checkdb or backup without ptrack support */
+        /* show this message for checkdb, merge or backup without ptrack support */
         elog(WARNING, "Corruption detected in file \"%s\", block %u",
              file->path, blknum);
     }
 
     /* Backup with invalid block and without ptrack support must throw error */
-    if (!page_is_valid && strict && !is_ptrack_support)
-        elog(ERROR, "Data file corruption. Canceling backup");
+    if (!page_is_valid && strict && ptrack_version_num == 0)
+        elog(ERROR, "Data file corruption, canceling backup");
 
     /* Checkdb not going futher */
     if (!strict)
@@ -379,12 +380,15 @@ prepare_page(ConnectionArgs *arguments,
         }
     }
 
-    if (backup_mode == BACKUP_MODE_DIFF_PTRACK || (!page_is_valid && is_ptrack_support))
+    if ((backup_mode == BACKUP_MODE_DIFF_PTRACK
+        && (ptrack_version_num >= 15 && ptrack_version_num < 20))
+        || !page_is_valid)
     {
         size_t page_size = 0;
         Page ptrack_page = NULL;
         ptrack_page = (Page) pg_ptrack_get_block(arguments, file->dbOid, file->tblspcOid,
-                                                 file->relOid, absolute_blknum, &page_size);
+                                                 file->relOid, absolute_blknum, &page_size,
+                                                 ptrack_version_num, ptrack_schema);
 
         if (ptrack_page == NULL)
         {
@@ -530,7 +534,8 @@ bool
 backup_data_file(backup_files_arg* arguments,
                  const char *to_path, pgFile *file,
                  XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
-                 CompressAlg calg, int clevel, bool missing_ok)
+                 CompressAlg calg, int clevel, uint32 checksum_version,
+                 int ptrack_version_num, const char *ptrack_schema, bool missing_ok)
 {
     FILE       *in;
     FILE       *out;
@@ -621,12 +626,15 @@ backup_data_file(backup_files_arg* arguments,
     if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
         file->pagemap_isabsent || !file->exists_in_prev)
     {
+        /* TODO: take into account PTRACK 2.0 */
         if (backup_mode != BACKUP_MODE_DIFF_PTRACK && fio_is_remote_file(in))
         {
             int rc = fio_send_pages(in, out, file,
                                     backup_mode == BACKUP_MODE_DIFF_DELTA && file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
                                     &n_blocks_skipped, calg, clevel);
-            if (rc == PAGE_CHECKSUM_MISMATCH && is_ptrack_support)
+
+            if (rc == PAGE_CHECKSUM_MISMATCH && ptrack_version_num >= 15)
+                /* only ptrack versions 1.5, 1.6, 1.7 and 2.x support this functionality */
                 goto RetryUsingPtrack;
             if (rc < 0)
                 elog(ERROR, "Failed to read file \"%s\": %s",
@@ -642,7 +650,9 @@ backup_data_file(backup_files_arg* arguments,
         {
             page_state = prepare_page(&(arguments->conn_arg), file, prev_backup_start_lsn,
                                       blknum, nblocks, in, &n_blocks_skipped,
-                                      backup_mode, curr_page, true, current.checksum_version);
+                                      backup_mode, curr_page, true,
+                                      checksum_version, ptrack_version_num,
+                                      ptrack_schema);
             compress_and_backup_page(file, blknum, in, out, &(file->crc),
                                      page_state, curr_page, calg, clevel);
             n_blocks_read++;
@@ -666,7 +676,9 @@ backup_data_file(backup_files_arg* arguments,
         {
             page_state = prepare_page(&(arguments->conn_arg), file, prev_backup_start_lsn,
                                       blknum, nblocks, in, &n_blocks_skipped,
-                                      backup_mode, curr_page, true, current.checksum_version);
+                                      backup_mode, curr_page, true,
+                                      checksum_version, ptrack_version_num,
+                                      ptrack_schema);
             compress_and_backup_page(file, blknum, in, out, &(file->crc),
                                      page_state, curr_page, calg, clevel);
             n_blocks_read++;
@@ -1268,7 +1280,8 @@ check_data_file(ConnectionArgs *arguments,
     {
         page_state = prepare_page(arguments, file, InvalidXLogRecPtr,
                                   blknum, nblocks, in, &n_blocks_skipped,
-                                  BACKUP_MODE_FULL, curr_page, false, checksum_version);
+                                  BACKUP_MODE_FULL, curr_page, false, checksum_version,
+                                  0, NULL);
 
         if (page_state == PageIsTruncated)
             break;
src/dir.c (19 changed lines)
@@ -343,6 +343,16 @@ pgFileComparePath(const void *f1, const void *f2)
     return strcmp(f1p->path, f2p->path);
 }
 
+/* Compare two pgFile with their path in ascending order of ASCII code. */
+int
+pgFileMapComparePath(const void *f1, const void *f2)
+{
+    page_map_entry *f1p = *(page_map_entry **)f1;
+    page_map_entry *f2p = *(page_map_entry **)f2;
+
+    return strcmp(f1p->path, f2p->path);
+}
+
 /* Compare two pgFile with their name in ascending order of ASCII code. */
 int
 pgFileCompareName(const void *f1, const void *f2)
@@ -677,6 +687,13 @@ dir_check_file(pgFile *file)
         {
             if (strcmp(file->name, "pg_internal.init") == 0)
                 return CHECK_FALSE;
+            /* Do not backup ptrack2.x map files */
+            else if (strcmp(file->name, "ptrack.map") == 0)
+                return CHECK_FALSE;
+            else if (strcmp(file->name, "ptrack.map.mmap") == 0)
+                return CHECK_FALSE;
+            else if (strcmp(file->name, "ptrack.map.tmp") == 0)
+                return CHECK_FALSE;
             /* Do not backup temp files */
             else if (file->name[0] == 't' && isdigit(file->name[1]))
                 return CHECK_FALSE;
@@ -734,7 +751,7 @@ dir_list_file_internal(parray *files, pgFile *parent, bool exclude,
                        bool follow_symlink,
                        int external_dir_num, fio_location location)
 {
-    DIR        *dir;
+    DIR        *dir;
     struct dirent *dent;
 
     if (!S_ISDIR(parent->mode))
src/merge.c

@@ -617,7 +617,8 @@ merge_files(void *arg)
                      to_backup->backup_mode,
                      to_backup->compress_alg,
                      to_backup->compress_level,
-                     false);
+                     from_backup->checksum_version,
+                     0, NULL, false);
 
             file->path = prev_path;
src/parsexlog.c

@@ -236,22 +236,12 @@ extractPageMap(const char *archivedir, TimeLineID tli, uint32 wal_seg_size,
                XLogRecPtr startpoint, XLogRecPtr endpoint)
 {
     bool        extract_isok = true;
-    time_t      start_time,
-                end_time;
-
-    elog(LOG, "Compiling pagemap");
-    time(&start_time);
 
     extract_isok = RunXLogThreads(archivedir, 0, InvalidTransactionId,
                                   InvalidXLogRecPtr, tli, wal_seg_size,
                                   startpoint, endpoint, false, extractPageInfo,
                                   NULL);
 
-    time(&end_time);
-    if (extract_isok)
-        elog(LOG, "Pagemap compiled, time elapsed %.0f sec",
-             difftime(end_time, start_time));
-    else
+    if (!extract_isok)
         elog(ERROR, "Pagemap compiling failed");
 }
src/pg_probackup.h

@@ -136,7 +136,7 @@ do { \
 /* Information about single file (or dir) in backup */
 typedef struct pgFile
 {
-    char    *name;          /* file or directory name */
+    char   *name;           /* file or directory name */
     mode_t  mode;           /* protection (file type and permission) */
     size_t  size;           /* size of the file */
     size_t  read_size;      /* size of the portion read (if only some pages are
@@ -149,27 +149,34 @@ typedef struct pgFile
                              */
                             /* we need int64 here to store '-1' value */
     pg_crc32 crc;           /* CRC value of the file, regular file only */
-    char    *linked;        /* path of the linked file */
+    char   *linked;         /* path of the linked file */
     bool    is_datafile;    /* true if the file is PostgreSQL data file */
-    char    *path;          /* absolute path of the file */
-    char    *rel_path;      /* relative path of the file */
+    char   *path;           /* absolute path of the file */
+    char   *rel_path;       /* relative path of the file */
     Oid     tblspcOid;      /* tblspcOid extracted from path, if applicable */
     Oid     dbOid;          /* dbOid extracted from path, if applicable */
     Oid     relOid;         /* relOid extracted from path, if applicable */
-    char    *forkName;      /* forkName extracted from path, if applicable */
+    char   *forkName;       /* forkName extracted from path, if applicable */
     int     segno;          /* Segment number for ptrack */
     int     n_blocks;       /* size of the file in blocks, readed during DELTA backup */
     bool    is_cfs;         /* Flag to distinguish files compressed by CFS*/
     bool    is_database;
-    int     external_dir_num;   /* Number of external directory. 0 if not external */
-    bool    exists_in_prev;     /* Mark files, both data and regular, that exists in previous backup */
-    CompressAlg     compress_alg;       /* compression algorithm applied to the file */
-    volatile pg_atomic_flag lock;   /* lock for synchronization of parallel threads */
-    datapagemap_t   pagemap;    /* bitmap of pages updated since previous backup */
-    bool    pagemap_isabsent;   /* Used to mark files with unknown state of pagemap,
-                                 * i.e. datafiles without _ptrack */
+    int     external_dir_num;   /* Number of external directory. 0 if not external */
+    bool    exists_in_prev;     /* Mark files, both data and regular, that exists in previous backup */
+    CompressAlg compress_alg;   /* compression algorithm applied to the file */
+    volatile pg_atomic_flag lock;/* lock for synchronization of parallel threads */
+    datapagemap_t pagemap;      /* bitmap of pages updated since previous backup */
+    bool    pagemap_isabsent;   /* Used to mark files with unknown state of pagemap,
+                                 * i.e. datafiles without _ptrack */
 } pgFile;
 
+typedef struct page_map_entry
+{
+    const char  *path;      /* file or directory name */
+    char        *pagemap;
+    size_t       pagemapsize;
+} page_map_entry;
+
 /* Special values of datapagemap_t bitmapsize */
 #define PageBitmapIsEmpty 0 /* Used to mark unchanged datafiles */
 
@@ -294,6 +301,10 @@ typedef struct PGNodeInfo
     int         server_version;
     char        server_version_str[100];
 
+    int         ptrack_version_num;
+    bool        is_ptrack_enable;
+    const char  *ptrack_schema; /* used only for ptrack 2.x */
+
 } PGNodeInfo;
 
 typedef struct pgBackup pgBackup;
@@ -412,6 +423,8 @@ typedef struct pgSetBackupParams
 
 typedef struct
 {
+    PGNodeInfo *nodeInfo;
+
     const char *from_root;
     const char *to_root;
     const char *external_prefix;
@@ -578,7 +591,6 @@ extern bool smooth_checkpoint;
 /* remote probackup options */
 extern char* remote_agent;
 
-extern bool is_ptrack_support;
 extern bool exclusive_backup;
 
 /* delete options */
@@ -622,8 +634,8 @@ extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
 
 extern char *pg_ptrack_get_block(ConnectionArgs *arguments,
                                  Oid dbOid, Oid tblsOid, Oid relOid,
-                                 BlockNumber blknum,
-                                 size_t *result_size);
+                                 BlockNumber blknum, size_t *result_size,
+                                 int ptrack_version_num, const char *ptrack_schema);
 /* in restore.c */
 extern int do_restore_or_validate(time_t target_backup_id,
                                   pgRecoveryTarget *rt,
@@ -797,6 +809,7 @@ extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c,
                              bool raise_on_deleted, size_t *bytes_read, fio_location location);
 extern int pgFileCompareName(const void *f1, const void *f2);
 extern int pgFileComparePath(const void *f1, const void *f2);
+extern int pgFileMapComparePath(const void *f1, const void *f2);
 extern int pgFileComparePathWithExternal(const void *f1, const void *f2);
 extern int pgFileCompareRelPathWithExternal(const void *f1, const void *f2);
 extern int pgFileCompareRelPathWithExternalDesc(const void *f1, const void *f2);
@@ -813,6 +826,9 @@ extern bool backup_data_file(backup_files_arg* arguments,
                              XLogRecPtr prev_backup_start_lsn,
                              BackupMode backup_mode,
                              CompressAlg calg, int clevel,
+                             uint32 checksum_version,
+                             int ptrack_version_num,
+                             const char *ptrack_schema,
                              bool missing_ok);
 extern void restore_data_file(const char *to_path,
                               pgFile *file, bool allow_truncate,
@@ -878,5 +894,21 @@ extern PGconn *pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo);
 extern void check_system_identifiers(PGconn *conn, char *pgdata);
 extern void parse_filelist_filenames(parray *files, const char *root);
 
+/* in ptrack.c */
+extern void make_pagemap_from_ptrack_1(parray* files, PGconn* backup_conn);
+extern void make_pagemap_from_ptrack_2(parray* files, PGconn* backup_conn,
+                                       const char *ptrack_schema, XLogRecPtr lsn);
+extern void pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num);
+extern void get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo);
+extern bool pg_ptrack_enable(PGconn *backup_conn);
+extern bool pg_ptrack_enable2(PGconn *backup_conn);
+extern bool pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn);
+extern char *pg_ptrack_get_and_clear(Oid tablespace_oid,
+                                     Oid db_oid,
+                                     Oid rel_oid,
+                                     size_t *result_size,
+                                     PGconn *backup_conn);
+extern XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo);
+extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, XLogRecPtr lsn);
+
 #endif /* PG_PROBACKUP_H */
src/ptrack.c (new file, 702 lines; shown without diff markers since every line is an addition)
@@ -0,0 +1,702 @@
/*-------------------------------------------------------------------------
 *
 * ptrack.c: support functions for ptrack backups
 *
 * Copyright (c) 2019 Postgres Professional
 *
 *-------------------------------------------------------------------------
 */

#include "pg_probackup.h"

#if PG_VERSION_NUM < 110000
#include "catalog/catalog.h"
#endif
#include "catalog/pg_tablespace.h"

/*
 * Macro needed to parse ptrack.
 * NOTE Keep those values synchronized with definitions in ptrack.h
 */
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)

/*
 * Given a list of files in the instance to backup, build a pagemap for each
 * data file that has ptrack. Result is saved in the pagemap field of pgFile.
 * NOTE we rely on the fact that provided parray is sorted by file->path.
 */
void
make_pagemap_from_ptrack_1(parray *files, PGconn *backup_conn)
{
    size_t      i;
    Oid dbOid_with_ptrack_init = 0;
    Oid tblspcOid_with_ptrack_init = 0;
    char       *ptrack_nonparsed = NULL;
    size_t      ptrack_nonparsed_size = 0;

    for (i = 0; i < parray_num(files); i++)
    {
        pgFile     *file = (pgFile *) parray_get(files, i);
        size_t      start_addr;

        /*
         * If there is a ptrack_init file in the database,
         * we must backup all its files, ignoring ptrack files for relations.
         */
        if (file->is_database)
        {
            char *filename = strrchr(file->path, '/');

            Assert(filename != NULL);
            filename++;

            /*
             * The function pg_ptrack_get_and_clear_db returns true
             * if there was a ptrack_init file.
             * Also ignore ptrack files for global tablespace,
             * to avoid any possible specific errors.
             */
            if ((file->tblspcOid == GLOBALTABLESPACE_OID) ||
                pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid, backup_conn))
            {
                dbOid_with_ptrack_init = file->dbOid;
                tblspcOid_with_ptrack_init = file->tblspcOid;
            }
        }

        if (file->is_datafile)
        {
            if (file->tblspcOid == tblspcOid_with_ptrack_init &&
                file->dbOid == dbOid_with_ptrack_init)
            {
                /* ignore ptrack if ptrack_init exists */
                elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->path);
                file->pagemap_isabsent = true;
                continue;
            }

            /* get ptrack bitmap once for all segments of the file */
            if (file->segno == 0)
            {
                /* release previous value */
                pg_free(ptrack_nonparsed);
                ptrack_nonparsed_size = 0;

                ptrack_nonparsed = pg_ptrack_get_and_clear(file->tblspcOid, file->dbOid,
                                               file->relOid, &ptrack_nonparsed_size, backup_conn);
            }

            if (ptrack_nonparsed != NULL)
            {
                /*
                 * pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out.
                 * Compute the beginning of the ptrack map related to this segment
                 *
                 * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
                 * RELSEG_SIZE. Number of Pages per segment: 131072
                 * RELSEG_SIZE/HEAPBLOCKS_PER_BYTE. number of bytes in ptrack file needed
                 * to keep track on one relsegment: 16384
                 */
                start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno;

                /*
                 * If file segment was created after we have read ptrack,
                 * we won't have a bitmap for this segment.
                 */
                if (start_addr > ptrack_nonparsed_size)
                {
                    elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
                    file->pagemap_isabsent = true;
                }
                else
                {

                    if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
                    {
                        file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
                        elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
                    }
                    else
                    {
                        file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
                        elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
                    }

                    file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
                    memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
                }
            }
            else
            {
                /*
                 * If ptrack file is missing, try to copy the entire file.
                 * It can happen in two cases:
                 * - files were created by commands that bypass buffer manager
                 * and, correspondingly, ptrack mechanism.
                 * i.e. CREATE DATABASE
                 * - target relation was deleted.
                 */
                elog(VERBOSE, "Ptrack is missing for file: %s", file->path);
                file->pagemap_isabsent = true;
            }
        }
    }
}

/* Check if the instance supports compatible version of ptrack,
 * fill-in version number if it does.
 * Also for ptrack 2.x save schema namespace.
 */
void
get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo)
{
    PGresult   *res_db;
    char       *ptrack_version_str;

    res_db = pgut_execute(backup_conn,
                          "SELECT extnamespace::regnamespace, extversion "
                          "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'",
                          0, NULL);

    if (PQntuples(res_db) > 0)
    {
        /* ptrack 2.x is supported, save schema name and version */
        nodeInfo->ptrack_schema = pgut_strdup(PQgetvalue(res_db, 0, 0));

        if (nodeInfo->ptrack_schema == NULL)
            elog(ERROR, "Failed to obtain schema name of ptrack extension");

        ptrack_version_str = PQgetvalue(res_db, 0, 1);
    }
    else
    {
        /* ptrack 1.x is supported, save version */
        PQclear(res_db);
        res_db = pgut_execute(backup_conn,
                              "SELECT proname FROM pg_proc WHERE proname='ptrack_version'",
                              0, NULL);

        if (PQntuples(res_db) == 0)
        {
            /* ptrack is not supported */
            PQclear(res_db);
            return;
        }

        res_db = pgut_execute(backup_conn,
                              "SELECT pg_catalog.ptrack_version()",
                              0, NULL);
        if (PQntuples(res_db) == 0)
        {
            /* TODO: Something went wrong, should we error out here? */
            PQclear(res_db);
            return;
        }
        ptrack_version_str = PQgetvalue(res_db, 0, 0);
    }

    if (strcmp(ptrack_version_str, "1.5") == 0)
        nodeInfo->ptrack_version_num = 15;
    else if (strcmp(ptrack_version_str, "1.6") == 0)
        nodeInfo->ptrack_version_num = 16;
    else if (strcmp(ptrack_version_str, "1.7") == 0)
        nodeInfo->ptrack_version_num = 17;
    else if (strcmp(ptrack_version_str, "2.0") == 0)
        nodeInfo->ptrack_version_num = 20;
    else
        elog(WARNING, "Update your ptrack to the version 1.5 or upper. Current version is %s",
             ptrack_version_str);

    PQclear(res_db);
}
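For a quick manual check of what get_ptrack_version() will conclude, the same two probes it issues can be run directly (queries taken verbatim from the function above; interpretation of the results follows its code):

```sql
-- ptrack 2.x registers itself as an extension:
SELECT extnamespace::regnamespace, extversion
FROM pg_catalog.pg_extension WHERE extname = 'ptrack';

-- ptrack 1.x (PostgresPro builds) exposes a bare catalog function instead:
SELECT pg_catalog.ptrack_version();
```

A row from the first query means ptrack 2.x (the schema and version are recorded); otherwise the second query's result is parsed the same way as above ("1.5"/"1.6"/"1.7" map to 15/16/17), and anything else draws the compatibility warning.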

/*
 * Check if ptrack is enabled in target instance
 */
bool
pg_ptrack_enable(PGconn *backup_conn)
{
    PGresult   *res_db;

    res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL);

    if (strcmp(PQgetvalue(res_db, 0, 0), "on") != 0)
    {
        PQclear(res_db);
        return false;
    }
    PQclear(res_db);
    return true;
}


/* ----------------------------
 * Ptrack 1.* support functions
 * ----------------------------
 */

/* Clear ptrack files in all databases of the instance we connected to */
void
pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num)
{
    PGresult   *res_db,
               *res;
    const char *dbname;
    int         i;
    Oid dbOid, tblspcOid;
    char       *params[2];

    // FIXME Perform this check on caller's side
    if (ptrack_version_num >= 20)
        return;

    params[0] = palloc(64);
    params[1] = palloc(64);
    res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database",
                          0, NULL);

    for(i = 0; i < PQntuples(res_db); i++)
    {
        PGconn     *tmp_conn;

        dbname = PQgetvalue(res_db, i, 0);
        if (strcmp(dbname, "template0") == 0)
            continue;

        dbOid = atoi(PQgetvalue(res_db, i, 1));
        tblspcOid = atoi(PQgetvalue(res_db, i, 2));

        tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
                                dbname,
                                instance_config.conn_opt.pguser);

        res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()",
                           0, NULL);
        PQclear(res);

        sprintf(params[0], "%i", dbOid);
        sprintf(params[1], "%i", tblspcOid);
        res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
                           2, (const char **)params);
        PQclear(res);

        pgut_disconnect(tmp_conn);
    }

    pfree(params[0]);
    pfree(params[1]);
    PQclear(res_db);
}

bool
pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn)
{
    char       *params[2];
    char       *dbname;
    PGresult   *res_db;
    PGresult   *res;
    bool        result;

    params[0] = palloc(64);
    params[1] = palloc(64);

    sprintf(params[0], "%i", dbOid);
    res_db = pgut_execute(backup_conn,
                          "SELECT datname FROM pg_database WHERE oid=$1",
                          1, (const char **) params);
    /*
     * If database is not found, it's not an error.
     * It could have been deleted since previous backup.
     */
    if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
        return false;

    dbname = PQgetvalue(res_db, 0, 0);

    /* Always backup all files from template0 database */
    if (strcmp(dbname, "template0") == 0)
    {
        PQclear(res_db);
        return true;
    }
    PQclear(res_db);

    sprintf(params[0], "%i", dbOid);
    sprintf(params[1], "%i", tblspcOid);
    res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
                       2, (const char **)params);

    if (PQnfields(res) != 1)
        elog(ERROR, "cannot perform pg_ptrack_get_and_clear_db()");

    if (!parse_bool(PQgetvalue(res, 0, 0), &result))
        elog(ERROR,
             "result of pg_ptrack_get_and_clear_db() is invalid: %s",
             PQgetvalue(res, 0, 0));

    PQclear(res);
    pfree(params[0]);
    pfree(params[1]);

    return result;
}

/* Read and clear ptrack files of the target relation.
 * Result is a bytea ptrack map of all segments of the target relation.
 * case 1: we know a tablespace_oid, db_oid, and rel_filenode
 * case 2: we know db_oid and rel_filenode (no tablespace_oid, because file in pg_default)
 * case 3: we know only rel_filenode (because file in pg_global)
 */
char *
pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
                        size_t *result_size, PGconn *backup_conn)
{
    PGconn     *tmp_conn;
    PGresult   *res_db,
               *res;
    char       *params[2];
    char       *result;
    char       *val;

    params[0] = palloc(64);
    params[1] = palloc(64);

    /* regular file (not in directory 'global') */
    if (db_oid != 0)
    {
        char       *dbname;

        sprintf(params[0], "%i", db_oid);
        res_db = pgut_execute(backup_conn,
                              "SELECT datname FROM pg_database WHERE oid=$1",
                              1, (const char **) params);
        /*
         * If database is not found, it's not an error.
         * It could have been deleted since previous backup.
         */
        if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
            return NULL;

        dbname = PQgetvalue(res_db, 0, 0);

        if (strcmp(dbname, "template0") == 0)
        {
            PQclear(res_db);
            return NULL;
        }

        tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
                                dbname,
                                instance_config.conn_opt.pguser);
        sprintf(params[0], "%i", tablespace_oid);
        sprintf(params[1], "%i", rel_filenode);
        res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
                           2, (const char **)params);

        if (PQnfields(res) != 1)
            elog(ERROR, "cannot get ptrack file from database \"%s\" by tablespace oid %u and relation oid %u",
                 dbname, tablespace_oid, rel_filenode);
        PQclear(res_db);
        pgut_disconnect(tmp_conn);
    }
    /* file in directory 'global' */
    else
    {
        /*
         * execute ptrack_get_and_clear for relation in pg_global
         * Use backup_conn, cause we can do it from any database.
         */
        sprintf(params[0], "%i", tablespace_oid);
        sprintf(params[1], "%i", rel_filenode);
        res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
                           2, (const char **)params);

        if (PQnfields(res) != 1)
            elog(ERROR, "cannot get ptrack file from pg_global tablespace and relation oid %u",
                 rel_filenode);
    }

    val = PQgetvalue(res, 0, 0);

    /* TODO Now pg_ptrack_get_and_clear() returns bytea ending with \x.
     * It should be fixed in future ptrack releases, but till then we
     * can parse it.
     */
    if (strcmp("x", val+1) == 0)
    {
        /* Ptrack file is missing */
        return NULL;
    }

    result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
                                      result_size);
    PQclear(res);
    pfree(params[0]);
    pfree(params[1]);

    return result;
}

/*
 * Get lsn of the moment when ptrack was enabled the last time.
 */
XLogRecPtr
get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo)

{
    PGresult   *res;
    uint32      lsn_hi;
    uint32      lsn_lo;
    XLogRecPtr  lsn;

    if (nodeInfo->ptrack_version_num < 20)
        res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_control_lsn()",
                           0, NULL);
    else
    {
        char    query[128];

        sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema);
        res = pgut_execute(backup_conn, query, 0, NULL);
    }

    /* Extract timeline and LSN from results of pg_start_backup() */
    XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
    /* Calculate LSN */
    lsn = ((uint64) lsn_hi) << 32 | lsn_lo;

    PQclear(res);
    return lsn;
}
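The incremental-backup sanity check reads the LSN of the last ptrack (re)initialization. Run by hand, the version-dependent probe looks like this (a sketch; the schema-qualified form assumes ptrack 2.x installed in a schema named `ptrack`, which the real code takes from nodeInfo->ptrack_schema):

```sql
-- ptrack 1.x, built-in function:
SELECT pg_catalog.pg_ptrack_control_lsn();

-- ptrack 2.x, extension function; the schema name is illustrative:
SELECT ptrack.pg_ptrack_control_lsn();
```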

char *
pg_ptrack_get_block(ConnectionArgs *arguments,
					Oid dbOid,
					Oid tblsOid,
					Oid relOid,
					BlockNumber blknum,
					size_t *result_size,
					int ptrack_version_num,
					const char *ptrack_schema)
{
	PGresult   *res;
	char	   *params[4];
	char	   *result;

	params[0] = palloc(64);
	params[1] = palloc(64);
	params[2] = palloc(64);
	params[3] = palloc(64);

	/*
	 * Use the per-thread connection kept in arguments, since we may work
	 * in parallel threads. We can connect to any database.
	 */
	sprintf(params[0], "%i", tblsOid);
	sprintf(params[1], "%i", dbOid);
	sprintf(params[2], "%i", relOid);
	sprintf(params[3], "%u", blknum);

	if (arguments->conn == NULL)
	{
		arguments->conn = pgut_connect(instance_config.conn_opt.pghost,
									   instance_config.conn_opt.pgport,
									   instance_config.conn_opt.pgdatabase,
									   instance_config.conn_opt.pguser);
	}

	if (arguments->cancel_conn == NULL)
		arguments->cancel_conn = PQgetCancel(arguments->conn);

	//elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)", dbOid, tblsOid, relOid, blknum);

	if (ptrack_version_num < 20)
		res = pgut_execute_parallel(arguments->conn,
									arguments->cancel_conn,
									"SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
									4, (const char **)params, true, false, false);
	else
	{
		char		query[128];

		/* sanity */
		if (!ptrack_schema)
			elog(ERROR, "Schema name of ptrack extension is missing");

		sprintf(query, "SELECT %s.pg_ptrack_get_block($1, $2, $3, $4)", ptrack_schema);

		res = pgut_execute_parallel(arguments->conn,
									arguments->cancel_conn,
									query, 4, (const char **)params,
									true, false, false);
	}

	if (PQnfields(res) != 1)
	{
		elog(VERBOSE, "cannot get file block for relation oid %u",
			 relOid);
		return NULL;
	}

	if (PQgetisnull(res, 0, 0))
	{
		elog(VERBOSE, "cannot get file block for relation oid %u",
			 relOid);
		return NULL;
	}

	result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
									  result_size);

	PQclear(res);

	pfree(params[0]);
	pfree(params[1]);
	pfree(params[2]);
	pfree(params[3]);

	return result;
}
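
/*
 * A minimal sketch of the two per-block queries used above (editorial
 * note): both take tablespace oid, database oid, relation oid and block
 * number, in that order:
 *
 *   SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4);  -- ptrack 1.x
 *   SELECT my_schema.pg_ptrack_get_block($1, $2, $3, $4);     -- ptrack 2.0
 *
 * "my_schema" is a placeholder for the run-time ptrack_schema value. A
 * NULL result means the block could not be fetched via SQL.
 */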

/* ----------------------------
 * Ptrack 2.* support functions
 * ----------------------------
 */

/*
 * Check if ptrack is enabled in the target instance.
 */
bool
pg_ptrack_enable2(PGconn *backup_conn)
{
	PGresult   *res_db;

	res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL);

	if (strcmp(PQgetvalue(res_db, 0, 0), "0") == 0)
	{
		PQclear(res_db);
		return false;
	}
	PQclear(res_db);
	return true;
}
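
/*
 * A minimal sketch of the check above (editorial note): ptrack 2.0 is
 * considered enabled whenever the ptrack_map_size GUC is non-zero, e.g.
 * after setting in postgresql.conf:
 *
 *   ptrack_map_size = '128MB'
 *
 * which makes the probe above return a non-"0" value:
 *
 *   SHOW ptrack_map_size;
 */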

/*
 * Fetch a list of changed files with their ptrack maps.
 */
parray *
pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, XLogRecPtr lsn)
{
	PGresult   *res;
	char		lsn_buf[17 + 1];
	char	   *params[1];
	parray	   *pagemapset = NULL;
	int			i;
	char		query[512];

	snprintf(lsn_buf, sizeof lsn_buf, "%X/%X", (uint32) (lsn >> 32), (uint32) lsn);
	params[0] = pstrdup(lsn_buf);

	if (!ptrack_schema)
		elog(ERROR, "Schema name of ptrack extension is missing");

	sprintf(query, "SELECT path, pagemap FROM %s.pg_ptrack_get_pagemapset($1) ORDER BY 1",
			ptrack_schema);

	res = pgut_execute(backup_conn, query, 1, (const char **) params);
	pfree(params[0]);

	if (PQnfields(res) != 2)
		elog(ERROR, "cannot get ptrack pagemapset");

	/* Construct the pagemap set */
	for (i = 0; i < PQntuples(res); i++)
	{
		page_map_entry *pm_entry = (page_map_entry *) pgut_malloc(sizeof(page_map_entry));

		/* get path */
		pm_entry->path = pgut_strdup(PQgetvalue(res, i, 0));

		/* get bytea */
		pm_entry->pagemap = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, i, 1),
													 &pm_entry->pagemapsize);

		if (pagemapset == NULL)
			pagemapset = parray_new();

		parray_append(pagemapset, pm_entry);
	}

	PQclear(res);

	return pagemapset;
}
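
/*
 * A minimal usage sketch (editorial note): with the extension installed
 * into a schema named "my_schema" (a placeholder), the same page map set
 * can be inspected by hand, starting from a given LSN:
 *
 *   SELECT path, pagemap
 *     FROM my_schema.pg_ptrack_get_pagemapset('0/15D4240')
 *    ORDER BY 1;
 *
 * Each row pairs a relative data-file path with a bytea bitmap of the
 * pages changed since that LSN.
 */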

/*
 * Given a list of files in the instance to back up, build a pagemap for each
 * data file that has ptrack. The result is saved in the pagemap field of pgFile.
 *
 * We fetch a list of changed files with their ptrack maps, then merge the
 * files with their bitmaps. A file without a bitmap is treated as unchanged.
 */
void
make_pagemap_from_ptrack_2(parray *files,
						   PGconn *backup_conn,
						   const char *ptrack_schema,
						   XLogRecPtr lsn)
{
	parray	   *filemaps;
	int			file_i = 0;
	page_map_entry *dummy_map = NULL;

	/* Receive all available ptrack bitmaps at once */
	filemaps = pg_ptrack_get_pagemapset(backup_conn, ptrack_schema, lsn);

	if (filemaps != NULL)
		parray_qsort(filemaps, pgFileMapComparePath);
	else
		return;

	dummy_map = (page_map_entry *) pgut_malloc(sizeof(page_map_entry));

	/* Iterate over files and look for a corresponding pagemap, if any */
	for (file_i = 0; file_i < parray_num(files); file_i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, file_i);
		page_map_entry **res_map = NULL;
		page_map_entry *map = NULL;

		/*
		 * For now nondata files are not entitled to have a pagemap.
		 * TODO It's possible to use ptrack for incremental backup of
		 * relation forks. Not implemented yet.
		 */
		if (!file->is_datafile || file->is_cfs)
			continue;

		/* Consider only files from PGDATA (this check is probably redundant) */
		if (file->external_dir_num != 0)
			continue;

		if (filemaps)
		{
			dummy_map->path = file->rel_path;
			res_map = parray_bsearch(filemaps, dummy_map, pgFileMapComparePath);
			map = (res_map) ? *res_map : NULL;
		}

		/* Found map */
		if (map)
		{
			elog(VERBOSE, "Using ptrack pagemap for file \"%s\"", file->rel_path);
			file->pagemap.bitmapsize = map->pagemapsize;
			file->pagemap.bitmap = map->pagemap;
		}
	}

	free(dummy_map);
}
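
/*
 * A hypothetical call site (editorial note, variable names assumed): a
 * PTRACK backup would pass the instance file list together with the start
 * LSN of the previous backup, so only pages changed since then are marked:
 *
 *   make_pagemap_from_ptrack_2(backup_files_list, backup_conn,
 *                              nodeInfo->ptrack_schema,
 *                              prev_backup->start_lsn);
 */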

@ -34,7 +34,7 @@ Run ptrack tests:


Usage:
    pip install testgres
    pip install testgres==1.8.2
    export PG_CONFIG=/path/to/pg_config
    python -m unittest [-v] tests[.specific_module][.class.test]
```
@ -579,6 +579,10 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
        log_file = os.path.join(node.logs_dir, 'postgresql.log')
        with open(log_file, 'r') as f:
            log_content = f.read()
        self.assertIn(
            'Cannot open destination temporary WAL file',
            log_content)

        self.assertIn(
            'Reusing stale destination temporary WAL file',
            log_content)
@ -1320,6 +1324,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):

        replica.pgbench_init(scale=2)

        sleep(5)

        show = self.show_archive(backup_dir, as_text=True)
        show = self.show_archive(backup_dir)

183
tests/backup.py
@ -24,8 +24,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on'})
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -33,6 +32,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        backup_id = self.backup_node(backup_dir, 'node', node)
        show_backup = self.show_pb(backup_dir, 'node')[0]

@ -121,7 +125,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on'})
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -129,6 +133,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
@ -244,7 +253,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on'})
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -252,6 +261,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4"])
@ -276,14 +290,18 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on'})
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])
@ -307,6 +325,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -315,6 +334,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])
@ -324,9 +348,10 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "CHECKPOINT;")
            "CHECKPOINT")

        heap_path = node.safe_psql(
            "postgres",
@ -340,12 +365,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream", "--log-level-file=verbose"])
            options=["-j", "4", "--stream", "--log-level-file=VERBOSE"])

        # open log file and check
        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            self.assertIn('block 1, try to fetch via SQL', log_content)
            self.assertIn('block 1, try to fetch via shared buffer', log_content)
            self.assertIn('SELECT pg_catalog.pg_ptrack_get_block', log_content)
            f.close

@ -366,6 +391,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -374,6 +400,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])
@ -414,12 +445,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        if self.remote:
            self.assertTrue(
                "WARNING: File" in e.message and
                "try to fetch via SQL" in e.message and
                "try to fetch via shared buffer" in e.message and
                "WARNING: page verification failed, "
                "calculated checksum" in e.message and
                "ERROR: query failed: "
                "ERROR: invalid page in block" in e.message and
                "query was: SELECT pg_catalog.pg_ptrack_get_block_2" in e.message,
                "query was: SELECT pg_catalog.pg_ptrack_get_block" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))
        else:
@ -427,12 +458,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                "LOG: File" in e.message and
                "blknum" in e.message and
                "have wrong checksum" in e.message and
                "try to fetch via SQL" in e.message and
                "try to fetch via shared buffer" in e.message and
                "WARNING: page verification failed, "
                "calculated checksum" in e.message and
                "ERROR: query failed: "
                "ERROR: invalid page in block" in e.message and
                "query was: SELECT pg_catalog.pg_ptrack_get_block_2" in e.message,
                "query was: SELECT pg_catalog.pg_ptrack_get_block" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

@ -450,6 +481,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -458,6 +490,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack and node.major_version > 11:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
@ -922,7 +959,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # DELTA backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            gdb=True, options=['--log-level-file=verbose'])
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()
@ -989,7 +1026,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # PAGE backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            gdb=True, options=['--log-level-file=verbose'])
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()
@ -1029,15 +1066,19 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on'})
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
@ -1055,7 +1096,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # PTRACK backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            gdb=True, options=['--log-level-file=verbose'])
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()
@ -1304,7 +1345,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=verbose'])
            options=['--stream', '--log-level-file=LOG'])

        gdb.set_breakpoint('copy_file')
        gdb.run_until_break()
@ -1342,7 +1383,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=verbose'])
            options=['--stream', '--log-level-file=LOG'])

        gdb.set_breakpoint('copy_file')
        gdb.run_until_break()
@ -1379,8 +1420,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=verbose'])
            backup_dir, 'node', node, gdb=True, options=['--stream'])

        gdb.set_breakpoint('copy_file')
        gdb.run_until_break()
@ -1534,12 +1574,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={
                'archive_timeout': '30s'})

        if self.ptrack:
            self.set_auto_conf(node, {'ptrack_enable': 'on'})
            pg_options={'archive_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
@ -1550,6 +1587,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack and node.major_version >= 12:
            node.safe_psql(
                "backupdb",
                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")

        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
@ -1639,6 +1681,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            "GRANT CONNECT ON DATABASE backupdb to backup; "
            "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
            "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
            "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
            "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
            "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
            "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
@ -1654,22 +1697,42 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        )

        if self.ptrack:
            for fname in [
                'pg_catalog.oideq(oid, oid)',
                'pg_catalog.ptrack_version()',
                'pg_catalog.pg_ptrack_clear()',
            if node.major_version < 12:
                for fname in [
                    'pg_catalog.oideq(oid, oid)',
                    'pg_catalog.ptrack_version()',
                    'pg_catalog.pg_ptrack_clear()',
                    'pg_catalog.pg_ptrack_control_lsn()',
                    'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
                    'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
                    'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)',
                    'pg_catalog.pg_stop_backup()']:

                    node.safe_psql(
                        "backupdb",
                        "GRANT EXECUTE ON FUNCTION {0} "
                        "TO backup".format(fname))
            else:
                fnames = [
                    'pg_catalog.pg_ptrack_get_pagemapset(pg_lsn)',
                    'pg_catalog.pg_ptrack_control_lsn()',
                    'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
                    'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
                    'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)',
                    'pg_catalog.pg_stop_backup()']:
                # try:
                node.safe_psql(
                    "backupdb",
                    "GRANT EXECUTE ON FUNCTION {0} "
                    "TO backup".format(fname))
                # except:
                #     pass
                    'pg_catalog.pg_ptrack_get_block(oid, oid, oid, bigint)'
                ]

                for fname in fnames:
                    node.safe_psql(
                        "backupdb",
                        "GRANT EXECUTE ON FUNCTION {0} "
                        "TO backup".format(fname))

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup")

            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup")

        # FULL backup
        self.backup_node(
@ -1903,14 +1966,12 @@ class BackupTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={
                'archive_timeout': '30s',
                'checkpoint_timeout': '1h'})

        if self.ptrack:
            self.set_auto_conf(node, {'ptrack_enable': 'on'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
@ -1920,6 +1981,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack and node.major_version >= 12:
            node.safe_psql(
                'backupdb',
                'CREATE EXTENSION ptrack')

        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
@ -2006,6 +2072,15 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK
        if self.ptrack:
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])

        if self.get_version(node) < 90600:
            self.del_test_dir(module_name, fname)
            return
@ -2062,6 +2137,15 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK backup from replica
        if self.ptrack:
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='delta',
                datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s'])
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='delta',
                datname='backupdb', options=['--stream', '-U', 'backup'])

        # Clean after yourself
        self.del_test_dir(module_name, fname)

@ -2268,6 +2352,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
    # @unittest.skip("skip")
    def test_streaming_timeout(self):
        """
        Illustrate the problem of losing the exact error
        message because our WAL streaming engine is "borrowed"
        from pg_receivexlog
        """
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@ -2304,5 +2391,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
            'could not receive data from WAL stream',
            log_content)

        self.assertIn(
            'ERROR: Problem in receivexlog',
            log_content)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

@ -220,10 +220,10 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'autovacuum': 'off',
                'ptrack_enable': 'on'})
                'autovacuum': 'off'})

        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)
@ -254,8 +254,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)

@ -194,8 +194,8 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on'})
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -203,6 +203,11 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                'postgres',
                'CREATE EXTENSION ptrack')

        # full backup mode
        self.backup_node(backup_dir, 'node', node)
        # ptrack backup mode

@ -28,7 +28,6 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'autovacuum': 'off'})

        node_restored = self.make_simple_node(
@ -105,7 +104,6 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'autovacuum': 'off'
            }
        )
@ -193,7 +191,6 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'autovacuum': 'off'
            }
        )
@ -493,18 +490,14 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s'
            }
        )
            initdb_params=['--data-checksums'])

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'),
        )
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node_restored.cleanup()
        node.slow_start()
        self.create_tblspace_in_node(node, 'somedata')

@ -577,7 +570,6 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_size': '10GB',
                'checkpoint_timeout': '5min',
                'autovacuum': 'off'
            }
        )
@ -1059,7 +1051,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_delta_corruption_heal_via_ptrack_1(self):
    def test_delta_corruption_heal_via_ptrack(self):
        """make node, corrupt some page, check that backup failed"""
        if not self.ptrack:
            return unittest.skip('Skipped because ptrack support is disabled')
@ -1076,6 +1068,11 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])
@ -1107,7 +1104,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        # open log file and check
        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            self.assertIn('block 1, try to fetch via SQL', log_content)
            self.assertIn('block 1, try to fetch via shared buffer', log_content)
            self.assertIn('SELECT pg_catalog.pg_ptrack_get_block', log_content)
            f.close

@ -1119,7 +1116,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_page_corruption_heal_via_ptrack_2(self):
    def test_page_corruption_heal_via_ptrack(self):
        """make node, corrupt some page, check that backup failed"""
        if not self.ptrack:
            return unittest.skip('Skipped because ptrack support is disabled')
@ -1136,6 +1133,11 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])
@ -1176,7 +1178,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        if self.remote:
            self.assertTrue(
                "LOG: File" in e.message and
                "try to fetch via SQL" in e.message and
                "try to fetch via shared buffer" in e.message and
                "WARNING: page verification failed, "
                "calculated checksum" in e.message and
                "ERROR: query failed: "
@ -1189,7 +1191,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
                "WARNING: File" in e.message and
                "blknum" in e.message and
                "have wrong checksum" in e.message and
                "try to fetch via SQL" in e.message and
                "try to fetch via shared buffer" in e.message and
                "WARNING: page verification failed, "
                "calculated checksum" in e.message and
                "ERROR: query failed: "

@ -8,7 +8,7 @@ import shutil

module_name = 'external'


# TODO: add some ptrack tests
class ExternalTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")

@ -117,17 +117,17 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
        if not self.ptrack:
            return unittest.skip('Skipped because ptrack support is disabled')

        if self.pg_config_version > self.version_to_num('11.0'):
            return unittest.skip('You need PostgreSQL =< 11 for this test')

        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'ptrack_enable': 'on'
            }
        )
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
@ -202,17 +202,17 @@ class FalsePositive(ProbackupTest, unittest.TestCase):
        if not self.ptrack:
            return unittest.skip('Skipped because ptrack support is disabled')

        if self.pg_config_version > self.version_to_num('11.0'):
            return unittest.skip('You need PostgreSQL =< 11 for this test')

        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'ptrack_enable': 'on'
            }
        )
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

@ -311,6 +311,7 @@ class ProbackupTest(object):
            self,
            base_dir=None,
            set_replication=False,
            ptrack_enable=False,
            initdb_params=[],
            pg_options={}):

@ -325,6 +326,10 @@ class ProbackupTest(object):
        node.init(
            initdb_params=initdb_params, allow_streaming=set_replication)

        # set major version
        with open(os.path.join(node.data_dir, 'PG_VERSION')) as f:
            node.major_version = int(f.read().rstrip())

        # Sane default parameters
        options = {}
        options['max_connections'] = 100
@ -345,16 +350,18 @@ class ProbackupTest(object):
        if set_replication:
            options['max_wal_senders'] = 10

        if ptrack_enable:
            if node.major_version > 11:
                options['ptrack_map_size'] = '128MB'
            else:
                options['ptrack_enable'] = 'on'

        # set default values
        self.set_auto_conf(node, options)

        # Apply given parameters
        self.set_auto_conf(node, pg_options)

        # set major version
        with open(os.path.join(node.data_dir, 'PG_VERSION')) as f:
            node.major_version = f.read().rstrip()

        return node

    def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
@ -482,6 +489,31 @@ class ProbackupTest(object):
        os.close(file)
        return ptrack_bits_for_fork

    def check_ptrack_map_sanity(self, node, idx_ptrack):
        if node.major_version >= 12:
            return

        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
            # update path to heap and index files in case they've changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # calculate new md5sums for pages
            idx_ptrack[i]['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files')

    def check_ptrack_sanity(self, idx_dict):
        success = True
        if idx_dict['new_size'] > idx_dict['old_size']:
@ -1157,10 +1189,10 @@ class ProbackupTest(object):

        return restore_command

    def set_auto_conf(self, node, options):
    def set_auto_conf(self, node, options, config='postgresql.auto.conf'):

        # parse postgresql.auto.conf
        path = os.path.join(node.data_dir, 'postgresql.auto.conf')
        path = os.path.join(node.data_dir, config)

        with open(path, 'r') as f:
            raw_content = f.read()
@ -1220,11 +1252,18 @@ class ProbackupTest(object):
            f.flush()
            f.close()

            config = 'postgresql.auto.conf'
            probackup_recovery_path = os.path.join(replica.data_dir, 'probackup_recovery.conf')
            if os.path.exists(probackup_recovery_path):
                if os.stat(probackup_recovery_path).st_size > 0:
                    config = 'probackup_recovery.conf'

            self.set_auto_conf(
                replica,
                {'primary_conninfo': 'user={0} port={1} application_name={2} '
                    ' sslmode=prefer sslcompression=1'.format(
                        self.user, master.port, replica_name)})
                        self.user, master.port, replica_name)},
                config)
        else:
            replica.append_conf('recovery.conf', 'standby_mode = on')
            replica.append_conf(
@ -1401,7 +1440,7 @@ class ProbackupTest(object):
            'backup_label', 'tablespace_map', 'recovery.conf',
            'ptrack_control', 'ptrack_init', 'pg_control',
            'probackup_recovery.conf', 'recovery.signal',
            'standby.signal'
            'standby.signal', 'ptrack.map', 'ptrack.map.mmap'
        ]

        if exclude_dirs:

@ -823,20 +823,19 @@ class MergeTest(ProbackupTest, unittest.TestCase):
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s',
                'autovacuum': 'off',
                'ptrack_enable': 'on'
            }
        )
        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
            ptrack_enable=True,
            pg_options={'autovacuum': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node_restored.cleanup()
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
@ -871,6 +870,10 @@ class MergeTest(ProbackupTest, unittest.TestCase):

        self.validate_pb(backup_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node_restored'))
        node_restored.cleanup()

        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

@ -878,8 +881,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(old_tablespace, new_tablespace),
                "--recovery-target-action=promote"])
                "-T", "{0}={1}".format(old_tablespace, new_tablespace)])

        # Physical comparison
        if self.paranoia:

@ -12,9 +12,6 @@ class OptionTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_help_1(self):
        """help options"""
        self.maxDiff = None
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        with open(os.path.join(self.dir_path, "expected/option_help.out"), "rb") as help_out:
            self.assertEqual(
                self.run_pb(["--help"]),
@ -24,8 +21,6 @@ class OptionTest(ProbackupTest, unittest.TestCase):
    # @unittest.skip("skip")
    def test_version_2(self):
        """help options"""
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out:
            self.assertIn(
                version_out.read().decode("utf-8"),
@ -35,8 +30,6 @@ class OptionTest(ProbackupTest, unittest.TestCase):
    # @unittest.skip("skip")
    def test_without_backup_path_3(self):
        """backup command failure without backup mode option"""
        fname = self.id().split(".")[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        try:
            self.run_pb(["backup", "-b", "full"])
            self.assertEqual(1, 0, "Expecting Error because '-B' parameter is not specified.\n Output: {0} \n CMD: {1}".format(

@ -129,9 +129,9 @@ class BugTest(ProbackupTest, unittest.TestCase):
            recovery_config = 'recovery.conf'

        replica.append_conf(
            'recovery.conf', "recovery_target = 'immediate'")
            recovery_config, "recovery_target = 'immediate'")
        replica.append_conf(
            'recovery.conf', "recovery_target_action = 'pause'")
            recovery_config, "recovery_target_action = 'pause'")
        replica.slow_start(replica=True)

        if self.get_version(node) < 100000:

1950
tests/ptrack.py
File diff suppressed because it is too large
@ -23,24 +23,28 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
        if not self.ptrack:
            return unittest.skip('Skipped because ptrack support is disabled')

        if self.pg_config_version > self.version_to_num('9.6.0'):
            return unittest.skip(
                'Skipped because backup from replica is not supported in PG 9.5')

        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        master = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'master'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on'})
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        if self.get_version(master) < self.version_to_num('9.6.0'):
            self.del_test_dir(module_name, fname)
            return unittest.skip(
                'Skipped because backup from replica is not supported in PG 9.5')

        master.slow_start()
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)

        master.slow_start()

        if master.major_version >= 12:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # CREATE TABLE
        master.psql(
            "postgres",
@ -427,6 +431,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
            data_dir=replica.data_dir,
            backup_type='page', options=['--archive-timeout=60s'])

        sleep(1)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='delta', options=['--archive-timeout=60s'])
@ -499,8 +505,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        self.set_replica(
            master, replica,
            replica_name='replica', synchronous=True)
            master, replica, replica_name='replica', synchronous=True)

        replica.slow_start(replica=True)

@ -500,7 +500,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on'})
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -508,6 +508,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)
@ -529,8 +534,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", "--recovery-target-action=promote"]),
                options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

@ -551,7 +555,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on'})
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -559,6 +563,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)
@ -587,8 +596,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    "-j", "4", "--recovery-target-action=promote"]),
                options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

@ -609,8 +617,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on'})
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -618,6 +626,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node, options=["--stream"])
@ -639,8 +652,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
                backup_dir, 'node', node, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

@ -664,9 +676,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on'})
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -674,6 +685,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=2)

        self.backup_node(backup_dir, 'node', node)
@ -703,8 +719,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
                backup_dir, 'node', node, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

@ -731,9 +746,8 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on'})
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -741,6 +755,11 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if node.major_version >= 12:
            node.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # wal_segment_size = self.guc_wal_segment_size(node)
        node.pgbench_init(scale=2)

@ -773,8 +792,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                options=["-j", "4", "--recovery-target-action=promote"]),
                backup_dir, 'node', node, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))
        node.slow_start()
@ -2493,7 +2511,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        node.slow_start()

        cat_version = node.get_control_data()["Catalog version number"]
        version_specific_dir = 'PG_' + node.major_version + '_' + cat_version
        version_specific_dir = 'PG_' + str(node.major_version) + '_' + cat_version

        # PG_10_201707211
        # pg_tblspc/33172/PG_9.5_201510051/16386/
@ -3162,6 +3180,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            "GRANT CONNECT ON DATABASE backupdb to backup; "
            "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
            "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
            "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
            "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
            "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
            "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
@ -3177,20 +3196,41 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
        )

        if self.ptrack:
            for fname in [
            fnames = []
            if node.major_version < 12:
                fnames += [
                    'pg_catalog.oideq(oid, oid)',
                    'pg_catalog.ptrack_version()',
                    'pg_catalog.pg_ptrack_clear()',
                    'pg_catalog.pg_ptrack_control_lsn()',
                    'pg_catalog.pg_ptrack_get_and_clear_db(oid, oid)',
                    'pg_catalog.pg_ptrack_get_and_clear(oid, oid)',
                    'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)',
                    'pg_catalog.pg_stop_backup()']:

                    'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)'
                ]
            else:
                # TODO why backup works without these grants ?
                # fnames += [
                #     'pg_ptrack_get_pagemapset(pg_lsn)',
                #     'pg_ptrack_control_lsn()',
                #     'pg_ptrack_get_block(oid, oid, oid, bigint)'
                # ]
                node.safe_psql(
                    "backupdb",
                    "GRANT EXECUTE ON FUNCTION {0} "
                    "TO backup".format(fname))
                    "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")

            for fname in fnames:
                node.safe_psql(
                    "backupdb",
                    "GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname))

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup")

            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup")

        # FULL backup without database_map
        backup_id = self.backup_node(
