1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-07-15 07:04:14 +02:00

[Issue #265][PGPRO-5421] archive-push backward compatibility (#437)

Restore the --wal-file-path option of the archive-push command (it was ignored since a196073) 

Co-authored-by: Mikhail A. Kulagin <m.kulagin@postgrespro.ru>
Co-authored-by: Elena Indrupskaya <e.indrupskaya@postgrespro.ru>
This commit is contained in:
dlepikhova
2021-11-22 12:41:49 +05:00
committed by GitHub
parent 5b6ca62417
commit b87ca18bfc
17 changed files with 338 additions and 86 deletions

View File

@ -131,6 +131,7 @@ doc/src/sgml/pgprobackup.sgml
<arg choice="plain"><option>archive-push</option></arg>
<arg choice="plain"><option>-B</option> <replaceable>backup_dir</replaceable></arg>
<arg choice="plain"><option>--instance</option> <replaceable>instance_name</replaceable></arg>
<arg choice="plain"><option>--wal-file-path</option> <replaceable>wal_file_path</replaceable></arg>
<arg choice="plain"><option>--wal-file-name</option> <replaceable>wal_file_name</replaceable></arg>
<arg rep="repeat"><replaceable>option</replaceable></arg>
</cmdsynopsis>
@ -5367,7 +5368,9 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
Provides the path to the WAL file in
<parameter>archive_command</parameter> and
<parameter>restore_command</parameter>. Use the <literal>%p</literal>
variable as the value for this option for correct processing.
variable as the value for this option or explicitly specify the path to a file
outside of the data directory. If you skip this option, the path
specified in <filename>pg_probackup.conf</filename> will be used.
</para>
</listitem>
</varlistentry>
@ -5380,6 +5383,8 @@ pg_probackup catchup -b <replaceable>catchup_mode</replaceable>
<parameter>archive_command</parameter> and
<parameter>restore_command</parameter>. Use the <literal>%f</literal>
variable as the value for this option for correct processing.
If the value of <option>--wal-file-path</option> is a path
outside of the data directory, explicitly specify the filename.
</para>
</listitem>
</varlistentry>

View File

@ -3,7 +3,7 @@
* archive.c: - pg_probackup specific archive commands for archive backups.
*
*
* Portions Copyright (c) 2018-2019, Postgres Professional
* Portions Copyright (c) 2018-2021, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -113,15 +113,13 @@ static parray *setup_push_filelist(const char *archive_status_dir,
* Where archlog_path is $BACKUP_PATH/wal/instance_name
*/
void
do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path,
do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg_xlog_dir,
char *wal_file_name, int batch_size, bool overwrite,
bool no_sync, bool no_ready_rename)
{
uint64 i;
char current_dir[MAXPGPATH];
char pg_xlog_dir[MAXPGPATH];
char archive_status_dir[MAXPGPATH];
uint64 system_id;
/* usually instance pgdata/pg_wal/archive_status, empty if no_ready_rename or batch_size == 1 */
char archive_status_dir[MAXPGPATH] = "";
bool is_compress = false;
/* arrays with meta info for multi threaded backup */
@ -141,31 +139,8 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa
parray *batch_files = NULL;
int n_threads;
if (wal_file_name == NULL)
elog(ERROR, "Required parameter is not specified: --wal-file-name %%f");
if (!getcwd(current_dir, sizeof(current_dir)))
elog(ERROR, "getcwd() error");
/* verify that archive-push --instance parameter is valid */
system_id = get_system_identifier(current_dir, FIO_DB_HOST);
if (instance->pgdata == NULL)
elog(ERROR, "Cannot read pg_probackup.conf for this instance");
if (system_id != instance->system_identifier)
elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch."
"Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT,
wal_file_name, instanceState->instance_name, instance->system_identifier, system_id);
if (instance->compress_alg == PGLZ_COMPRESS)
elog(ERROR, "Cannot use pglz for WAL compression");
join_path_components(pg_xlog_dir, current_dir, XLOGDIR);
join_path_components(archive_status_dir, pg_xlog_dir, "archive_status");
/* Create 'archlog_path' directory. Do nothing if it already exists. */
//fio_mkdir(instanceState->instance_wal_subdir_path, DIR_PERMISSION, FIO_BACKUP_HOST);
if (!no_ready_rename || batch_size > 1)
join_path_components(archive_status_dir, pg_xlog_dir, "archive_status");
#ifdef HAVE_LIBZ
if (instance->compress_alg == ZLIB_COMPRESS)
@ -204,12 +179,13 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa
{
int rc;
WALSegno *xlogfile = (WALSegno *) parray_get(batch_files, i);
bool first_wal = strcmp(xlogfile->name, wal_file_name) == 0;
rc = push_file(xlogfile, archive_status_dir,
rc = push_file(xlogfile, first_wal ? NULL : archive_status_dir,
pg_xlog_dir, instanceState->instance_wal_subdir_path,
overwrite, no_sync,
instance->archive_timeout,
no_ready_rename || (strcmp(xlogfile->name, wal_file_name) == 0) ? true : false,
no_ready_rename || first_wal,
is_compress && IsXLogFileName(xlogfile->name) ? true : false,
instance->compress_level);
if (rc == 0)
@ -233,7 +209,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa
arg->first_filename = wal_file_name;
arg->archive_dir = instanceState->instance_wal_subdir_path;
arg->pg_xlog_dir = pg_xlog_dir;
arg->archive_status_dir = archive_status_dir;
arg->archive_status_dir = (!no_ready_rename || batch_size > 1) ? archive_status_dir : NULL;
arg->overwrite = overwrite;
arg->compress = is_compress;
arg->no_sync = no_sync;
@ -276,7 +252,7 @@ do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wa
/* Note, that we are leaking memory here,
* because pushing into archive is a very
* time-sensetive operation, so we skip freeing stuff.
* time-sensitive operation, so we skip freeing stuff.
*/
push_done:
@ -356,9 +332,6 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir,
int compress_level)
{
int rc;
char wal_file_dummy[MAXPGPATH];
join_path_components(wal_file_dummy, archive_status_dir, xlogfile->name);
elog(LOG, "pushing file \"%s\"", xlogfile->name);
@ -375,11 +348,13 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir,
#endif
/* take '--no-ready-rename' flag into account */
if (!no_ready_rename)
if (!no_ready_rename && archive_status_dir != NULL)
{
char wal_file_dummy[MAXPGPATH];
char wal_file_ready[MAXPGPATH];
char wal_file_done[MAXPGPATH];
join_path_components(wal_file_dummy, archive_status_dir, xlogfile->name);
snprintf(wal_file_ready, MAXPGPATH, "%s.%s", wal_file_dummy, "ready");
snprintf(wal_file_done, MAXPGPATH, "%s.%s", wal_file_dummy, "done");

View File

@ -943,7 +943,7 @@ check_system_identifiers(PGconn *conn, const char *pgdata)
uint64 system_id_conn;
uint64 system_id_pgdata;
system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST);
system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false);
system_id_conn = get_remote_system_identifier(conn);
/* for checkdb check only system_id_pgdata and system_id_conn */

View File

@ -48,7 +48,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons
/* Get WAL segments size and system ID of source PG instance */
instance_config.xlog_seg_size = get_xlog_seg_size(source_pgdata);
instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST);
instance_config.system_identifier = get_system_identifier(source_pgdata, FIO_DB_HOST, false);
current.start_time = time(NULL);
strlcpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version));
@ -163,7 +163,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
uint64 source_conn_id, source_id, dest_id;
source_conn_id = get_remote_system_identifier(source_conn);
source_id = get_system_identifier(source_pgdata, FIO_DB_HOST); /* same as instance_config.system_identifier */
source_id = get_system_identifier(source_pgdata, FIO_DB_HOST, false); /* same as instance_config.system_identifier */
if (source_conn_id != source_id)
elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
@ -171,7 +171,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
if (current.backup_mode != BACKUP_MODE_FULL)
{
dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST);
dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false);
if (source_conn_id != dest_id)
elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
source_conn_id, dest_pgdata, dest_id);

View File

@ -227,6 +227,7 @@ help_pg_probackup(void)
printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" [--wal-file-path=wal-file-path]\n"));
printf(_(" [-j num-threads] [--batch-size=batch_size]\n"));
printf(_(" [--archive-timeout=timeout]\n"));
printf(_(" [--no-ready-rename] [--no-sync]\n"));
@ -937,6 +938,7 @@ help_archive_push(void)
{
printf(_("\n%s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" [--wal-file-path=wal-file-path]\n"));
printf(_(" [-j num-threads] [--batch-size=batch_size]\n"));
printf(_(" [--archive-timeout=timeout]\n"));
printf(_(" [--no-ready-rename] [--no-sync]\n"));
@ -951,6 +953,8 @@ help_archive_push(void)
printf(_(" --instance=instance_name name of the instance to delete\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" name of the file to copy into WAL archive\n"));
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" relative destination path of the WAL archive\n"));
printf(_(" -j, --threads=NUM number of parallel threads\n"));
printf(_(" --batch-size=NUM number of files to be copied\n"));
printf(_(" --archive-timeout=timeout wait timeout before discarding stale temp file(default: 5min)\n"));
@ -981,8 +985,8 @@ static void
help_archive_get(void)
{
printf(_("\n%s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
printf(_(" --wal-file-name=wal-file-name\n"));
printf(_(" [--wal-file-path=wal-file-path]\n"));
printf(_(" [-j num-threads] [--batch-size=batch_size]\n"));
printf(_(" [--no-validate-wal]\n"));
printf(_(" [--remote-proto] [--remote-host]\n"));

View File

@ -57,7 +57,7 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance)
"(-D, --pgdata)");
/* Read system_identifier from PGDATA */
instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST);
instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST, false);
/* Starting from PostgreSQL 11 read WAL segment size from PGDATA */
instance->xlog_seg_size = get_xlog_seg_size(instance->pgdata);

View File

@ -35,7 +35,7 @@
* which includes info about pgdata directory and connection.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
* Portions Copyright (c) 2015-2021, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -151,6 +151,7 @@ static char *wal_file_path;
static char *wal_file_name;
static bool file_overwrite = false;
static bool no_ready_rename = false;
static char archive_push_xlog_dir[MAXPGPATH] = "";
/* archive get options */
static char *prefetch_dir;
@ -788,7 +789,7 @@ main(int argc, char *argv[])
current.stream = stream_wal = true;
if (instance_config.external_dir_str)
elog(ERROR, "external directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd));
// TODO проверить instance_config.conn_opt
// TODO check instance_config.conn_opt
}
/* sanity */
@ -796,6 +797,97 @@ main(int argc, char *argv[])
elog(ERROR, "You cannot specify \"--no-validate\" option with the \"%s\" command",
get_subcmd_name(backup_subcmd));
if (backup_subcmd == ARCHIVE_PUSH_CMD)
{
/* Check archive-push parameters and construct archive_push_xlog_dir
*
* There are 4 cases:
* 1. no --wal-file-path specified -- use cwd, ./PG_XLOG_DIR for wal files
* (and ./PG_XLOG_DIR/archive_status for .done files inside do_archive_push())
* in this case we can use batches and threads
* 2. --wal-file-path is specified and it is the same dir as stored in pg_probackup.conf (instance_config.pgdata)
* in this case we can use this path, as well as batches and thread
* 3. --wal-file-path is specified and it isn't same dir as stored in pg_probackup.conf but control file present with correct system_id
* in this case we can use this path, as well as batches and thread
* (replica for example, see test_archive_push_sanity)
* 4. --wal-file-path is specified and it is different from instance_config.pgdata and no control file found
* disable optimizations and work with user specified path
*/
bool check_system_id = true;
uint64 system_id;
char current_dir[MAXPGPATH];
if (wal_file_name == NULL)
elog(ERROR, "Required parameter is not specified: --wal-file-name %%f");
if (instance_config.pgdata == NULL)
elog(ERROR, "Cannot read pg_probackup.conf for this instance");
/* TODO may be remove in preference of checking inside compress_init()? */
if (instance_config.compress_alg == PGLZ_COMPRESS)
elog(ERROR, "Cannot use pglz for WAL compression");
if (!getcwd(current_dir, sizeof(current_dir)))
elog(ERROR, "getcwd() error");
if (wal_file_path == NULL)
{
/* 1st case */
system_id = get_system_identifier(current_dir, FIO_DB_HOST, false);
join_path_components(archive_push_xlog_dir, current_dir, XLOGDIR);
}
else
{
/*
* Usually we get something like
* wal_file_path = "pg_wal/0000000100000000000000A1"
* wal_file_name = "0000000100000000000000A1"
* instance_config.pgdata = "/pgdata/.../node/data"
* We need to strip wal_file_name from wal_file_path, add XLOGDIR to instance_config.pgdata
* and compare this directories.
* Note, that pg_wal can be symlink (see test_waldir_outside_pgdata_archiving)
*/
char *stripped_wal_file_path = pgut_str_strip_trailing_filename(wal_file_path, wal_file_name);
join_path_components(archive_push_xlog_dir, instance_config.pgdata, XLOGDIR);
if (fio_is_same_file(stripped_wal_file_path, archive_push_xlog_dir, true, FIO_DB_HOST))
{
/* 2nd case */
system_id = get_system_identifier(instance_config.pgdata, FIO_DB_HOST, false);
/* archive_push_xlog_dir already have right value */
}
else
{
if (strlen(stripped_wal_file_path) < MAXPGPATH)
strncpy(archive_push_xlog_dir, stripped_wal_file_path, MAXPGPATH);
else
elog(ERROR, "Value specified to --wal_file_path is too long");
system_id = get_system_identifier(current_dir, FIO_DB_HOST, true);
/* 3rd case if control file present -- i.e. system_id != 0 */
if (system_id == 0)
{
/* 4th case */
check_system_id = false;
if (batch_size > 1 || num_threads > 1 || !no_ready_rename)
{
elog(WARNING, "Supplied --wal_file_path is outside pgdata, force safe values for options: --batch-size=1 -j 1 --no-ready-rename");
batch_size = 1;
num_threads = 1;
no_ready_rename = true;
}
}
}
pfree(stripped_wal_file_path);
}
if (check_system_id && system_id != instance_config.system_identifier)
elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch."
"Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT,
wal_file_name, instanceState->instance_name, instance_config.system_identifier, system_id);
}
#if PG_VERSION_NUM >= 100000
if (temp_slot && perm_slot)
elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option");
@ -819,7 +911,7 @@ main(int argc, char *argv[])
switch (backup_subcmd)
{
case ARCHIVE_PUSH_CMD:
do_archive_push(instanceState, &instance_config, wal_file_path, wal_file_name,
do_archive_push(instanceState, &instance_config, archive_push_xlog_dir, wal_file_name,
batch_size, file_overwrite, no_sync, no_ready_rename);
break;
case ARCHIVE_GET_CMD:

View File

@ -889,7 +889,7 @@ extern int do_init(CatalogState *catalogState);
extern int do_add_instance(InstanceState *instanceState, InstanceConfig *instance);
/* in archive.c */
extern void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *wal_file_path,
extern void do_archive_push(InstanceState *instanceState, InstanceConfig *instance, char *pg_xlog_dir,
char *wal_file_name, int batch_size, bool overwrite,
bool no_sync, bool no_ready_rename);
extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const char *prefetch_dir_arg, char *wal_file_path,
@ -1153,7 +1153,7 @@ extern XLogRecPtr get_next_record_lsn(const char *archivedir, XLogSegNo segno, T
extern TimeLineID get_current_timeline(PGconn *conn);
extern TimeLineID get_current_timeline_from_control(const char *pgdata_path, fio_location location, bool safe);
extern XLogRecPtr get_checkpoint_location(PGconn *conn);
extern uint64 get_system_identifier(const char *pgdata_path, fio_location location);
extern uint64 get_system_identifier(const char *pgdata_path, fio_location location, bool safe);
extern uint64 get_remote_system_identifier(PGconn *conn);
extern uint32 get_data_checksum_version(bool safe);
extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path);

View File

@ -2186,7 +2186,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
*/
elog(INFO, "Trying to read pg_control file in destination directory");
system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST);
system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false);
if (system_id_pgdata == instance_config.system_identifier)
system_id_match = true;

View File

@ -247,15 +247,15 @@ get_checkpoint_location(PGconn *conn)
}
uint64
get_system_identifier(const char *pgdata_path, fio_location location)
get_system_identifier(const char *pgdata_path, fio_location location, bool safe)
{
ControlFileData ControlFile;
char *buffer;
size_t size;
/* First fetch file... */
buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location);
if (buffer == NULL)
buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, safe, location);
if (safe && buffer == NULL)
return 0;
digestControlFile(&ControlFile, buffer, size);
pg_free(buffer);

View File

@ -1141,6 +1141,33 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo
}
}
/*
 * Check whether filename1 and filename2 refer to the same file.
 *
 * On POSIX systems the check compares the (st_dev, st_ino) pair returned
 * by stat(), so it also detects two different spellings (or symlinked
 * paths, when follow_symlink is true) of the same underlying file.
 * On Windows, where this inode-based check is not available, both paths
 * are converted to absolute form and compared as strings; two distinct
 * spellings of the same file may therefore compare unequal there.
 *
 * Both paths must exist: any stat() failure is reported via elog(ERROR),
 * which does not return.
 */
bool
fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location)
{
#ifndef WIN32
struct stat stat1, stat2;
/* NOTE(review): for remote locations fio_stat() presumably propagates the
 * remote errno — confirm, otherwise strerror(errno) below may be stale. */
if (fio_stat(filename1, &stat1, follow_symlink, location) < 0)
elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno));
if (fio_stat(filename2, &stat2, follow_symlink, location) < 0)
elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno));
/* same device and same inode => same file */
return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev;
#else
char *abs_name1 = make_absolute_path(filename1);
char *abs_name2 = make_absolute_path(filename2);
bool result = strcmp(abs_name1, abs_name2) == 0;
free(abs_name2);
free(abs_name1);
return result;
#endif
}
/*
* Read value of a symbolic link
* this is a wrapper about readlink() syscall

View File

@ -129,6 +129,7 @@ extern int fio_mkdir(char const* path, int mode, fio_location location);
extern int fio_chmod(char const* path, int mode, fio_location location);
extern int fio_access(char const* path, int mode, fio_location location);
extern int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_location location);
extern bool fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location);
extern ssize_t fio_readlink(const char *path, char *value, size_t valsiz, fio_location location);
extern DIR* fio_opendir(char const* path, fio_location location);
extern struct dirent * fio_readdir(DIR *dirp);

View File

@ -977,6 +977,22 @@ pgut_strndup(const char *str, size_t n)
return ret;
}
/*
 * Allocate a new string that contains filepath with the trailing
 * filename component stripped off (the trailing directory separator,
 * if any, is kept).
 * If filepath does not end with filename, a copy of the whole filepath
 * is returned instead.
 * The result must be freed by the caller.
 */
char *
pgut_str_strip_trailing_filename(const char *filepath, const char *filename)
{
	size_t	fp_len = strlen(filepath);
	size_t	fn_len = strlen(filename);

	/*
	 * Guard against filename being longer than filepath: without it,
	 * filepath + fp_len - fn_len would point before the start of the
	 * string, which is undefined behavior even before strncmp() reads it.
	 */
	if (fn_len <= fp_len &&
		strncmp(filepath + fp_len - fn_len, filename, fn_len) == 0)
		return pgut_strndup(filepath, fp_len - fn_len);
	else
		return pgut_strndup(filepath, fp_len);
}
FILE *
pgut_fopen(const char *path, const char *mode, bool missing_ok)
{

View File

@ -63,6 +63,7 @@ extern void *pgut_malloc0(size_t size);
extern void *pgut_realloc(void *p, size_t size);
extern char *pgut_strdup(const char *str);
extern char *pgut_strndup(const char *str, size_t n);
extern char *pgut_str_strip_trailing_filename(const char *filepath, const char *filename);
#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
#define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type)))

View File

@ -1828,6 +1828,133 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_undefined_wal_file_path(self):
    """
    Check that archive-push works correctly when --wal-file-path is not
    given: the current working directory of the server (PGDATA) is used
    to locate the WAL file named by --wal-file-name.
    """
    fname = self.id().split('.')[3]
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)
    # Override archive_command with one that deliberately omits
    # --wal-file-path, leaving only --wal-file-name=%f.
    if os.name == 'posix':
        archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format(
            self.probackup_path, backup_dir, 'node')
    elif os.name == 'nt':
        archive_command = '\"{0}\" archive-push -B \"{1}\" --instance \"{2}\" --wal-file-name=%f'.format(
            self.probackup_path, backup_dir, 'node').replace("\\","\\\\")
    else:
        self.assertTrue(False, 'Unexpected os family')
    self.set_auto_conf(
        node,
        {'archive_command': archive_command})
    node.slow_start()
    node.safe_psql(
        "postgres",
        "create table t_heap as select i"
        " as id from generate_series(0, 10) i")
    self.switch_wal_segment(node)

    # check that the first segment reached the archive
    self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001')

    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_intermediate_archiving(self):
    """
    Check that archive-push works correctly with a user-supplied
    --wal-file-path pointing outside the data directory (WAL files are
    first copied into an intermediate directory by archive_command, then
    pushed from there).
    """
    fname = self.id().split('.')[3]
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        initdb_params=['--data-checksums'])

    # disable WAL retention so segments are eligible for recycling
    node_pg_options = {}
    if node.major_version >= 13:
        node_pg_options['wal_keep_size'] = '0MB'
    else:
        node_pg_options['wal_keep_segments'] = '0'
    self.set_auto_conf(node, node_pg_options)

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)

    wal_dir = os.path.join(self.tmp_path, module_name, fname, 'intermediate_dir')
    shutil.rmtree(wal_dir, ignore_errors=True)
    os.makedirs(wal_dir)
    # archive_command only copies WAL into the intermediate directory;
    # archive-push is invoked manually below.
    if os.name == 'posix':
        self.set_archiving(backup_dir, 'node', node, custom_archive_command='cp -v %p {0}/%f'.format(wal_dir))
    elif os.name == 'nt':
        self.set_archiving(backup_dir, 'node', node, custom_archive_command='copy /Y "%p" "{0}\\\\%f"'.format(wal_dir.replace("\\","\\\\")))
    else:
        self.assertTrue(False, 'Unexpected os family')

    node.slow_start()
    node.safe_psql(
        "postgres",
        "create table t_heap as select i"
        " as id from generate_series(0, 10) i")
    self.switch_wal_segment(node)

    wal_segment = '000000010000000000000001'
    self.run_pb(["archive-push", "-B", backup_dir,
        "--instance=node", "-D", node.data_dir,
        "--wal-file-path", "{0}/{1}".format(wal_dir, wal_segment), "--wal-file-name", wal_segment])

    self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment)

    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_waldir_outside_pgdata_archiving(self):
    """
    Check that archive-push works correctly when pg_wal is a symlink to
    a WAL directory located outside of PGDATA (initdb --waldir).
    """
    if self.pg_config_version < self.version_to_num('10.0'):
        return unittest.skip(
            'Skipped because waldir outside pgdata is supported since PG 10')

    fname = self.id().split('.')[3]
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir')
    shutil.rmtree(external_wal_dir, ignore_errors=True)

    node = self.make_simple_node(
        base_dir=os.path.join(module_name, fname, 'node'),
        initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)])

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)

    node.slow_start()
    node.safe_psql(
        "postgres",
        "create table t_heap as select i"
        " as id from generate_series(0, 10) i")
    self.switch_wal_segment(node)

    # check that the first segment reached the archive
    self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001')

    self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_hexadecimal_timeline(self):

View File

@ -144,6 +144,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
pg_probackup archive-push -B backup-path --instance=instance_name
--wal-file-name=wal-file-name
[--wal-file-path=wal-file-path]
[-j num-threads] [--batch-size=batch_size]
[--archive-timeout=timeout]
[--no-ready-rename] [--no-sync]

View File

@ -1296,7 +1296,8 @@ class ProbackupTest(object):
def set_archiving(
self, backup_dir, instance, node, replica=False,
overwrite=False, compress=True, old_binary=False,
log_level=False, archive_timeout=False):
log_level=False, archive_timeout=False,
custom_archive_command=None):
# parse postgresql.auto.conf
options = {}
@ -1306,45 +1307,47 @@ class ProbackupTest(object):
else:
options['archive_mode'] = 'on'
if os.name == 'posix':
options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format(
self.probackup_path, backup_dir, instance)
if custom_archive_command is None:
if os.name == 'posix':
options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format(
self.probackup_path, backup_dir, instance)
elif os.name == 'nt':
options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format(
self.probackup_path.replace("\\","\\\\"),
backup_dir.replace("\\","\\\\"), instance)
elif os.name == 'nt':
options['archive_command'] = '"{0}" archive-push -B {1} --instance={2} '.format(
self.probackup_path.replace("\\","\\\\"),
backup_dir.replace("\\","\\\\"), instance)
# don`t forget to kill old_binary after remote ssh release
if self.remote and not old_binary:
options['archive_command'] += '--remote-proto=ssh '
options['archive_command'] += '--remote-host=localhost '
# don`t forget to kill old_binary after remote ssh release
if self.remote and not old_binary:
options['archive_command'] += '--remote-proto=ssh '
options['archive_command'] += '--remote-host=localhost '
if self.archive_compress and compress:
options['archive_command'] += '--compress '
if self.archive_compress and compress:
options['archive_command'] += '--compress '
if overwrite:
options['archive_command'] += '--overwrite '
if overwrite:
options['archive_command'] += '--overwrite '
options['archive_command'] += '--log-level-console=VERBOSE '
options['archive_command'] += '-j 5 '
options['archive_command'] += '--batch-size 10 '
options['archive_command'] += '--no-sync '
options['archive_command'] += '--log-level-console=VERBOSE '
options['archive_command'] += '-j 5 '
options['archive_command'] += '--batch-size 10 '
options['archive_command'] += '--no-sync '
if archive_timeout:
options['archive_command'] += '--archive-timeout={0} '.format(
archive_timeout)
if archive_timeout:
options['archive_command'] += '--archive-timeout={0} '.format(
archive_timeout)
if os.name == 'posix':
options['archive_command'] += '--wal-file-path=%p --wal-file-name=%f'
if os.name == 'posix':
options['archive_command'] += '--wal-file-path=%p --wal-file-name=%f'
elif os.name == 'nt':
options['archive_command'] += '--wal-file-path="%p" --wal-file-name="%f"'
if log_level:
options['archive_command'] += ' --log-level-console={0}'.format(log_level)
options['archive_command'] += ' --log-level-file={0} '.format(log_level)
elif os.name == 'nt':
options['archive_command'] += '--wal-file-path="%p" --wal-file-name="%f"'
if log_level:
options['archive_command'] += ' --log-level-console={0}'.format(log_level)
options['archive_command'] += ' --log-level-file={0} '.format(log_level)
else: # custom_archive_command is not None
options['archive_command'] = custom_archive_command
self.set_auto_conf(node, options)