mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-01-05 13:20:31 +02:00

Merge branch 'master' into pgpro-2065

Anastasia 2019-04-12 16:03:18 +03:00
commit 1d5b9e469d
21 changed files with 343 additions and 89 deletions

View File

@@ -182,6 +182,7 @@ sub build_pgprobackup
	$probackup->AddFile("$pgsrc/src/bin/pg_rewind/datapagemap.c");
	$probackup->AddFile("$pgsrc/src/interfaces/libpq/pthread-win32.c");
+	$probackup->AddFile("$pgsrc/src/timezone/strftime.c");
	$probackup->AddIncludeDir("$pgsrc/src/bin/pg_basebackup");
	$probackup->AddIncludeDir("$pgsrc/src/bin/pg_rewind");

View File

@@ -596,6 +596,15 @@ do_backup_instance(void)
			strlen(" with pg_probackup"));
	pg_start_backup(label, smooth_checkpoint, &current);

+	/* For incremental backup check that start_lsn is not from the past */
+	if (current.backup_mode != BACKUP_MODE_FULL &&
+		prev_backup->start_lsn > current.start_lsn)
+		elog(ERROR, "Current START LSN %X/%X is lower than START LSN %X/%X of previous backup %s. "
+			"It may indicate that we are trying to backup PostgreSQL instance from the past.",
+			(uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn),
+			(uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn),
+			base36enc(prev_backup->start_time));
+
	/* Update running backup meta with START LSN */
	write_backup(&current);
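The new check compares raw 64-bit LSNs and reports them in PostgreSQL's conventional %X/%X notation by splitting each value into 32-bit halves, so the output matches what pg_waldump and pg_controldata print. A minimal standalone sketch of that convention (the XLogRecPtr typedef and the sample values are assumptions for illustration, not taken from this diff):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;    /* assumption: mirrors PostgreSQL's 64-bit LSN */

int
main(void)
{
    XLogRecPtr prev_start_lsn = UINT64_C(0x1000002AD8);  /* hypothetical previous backup */
    XLogRecPtr curr_start_lsn = UINT64_C(0x0000002AD8);  /* hypothetical current backup */

    /* Same sanity test as the hunk above: a current LSN below the
     * previous backup's LSN means the instance is "from the past". */
    if (prev_start_lsn > curr_start_lsn)
        printf("current START LSN %X/%X is lower than previous %X/%X\n",
               (unsigned) (curr_start_lsn >> 32), (unsigned) curr_start_lsn,
               (unsigned) (prev_start_lsn >> 32), (unsigned) prev_start_lsn);
    return 0;
}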
@@ -673,8 +682,8 @@ do_backup_instance(void)
	 * https://github.com/postgrespro/pg_probackup/issues/48
	 */

-	if (parray_num(backup_files_list) == 0)
-		elog(ERROR, "PGDATA is empty. Either it was concurrently deleted or "
+	if (parray_num(backup_files_list) < 100)
+		elog(ERROR, "PGDATA is almost empty. Either it was concurrently deleted or "
			"pg_probackup do not possess sufficient permissions to list PGDATA content");
/*

View File

@@ -476,7 +476,7 @@ pgBackupCreateDir(pgBackup *backup)
		parray *external_list;

		external_list = make_external_directory_list(backup->external_dir_str);
-		for (int i = 0; i < parray_num(external_list); i++)
+		for (i = 0; i < parray_num(external_list); i++)
{
char temp[MAXPGPATH];
/* Numeration of externaldirs starts with 1 */
@@ -1026,13 +1026,14 @@ is_prolific(parray *backup_list, pgBackup *target_backup)
		if (tmp_backup->parent_backup == target_backup->start_time &&
			(tmp_backup->status == BACKUP_STATUS_OK ||
			tmp_backup->status == BACKUP_STATUS_DONE))
+		{
			child_counter++;
+			if (child_counter > 1)
+				return true;
+		}
	}

-	if (child_counter > 1)
-		return true;
-	else
-		return false;
+	return false;
}
/*
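The is_prolific() rewrite above folds the "more than one child" test into the loop, so scanning stops as soon as a second qualifying child is found instead of counting the whole list first. A minimal sketch of the same early-exit pattern (the names and the array-based list are illustrative assumptions, not pg_probackup code):

#include <stdbool.h>
#include <stdio.h>

/* Return true as soon as more than one element matches the key. */
static bool
has_more_than_one(const int *items, int n, int key)
{
    int count = 0;
    int i;

    for (i = 0; i < n; i++)
    {
        if (items[i] == key)
        {
            count++;
            if (count > 1)
                return true;    /* early exit: no need to scan the rest */
        }
    }
    return false;
}

int
main(void)
{
    int ids[] = {7, 3, 7, 9};

    printf("%d\n", has_more_than_one(ids, 4, 7));    /* prints 1 */
    return 0;
}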
@@ -1067,35 +1068,6 @@ find_parent_full_backup(pgBackup *current_backup)
	return base_full_backup;
}

-/*
- * Find closest child of target_backup. If there are several direct
- * offsprings in backup_list, then first win.
- */
-pgBackup*
-find_direct_child(parray *backup_list, pgBackup *target_backup)
-{
-	int i;
-
-	for (i = 0; i < parray_num(backup_list); i++)
-	{
-		pgBackup *tmp_backup = (pgBackup *) parray_get(backup_list, i);
-
-		if (tmp_backup->backup_mode == BACKUP_MODE_FULL)
-			continue;
-
-		/* Consider only OK and DONE children */
-		if (tmp_backup->parent_backup == target_backup->start_time &&
-			(tmp_backup->status == BACKUP_STATUS_OK ||
-			tmp_backup->status == BACKUP_STATUS_DONE))
-		{
-			return tmp_backup;
-		}
-	}
-	elog(WARNING, "Failed to find a direct child for backup %s",
-		base36enc(target_backup->start_time));
-	return NULL;
-}
/*
* Iterate over parent chain and look for any problems.
* Return 0 if chain is broken.
@@ -1204,4 +1176,4 @@ get_backup_index_number(parray *backup_list, pgBackup *backup)
	}
	elog(WARNING, "Failed to find backup %s", base36enc(backup->start_time));
	return -1;
-}
+}

View File

@@ -599,7 +599,7 @@ backup_data_file(backup_files_arg* arguments,
	if (file->size % BLCKSZ != 0)
	{
		fclose(in);
-		elog(ERROR, "File: %s, invalid file size %zu", file->path, file->size);
+		elog(WARNING, "File: %s, invalid file size %zu", file->path, file->size);
	}
/*

View File

@@ -129,6 +129,9 @@ int do_retention(void)
	bool		retention_is_set = false; /* At least one retention policy is set */
	bool		backup_list_is_empty = false;

+	backup_deleted = false;
+	backup_merged = false;
/* Get a complete list of backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
@@ -411,7 +414,7 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_list)
			continue;
		}

-		/* FULL backup in purge list, thanks to sparsing of keep_list current backup is
+		/* FULL backup in purge list, thanks to sparsing of keep_list current backup is
* final target for merge, but there could be intermediate incremental
* backups from purge_list.
*/
@@ -832,8 +835,9 @@ do_delete_instance(void)
	if (rmdir(backup_instance_path) != 0)
		elog(ERROR, "can't remove \"%s\": %s", backup_instance_path,
			 strerror(errno));
	if (rmdir(arclog_path) != 0)
-		elog(ERROR, "can't remove \"%s\": %s", backup_instance_path,
+		elog(ERROR, "can't remove \"%s\": %s", arclog_path,
			 strerror(errno));
elog(INFO, "Instance '%s' successfully deleted", instance_name);

View File

@@ -472,7 +472,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
		parray_qsort(black_list, BlackListCompare);
	}

-	file = pgFileNew(root, external_dir_num ? omit_symlink : false, omit_symlink);
+	file = pgFileNew(root, omit_symlink, external_dir_num);
if (file == NULL)
return;

View File

@@ -81,8 +81,8 @@ do_merge(time_t backup_id)
			/* It is possible that previous merging was interrupted */
			backup->status != BACKUP_STATUS_MERGING &&
			backup->status != BACKUP_STATUS_DELETING)
-			elog(ERROR, "Backup %s has status: %s",
-				base36enc(backup->start_time), status2str(backup->status));
+			elog(ERROR, "Backup %s has status: %s",
+				base36enc(backup->start_time), status2str(backup->status));
if (backup->backup_mode == BACKUP_MODE_FULL)
elog(ERROR, "Backup %s is full backup",
@@ -109,10 +109,8 @@ do_merge(time_t backup_id)
	if (full_backup->status != BACKUP_STATUS_OK &&
		/* It is possible that previous merging was interrupted */
		full_backup->status != BACKUP_STATUS_MERGING)
-		elog(ERROR, "Backup %s has status: %s",
-			base36enc(full_backup->start_time), status2str(full_backup->status));
-	//Assert(full_backup_idx != dest_backup_idx);
+		elog(ERROR, "Backup %s has status: %s",
+			base36enc(full_backup->start_time), status2str(full_backup->status));
/* form merge list */
while(dest_backup->parent_backup_link)
@@ -122,8 +120,8 @@ do_merge(time_t backup_id)
			/* It is possible that previous merging was interrupted */
			dest_backup->status != BACKUP_STATUS_MERGING &&
			dest_backup->status != BACKUP_STATUS_DELETING)
-			elog(ERROR, "Backup %s has status: %s",
-				base36enc(dest_backup->start_time), status2str(dest_backup->status));
+			elog(ERROR, "Backup %s has status: %s",
+				base36enc(dest_backup->start_time), status2str(dest_backup->status));
parray_append(merge_list, dest_backup);
dest_backup = dest_backup->parent_backup_link;
@@ -205,7 +203,8 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
	 * BACKUP_STATUS_MERGING status.
	 */
	Assert(from_backup->status == BACKUP_STATUS_OK ||
-		   from_backup->status == BACKUP_STATUS_MERGING);
+		   from_backup->status == BACKUP_STATUS_MERGING ||
+		   from_backup->status == BACKUP_STATUS_DELETING);
pgBackupValidate(from_backup);
if (from_backup->status == BACKUP_STATUS_CORRUPT)
elog(ERROR, "Interrupt merging");
@@ -673,10 +672,12 @@ merge_files(void *arg)
static void
remove_dir_with_files(const char *path)
{
-	parray *files = parray_new();
+	parray *files = parray_new();
+	int		i;

	dir_list_file(files, path, true, true, true, 0);
	parray_qsort(files, pgFileComparePathDesc);
-	for (int i = 0; i < parray_num(files); i++)
+	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, i);
@@ -689,9 +690,11 @@ remove_dir_with_files(const char *path)
static int
get_external_index(const char *key, const parray *list)
{
+	int		i;
+
	if (!list) /* Nowhere to search */
		return -1;
-	for (int i = 0; i < parray_num(list); i++)
+	for (i = 0; i < parray_num(list); i++)
	{
		if (strcmp(key, parray_get(list, i)) == 0)
			return i + 1;
@@ -704,11 +707,12 @@ static void
reorder_external_dirs(pgBackup *to_backup, parray *to_external,
					  parray *from_external)
{
-	char externaldir_template[MAXPGPATH];
+	char	externaldir_template[MAXPGPATH];
+	int		i;

	pgBackupGetPath(to_backup, externaldir_template,
					lengthof(externaldir_template), EXTERNAL_DIR);
-	for (int i = 0; i < parray_num(to_external); i++)
+	for (i = 0; i < parray_num(to_external); i++)
	{
		int from_num = get_external_index(parray_get(to_external, i),
										  from_external);
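Several hunks above replace C99 loop-scoped declarations (for (int i = 0; ...)) with an i declared at the top of the block. Presumably this keeps the tree compiling under C89/C90 rules, e.g. with the older MSVC toolchain served by the Windows project file changed earlier in this commit. A minimal illustration of the two forms (not pg_probackup code):

#include <stdio.h>

int
main(void)
{
    int i;                      /* C89: declarations precede statements */

    for (i = 0; i < 3; i++)     /* portable form the diff switches to */
        printf("%d\n", i);

    /*
     * C99-only form the diff removes:
     *
     *     for (int i = 0; i < 3; i++)
     *         ...
     */
    return 0;
}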

View File

@@ -1191,8 +1191,8 @@ XLogThreadWorker(void *arg)
		 * Consider thread_arg->endSegNo and thread_arg->endpoint only if
		 * they are valid.
		 */
-			xlogreader->ReadRecPtr == thread_arg->endpoint &&
-			nextSegNo > thread_arg->endSegNo)
+			xlogreader->ReadRecPtr >= thread_arg->endpoint &&
+			nextSegNo >= thread_arg->endSegNo)
break;
}
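Relaxing the stop condition from strict equality to >= lets the reader thread terminate on the first record at or beyond the requested endpoint; with ==, a record boundary that never lands exactly on the endpoint would let the loop run past it. A small sketch of the idea (types and names are illustrative assumptions, not pg_probackup's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;

/* Stop once the last-read position reaches or passes the target. */
static bool
reached_endpoint(XLogRecPtr read_ptr, XLogRecPtr endpoint,
                 uint64_t seg_no, uint64_t end_seg_no)
{
    return read_ptr >= endpoint && seg_no >= end_seg_no;
}

int
main(void)
{
    /* A record ending just past the endpoint still stops the loop. */
    printf("%d\n", reached_endpoint(0x2AE0, 0x2AD8, 5, 5));    /* prints 1 */
    return 0;
}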

View File

@@ -22,6 +22,7 @@
const char *PROGRAM_VERSION = "2.0.27";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";
+const char *PROGRAM_FULL_PATH = NULL;
typedef enum ProbackupSubcmd
{
@@ -211,6 +212,14 @@ main(int argc, char *argv[])
	init_config(&instance_config);

	PROGRAM_NAME = get_progname(argv[0]);
+	PROGRAM_FULL_PATH = palloc0(MAXPGPATH);
+
+	if (find_my_exec(argv[0], (char *) PROGRAM_FULL_PATH) < 0)
+	{
+		fprintf(stderr, _("%s: could not find own program executable\n"), PROGRAM_NAME);
+		exit(1);
+	}
+
	set_pglocale_pgservice(argv[0], "pgscripts");
#if PG_VERSION_NUM >= 110000
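find_my_exec() here is PostgreSQL's frontend helper (declared in port.h): it resolves argv[0] via the current directory and PATH into an absolute path, writing the result into a caller-supplied buffer of MAXPGPATH bytes and returning 0 on success or -1 on failure. A hedged standalone usage sketch, using a stack buffer instead of the palloc0() above (building it requires PostgreSQL's frontend headers and libpgport):

#include "postgres_fe.h"
#include "port.h"    /* find_my_exec(), MAXPGPATH */

int
main(int argc, char *argv[])
{
    char    my_exec_path[MAXPGPATH];

    (void) argc;    /* unused in this sketch */

    if (find_my_exec(argv[0], my_exec_path) < 0)
    {
        fprintf(stderr, "%s: could not find own program executable\n", argv[0]);
        return 1;
    }
    printf("absolute path: %s\n", my_exec_path);
    return 0;
}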

View File

@@ -517,7 +517,6 @@ extern int pgBackupCompareId(const void *f1, const void *f2);
extern int pgBackupCompareIdDesc(const void *f1, const void *f2);
extern int pgBackupCompareIdEqual(const void *l, const void *r);

-extern pgBackup* find_direct_child(parray *backup_list, pgBackup *target_backup);
extern pgBackup* find_parent_full_backup(pgBackup *current_backup);
extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup);
extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive);

View File

@@ -796,7 +796,7 @@ create_recovery_conf(time_t backup_id,
		fprintf(fp, "restore_command = '%s archive-get -B %s --instance %s "
					"--wal-file-path %%p --wal-file-name %%f'\n",
-				PROGRAM_NAME, backup_path, instance_name);
+				PROGRAM_FULL_PATH, backup_path, instance_name);
/*
* We've already checked that only one of the four following mutually
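Because the generated restore_command now embeds PROGRAM_FULL_PATH rather than the bare program name, recovery no longer depends on pg_probackup being resolvable via PATH when the server runs the command. With hypothetical paths substituted into the fprintf format above, the emitted recovery.conf line would look roughly like:

restore_command = '/usr/local/bin/pg_probackup archive-get -B /mnt/backups --instance node --wal-file-path %p --wal-file-name %f'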

View File

@@ -474,7 +474,11 @@ logfile_getname(const char *format, time_t timestamp)
	len = strlen(filename);

	/* Treat log_filename as a strftime pattern */
+#ifdef WIN32
+	if (pg_strftime(filename + len, MAXPGPATH - len, format, tm) <= 0)
+#else
	if (strftime(filename + len, MAXPGPATH - len, format, tm) <= 0)
+#endif
		elog_stderr(ERROR, "strftime(%s) failed: %s", format, strerror(errno));
return filename;

View File

@@ -20,6 +20,7 @@ typedef void (*pgut_atexit_callback)(bool fatal, void *userdata);
 * pgut client variables and functions
 */
extern const char *PROGRAM_NAME;
+extern const char *PROGRAM_FULL_PATH;
extern const char *PROGRAM_VERSION;
extern const char *PROGRAM_URL;
extern const char *PROGRAM_EMAIL;

View File

@@ -19,7 +19,7 @@ def load_tests(loader, tests, pattern):
    # suite.addTests(loader.loadTestsFromModule(cfs_backup))
    # suite.addTests(loader.loadTestsFromModule(cfs_restore))
    # suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
-    # suite.addTests(loader.loadTestsFromModule(logging))
+    suite.addTests(loader.loadTestsFromModule(logging))
    suite.addTests(loader.loadTestsFromModule(compression))
    suite.addTests(loader.loadTestsFromModule(delete))
    suite.addTests(loader.loadTestsFromModule(delta))

View File

@@ -422,10 +422,10 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
-                'max_wal_senders': '2',
                'archive_timeout': '10s',
-                'max_wal_size': '1GB'}
-            )
+                'checkpoint_timeout': '30s',
+                'max_wal_size': '16MB'})
self.init_pb(backup_dir)
# ADD INSTANCE 'MASTER'
self.add_instance(backup_dir, 'master', master)

View File

@@ -55,6 +55,38 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)

+    # @unittest.skip("skip")
+    # @unittest.expectedFailure
+    def test_del_instance_archive(self):
+        """delete full backups"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'])
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # full backup
+        self.backup_node(backup_dir, 'node', node)
+
+        # full backup
+        self.backup_node(backup_dir, 'node', node)
+
+        # restore
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node)
+        node.slow_start()
+
+        # Delete instance
+        self.del_instance(backup_dir, 'node')
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_delete_archive_mix_compress_and_non_compressed_segments(self):

View File

@@ -352,17 +352,11 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True,
-            initdb_params=['--data-checksums'],
-            pg_options={
-                'wal_level': 'replica',
-                'max_wal_senders': '2',
-                'checkpoint_timeout': '30s'
-            }
-        )
+            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
-        # self.set_archiving(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()
# FULL BACKUP
@@ -372,8 +366,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
            "md5(i::text)::tsvector as tsvector from generate_series(0,1) i")
        full_result = node.execute("postgres", "SELECT * FROM t_heap")
        full_backup_id = self.backup_node(
-            backup_dir, 'node', node,
-            backup_type='full', options=['--stream'])
+            backup_dir, 'node', node, backup_type='full')
# delta BACKUP
node.safe_psql(
@@ -382,8 +375,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
            "md5(i::text)::tsvector as tsvector from generate_series(0,2) i")
        delta_result = node.execute("postgres", "SELECT * FROM t_heap")
        delta_backup_id = self.backup_node(
-            backup_dir, 'node', node,
-            backup_type='delta', options=['--stream'])
+            backup_dir, 'node', node, backup_type='delta')
# Drop Node
node.cleanup()
@@ -719,14 +711,12 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
                1, 0,
                "Expecting Error because we are connecting to deleted database"
                "\n Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd)
-            )
+                    repr(self.output), self.cmd))
        except QueryException as e:
            self.assertTrue(
                'FATAL: database "db1" does not exist' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd)
-            )
+                    repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
@@ -1315,3 +1305,53 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)

+    def test_delta_backup_from_past(self):
+        """
+        make node, take FULL stream backup, take DELTA stream backup,
+        restore FULL backup, try to take second DELTA stream backup
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'])
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, options=['--stream'])
+
+        node.pgbench_init(scale=3)
+
+        # First DELTA
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='delta', options=['--stream'])
+
+        # Restore FULL backup
+        node.cleanup()
+        self.restore_node(backup_dir, 'node', node, backup_id=backup_id)
+        node.slow_start()
+
+        # Second DELTA backup
+        try:
+            self.backup_node(
+                backup_dir, 'node', node,
+                backup_type='delta', options=['--stream'])
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because we are backing up an instance from the past"
+                "\n Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except QueryException as e:
+            self.assertTrue(
+                'Insert error message' in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)

View File

@@ -39,7 +39,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
	[--log-directory=log-directory]
	[--log-rotation-size=log-rotation-size]
	[--log-rotation-age=log-rotation-age]
-	[--delete-expired] [--delete-wal]
+	[--delete-expired] [--delete-wal] [--merge-expired]
	[--retention-redundancy=retention-redundancy]
	[--retention-window=retention-window]
	[--compress]
@@ -51,16 +51,19 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
	[--master-port=port] [--master-user=user_name]
	[--replica-timeout=timeout]
	[--skip-block-validation]
+	[--external-dirs=external-directory-path]

 pg_probackup restore -B backup-path --instance=instance_name
-	[-D pgdata-path] [-i backup-id] [--progress]
+	[-D pgdata-path] [-i backup-id] [-j num-threads]
	[--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]
-	[--timeline=timeline] [-T OLDDIR=NEWDIR]
+	[--timeline=timeline] [-T OLDDIR=NEWDIR] [--progress]
+	[--external-mapping=OLDDIR=NEWDIR]
	[--immediate] [--recovery-target-name=target-name]
	[--recovery-target-action=pause|promote|shutdown]
	[--restore-as-replica]
	[--no-validate]
	[--skip-block-validation]
+	[--skip-external-dirs]
pg_probackup validate -B backup-path [--instance=instance_name]
[-i backup-id] [--progress] [-j num-threads]
@@ -74,10 +77,11 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database.
	[--format=format]

 pg_probackup delete -B backup-path --instance=instance_name
-	[--wal] [-i backup-id | --expired]
+	[--wal] [-i backup-id | --expired | --merge-expired]
+	[--dry-run]

 pg_probackup merge -B backup-path --instance=instance_name
-	-i backup-id
+	-i backup-id [--progress] [-j num-threads]
pg_probackup add-instance -B backup-path -D pgdata-path
--instance=instance_name

View File

@@ -1172,6 +1172,179 @@ class ExternalTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)

+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_external_dir_contain_symlink_on_dir(self):
+        """
+        Check that backup works correctly if external dir is symlink,
+        symlink pointing to external dir should be followed,
+        but restored as directory
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        core_dir = os.path.join(self.tmp_path, module_name, fname)
+        shutil.rmtree(core_dir, ignore_errors=True)
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        external_dir = self.get_tblspace_path(node, 'external_dir')
+        dir_in_external_dir = os.path.join(external_dir, 'dir')
+
+        node.pgbench_init(scale=3)
+
+        # temp FULL backup
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, options=["-j", "4", "--stream"])
+
+        # fill some directory with data
+        core_dir = os.path.join(self.tmp_path, module_name, fname)
+        symlinked_dir = os.path.join(core_dir, 'symlinked')
+
+        self.restore_node(
+            backup_dir, 'node', node,
+            data_dir=symlinked_dir, options=["-j", "4"])
+
+        # drop temp FULL backup
+        self.delete_pb(backup_dir, 'node', backup_id=backup_id)
+
+        # create symlink to directory in external directory
+        print(symlinked_dir)
+        print(dir_in_external_dir)
+        os.mkdir(external_dir)
+        os.symlink(symlinked_dir, dir_in_external_dir)
+
+        # FULL backup with external directories
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--stream",
+                "-E", "{0}".format(
+                    external_dir)])
+
+        pgdata = self.pgdata_content(
+            node.base_dir, exclude_dirs=['logs'])
+
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node_restored'))
+
+        # RESTORE
+        node_restored.cleanup()
+
+        external_dir_new = self.get_tblspace_path(
+            node_restored, 'external_dir')
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=[
+                "-j", "4", "--external-mapping={0}={1}".format(
+                    external_dir, external_dir_new)])
+
+        pgdata_restored = self.pgdata_content(
+            node_restored.base_dir, exclude_dirs=['logs'])
+
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        self.assertEqual(
+            external_dir,
+            self.show_pb(
+                backup_dir, 'node',
+                backup_id=backup_id)['external-dirs'])
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_external_dir_contain_symlink_on_file(self):
+        """
+        Check that backup works correctly if external dir is symlink,
+        symlink pointing to external dir should be followed,
+        but restored as directory
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        core_dir = os.path.join(self.tmp_path, module_name, fname)
+        shutil.rmtree(core_dir, ignore_errors=True)
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        external_dir = self.get_tblspace_path(node, 'external_dir')
+        file_in_external_dir = os.path.join(external_dir, 'file')
+
+        node.pgbench_init(scale=3)
+
+        # temp FULL backup
+        backup_id = self.backup_node(
+            backup_dir, 'node', node, options=["-j", "4", "--stream"])
+
+        # fill some directory with data
+        core_dir = os.path.join(self.tmp_path, module_name, fname)
+        symlinked_dir = os.path.join(core_dir, 'symlinked')
+
+        self.restore_node(
+            backup_dir, 'node', node,
+            data_dir=symlinked_dir, options=["-j", "4"])
+
+        # drop temp FULL backup
+        self.delete_pb(backup_dir, 'node', backup_id=backup_id)
+
+        # create symlink to directory in external directory
+        src_file = os.path.join(symlinked_dir, 'postgresql.conf')
+        os.mkdir(external_dir)
+        os.symlink(src_file, file_in_external_dir)
+
+        # FULL backup with external directories
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--stream",
+                "-E", "{0}".format(
+                    external_dir)])
+
+        pgdata = self.pgdata_content(
+            node.base_dir, exclude_dirs=['logs'])
+
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node_restored'))
+
+        # RESTORE
+        node_restored.cleanup()
+
+        external_dir_new = self.get_tblspace_path(
+            node_restored, 'external_dir')
+
+        self.restore_node(
+            backup_dir, 'node', node_restored,
+            options=[
+                "-j", "4", "--external-mapping={0}={1}".format(
+                    external_dir, external_dir_new)])
+
+        pgdata_restored = self.pgdata_content(
+            node_restored.base_dir, exclude_dirs=['logs'])
+
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        self.assertEqual(
+            external_dir,
+            self.show_pb(
+                backup_dir, 'node',
+                backup_id=backup_id)['external-dirs'])
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_external_dir_is_tablespace(self):

View File

@@ -1197,7 +1197,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
    def test_continue_failed_merge_2(self):
        """
-        Check that failed MERGE on delete can`t be continued
+        Check that failed MERGE on delete can be continued
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -1253,6 +1253,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
        backup_id_deleted = self.show_pb(backup_dir, "node")[1]["id"]

+        # TODO check that full backup has meta info is equal to DELETTING
+
        # Try to continue failed MERGE
        self.merge_backup(backup_dir, "node", backup_id)
# Clean after yourself

View File

@@ -141,10 +141,10 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
-                'wal_level': 'replica',
-                'max_wal_senders': '2',
-                'archive_timeout': '10s'}
-            )
+                'archive_timeout': '10s',
+                'checkpoint_timeout': '30s',
+                'max_wal_size': '16MB'})
        self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)