1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-02-03 14:01:57 +02:00

Merge branch 'master' into remote_pull

This commit is contained in:
Grigory Smolkin 2019-04-05 01:05:49 +03:00
commit f1cd6c23c5
8 changed files with 202 additions and 57 deletions

View File

@ -663,8 +663,8 @@ do_backup_instance(void)
* https://github.com/postgrespro/pg_probackup/issues/48
*/
if (parray_num(backup_files_list) == 0)
elog(ERROR, "PGDATA is empty. Either it was concurrently deleted or "
if (parray_num(backup_files_list) < 100)
elog(ERROR, "PGDATA is almost empty. Either it was concurrently deleted or "
"pg_probackup do not possess sufficient permissions to list PGDATA content");
/*

View File

@ -476,7 +476,7 @@ pgBackupCreateDir(pgBackup *backup)
parray *external_list;
external_list = make_external_directory_list(backup->external_dir_str);
for (int i = 0; i < parray_num(external_list); i++)
for (i = 0; i < parray_num(external_list); i++)
{
char temp[MAXPGPATH];
/* Numeration of externaldirs starts with 1 */
@ -1022,13 +1022,14 @@ is_prolific(parray *backup_list, pgBackup *target_backup)
if (tmp_backup->parent_backup == target_backup->start_time &&
(tmp_backup->status == BACKUP_STATUS_OK ||
tmp_backup->status == BACKUP_STATUS_DONE))
{
child_counter++;
if (child_counter > 1)
return true;
}
}
if (child_counter > 1)
return true;
else
return false;
return false;
}
/*
@ -1063,35 +1064,6 @@ find_parent_full_backup(pgBackup *current_backup)
return base_full_backup;
}
/*
 * Return the first backup in backup_list whose direct parent is
 * target_backup, or NULL (after emitting a WARNING) when no such
 * child exists.  Only children in OK or DONE state qualify; when
 * several direct children are present, the first one found wins.
 */
pgBackup*
find_direct_child(parray *backup_list, pgBackup *target_backup)
{
	int idx;

	for (idx = 0; idx < parray_num(backup_list); idx++)
	{
		pgBackup *candidate = (pgBackup *) parray_get(backup_list, idx);

		/* A FULL backup cannot be anyone's child */
		if (candidate->backup_mode == BACKUP_MODE_FULL)
			continue;

		/* Skip backups hanging off some other parent */
		if (candidate->parent_backup != target_backup->start_time)
			continue;

		/* Consider only OK and DONE children */
		if (candidate->status == BACKUP_STATUS_OK ||
			candidate->status == BACKUP_STATUS_DONE)
			return candidate;
	}

	elog(WARNING, "Failed to find a direct child for backup %s",
		 base36enc(target_backup->start_time));
	return NULL;
}
/*
* Iterate over parent chain and look for any problems.
* Return 0 if chain is broken.

View File

@ -129,6 +129,9 @@ int do_retention(void)
bool retention_is_set = false; /* At least one retention policy is set */
bool backup_list_is_empty = false;
backup_deleted = false;
backup_merged = false;
/* Get a complete list of backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
@ -832,8 +835,9 @@ do_delete_instance(void)
if (rmdir(backup_instance_path) != 0)
elog(ERROR, "can't remove \"%s\": %s", backup_instance_path,
strerror(errno));
if (rmdir(arclog_path) != 0)
elog(ERROR, "can't remove \"%s\": %s", backup_instance_path,
elog(ERROR, "can't remove \"%s\": %s", arclog_path,
strerror(errno));
elog(INFO, "Instance '%s' successfully deleted", instance_name);

View File

@ -473,7 +473,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
parray_qsort(black_list, BlackListCompare);
}
file = pgFileNew(root, external_dir_num ? omit_symlink : false, external_dir_num, location);
file = pgFileNew(root, omit_symlink, external_dir_num, location);
if (file == NULL)
return;

View File

@ -673,10 +673,12 @@ merge_files(void *arg)
static void
remove_dir_with_files(const char *path)
{
parray *files = parray_new();
parray *files = parray_new();
int i;
dir_list_file(files, path, true, true, true, 0, FIO_LOCAL_HOST);
parray_qsort(files, pgFileComparePathDesc);
for (int i = 0; i < parray_num(files); i++)
for (i = 0; i < parray_num(files); i++)
{
pgFile *file = (pgFile *) parray_get(files, i);
@ -689,9 +691,11 @@ remove_dir_with_files(const char *path)
static int
get_external_index(const char *key, const parray *list)
{
int i;
if (!list) /* Nowhere to search */
return -1;
for (int i = 0; i < parray_num(list); i++)
for (i = 0; i < parray_num(list); i++)
{
if (strcmp(key, parray_get(list, i)) == 0)
return i + 1;
@ -704,11 +708,12 @@ static void
reorder_external_dirs(pgBackup *to_backup, parray *to_external,
parray *from_external)
{
char externaldir_template[MAXPGPATH];
char externaldir_template[MAXPGPATH];
int i;
pgBackupGetPath(to_backup, externaldir_template,
lengthof(externaldir_template), EXTERNAL_DIR);
for (int i = 0; i < parray_num(to_external); i++)
for (i = 0; i < parray_num(to_external); i++)
{
int from_num = get_external_index(parray_get(to_external, i),
from_external);

View File

@ -531,7 +531,6 @@ extern int pgBackupCompareId(const void *f1, const void *f2);
extern int pgBackupCompareIdDesc(const void *f1, const void *f2);
extern int pgBackupCompareIdEqual(const void *l, const void *r);
extern pgBackup* find_direct_child(parray *backup_list, pgBackup *target_backup);
extern pgBackup* find_parent_full_backup(pgBackup *current_backup);
extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup);
extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive);

View File

@ -352,17 +352,11 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s'
}
)
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
# self.set_archiving(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL BACKUP
@ -372,8 +366,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"md5(i::text)::tsvector as tsvector from generate_series(0,1) i")
full_result = node.execute("postgres", "SELECT * FROM t_heap")
full_backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='full', options=['--stream'])
backup_dir, 'node', node, backup_type='full')
# delta BACKUP
node.safe_psql(
@ -382,8 +375,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
"md5(i::text)::tsvector as tsvector from generate_series(0,2) i")
delta_result = node.execute("postgres", "SELECT * FROM t_heap")
delta_backup_id = self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--stream'])
backup_dir, 'node', node, backup_type='delta')
# Drop Node
node.cleanup()

View File

@ -1172,6 +1172,179 @@ class ExternalTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_external_dir_contain_symlink_on_dir(self):
    """
    Check that backup works correctly if external dir is symlink,
    symlink pointing to external dir should be followed,
    but restored as directory
    """
    case_name = self.id().split('.')[3]
    backups_path = os.path.join(
        self.tmp_path, module_name, case_name, 'backup')
    work_dir = os.path.join(self.tmp_path, module_name, case_name)
    shutil.rmtree(work_dir, ignore_errors=True)

    node = self.make_simple_node(
        base_dir=os.path.join(module_name, case_name, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backups_path)
    self.add_instance(backups_path, 'node', node)
    node.slow_start()

    external_dir = self.get_tblspace_path(node, 'external_dir')
    dir_in_external_dir = os.path.join(external_dir, 'dir')

    node.pgbench_init(scale=3)

    # temp FULL backup, used only as a data source below
    backup_id = self.backup_node(
        backups_path, 'node', node, options=["-j", "4", "--stream"])

    # fill a directory with data by restoring the temp backup into it
    work_dir = os.path.join(self.tmp_path, module_name, case_name)
    symlinked_dir = os.path.join(work_dir, 'symlinked')

    self.restore_node(
        backups_path, 'node', node,
        data_dir=symlinked_dir, options=["-j", "4"])

    # drop temp FULL backup
    self.delete_pb(backups_path, 'node', backup_id=backup_id)

    # create a symlink to that directory inside the external directory
    print(symlinked_dir)
    print(dir_in_external_dir)
    os.mkdir(external_dir)
    os.symlink(symlinked_dir, dir_in_external_dir)

    # FULL backup with external directories
    backup_id = self.backup_node(
        backups_path, 'node', node,
        options=[
            "-j", "4", "--stream",
            "-E", "{0}".format(
                external_dir)])

    pgdata = self.pgdata_content(
        node.base_dir, exclude_dirs=['logs'])

    node_restored = self.make_simple_node(
        base_dir=os.path.join(module_name, case_name, 'node_restored'))

    # RESTORE into a fresh node, remapping the external directory
    node_restored.cleanup()

    external_dir_new = self.get_tblspace_path(
        node_restored, 'external_dir')

    self.restore_node(
        backups_path, 'node', node_restored,
        options=[
            "-j", "4", "--external-mapping={0}={1}".format(
                external_dir, external_dir_new)])

    pgdata_restored = self.pgdata_content(
        node_restored.base_dir, exclude_dirs=['logs'])
    self.compare_pgdata(pgdata, pgdata_restored)

    self.assertEqual(
        external_dir,
        self.show_pb(
            backups_path, 'node',
            backup_id=backup_id)['external-dirs'])

    # Clean after yourself
    self.del_test_dir(module_name, case_name)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_external_dir_contain_symlink_on_file(self):
    """
    Check that backup works correctly if external dir contains
    a symlink to a file: the symlink should be followed during
    backup, and the restored external dir must match the original
    """
    case_name = self.id().split('.')[3]
    backups_path = os.path.join(
        self.tmp_path, module_name, case_name, 'backup')
    work_dir = os.path.join(self.tmp_path, module_name, case_name)
    shutil.rmtree(work_dir, ignore_errors=True)

    node = self.make_simple_node(
        base_dir=os.path.join(module_name, case_name, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backups_path)
    self.add_instance(backups_path, 'node', node)
    node.slow_start()

    external_dir = self.get_tblspace_path(node, 'external_dir')
    file_in_external_dir = os.path.join(external_dir, 'file')

    node.pgbench_init(scale=3)

    # temp FULL backup, used only as a data source below
    backup_id = self.backup_node(
        backups_path, 'node', node, options=["-j", "4", "--stream"])

    # fill a directory with data by restoring the temp backup into it
    work_dir = os.path.join(self.tmp_path, module_name, case_name)
    symlinked_dir = os.path.join(work_dir, 'symlinked')

    self.restore_node(
        backups_path, 'node', node,
        data_dir=symlinked_dir, options=["-j", "4"])

    # drop temp FULL backup
    self.delete_pb(backups_path, 'node', backup_id=backup_id)

    # create a symlink to a file inside the external directory
    src_file = os.path.join(symlinked_dir, 'postgresql.conf')
    os.mkdir(external_dir)
    os.symlink(src_file, file_in_external_dir)

    # FULL backup with external directories
    backup_id = self.backup_node(
        backups_path, 'node', node,
        options=[
            "-j", "4", "--stream",
            "-E", "{0}".format(
                external_dir)])

    pgdata = self.pgdata_content(
        node.base_dir, exclude_dirs=['logs'])

    node_restored = self.make_simple_node(
        base_dir=os.path.join(module_name, case_name, 'node_restored'))

    # RESTORE into a fresh node, remapping the external directory
    node_restored.cleanup()

    external_dir_new = self.get_tblspace_path(
        node_restored, 'external_dir')

    self.restore_node(
        backups_path, 'node', node_restored,
        options=[
            "-j", "4", "--external-mapping={0}={1}".format(
                external_dir, external_dir_new)])

    pgdata_restored = self.pgdata_content(
        node_restored.base_dir, exclude_dirs=['logs'])
    self.compare_pgdata(pgdata, pgdata_restored)

    self.assertEqual(
        external_dir,
        self.show_pb(
            backups_path, 'node',
            backup_id=backup_id)['external-dirs'])

    # Clean after yourself
    self.del_test_dir(module_name, case_name)
# @unittest.expectedFailure
# @unittest.skip("skip")
def test_external_dir_is_tablespace(self):