Mirror of https://github.com/postgrespro/pg_probackup.git (synced 2025-01-20 11:34:51 +02:00)

Commit 59715f24de: Merge branch 'release_2_4' into issue_205

doc/pgprobackup.xml

@@ -4055,10 +4055,10 @@ pg_probackup archive-push -B <replaceable>backup_dir</replaceable> --instance <r
     corruption.
   </para>
   <para>
-    To speed up archiving, you can specify the <option>-j</option> option
-    to run <command>archive-push</command> on multiple threads.
-    If you provide the <option>--batch-size</option> option, WAL files
-    will be copied in batches of the specified size.
+    To speed up archiving, you can specify the <option>--batch-size</option> option
+    to copy WAL segments in batches of the specified size.
+    If <option>--batch-size</option> option is used, then you can also specify
+    the <option>-j</option> option to copy the batch of WAL segments on multiple threads.
   </para>
   <para>
     WAL segments copied to the archive are synced to disk unless
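
Note: the rewritten paragraph makes batching the primary speed-up, with -j distributing a batch across threads. A sketch of a resulting archive_command, assuming a backup catalog at /mnt/backups and an instance named node (both placeholders, not from this commit):

    archive_command = 'pg_probackup archive-push -B /mnt/backups --instance node --wal-file-path=%p --wal-file-name=%f --batch-size=10 -j 4'
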

@@ -4096,10 +4096,10 @@ pg_probackup archive-get -B <replaceable>backup_dir</replaceable> --instance <re
   </para>

   <para>
-    To speed up recovery, you can specify the <option>-j</option> option
-    to run <command>archive-get</command> on multiple threads.
-    If you provide the <option>--batch-size</option> option, WAL segments
-    will be copied in batches of the specified size.
+    To speed up recovery, you can specify the <option>--batch-size</option> option
+    to copy WAL segments in batches of the specified size.
+    If <option>--batch-size</option> option is used, then you can also specify
+    the <option>-j</option> option to copy the batch of WAL segments on multiple threads.
   </para>

   <para>
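
The same batching model applies on restore; a corresponding restore_command sketch under the same placeholder assumptions:

    restore_command = 'pg_probackup archive-get -B /mnt/backups --instance node --wal-file-path=%p --wal-file-name=%f --batch-size=10 -j 4'
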

src/archive.c

@@ -1109,7 +1109,7 @@ do_archive_get(InstanceConfig *instance, const char *prefetch_dir_arg,
 	{
 		/* discard prefetch */
//		n_fetched = 0;
-		rmtree(prefetch_dir, false);
+		pgut_rmtree(prefetch_dir, false, false);
 	}
 }
 else
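
The new third argument makes the removal non-strict. A summary of the call-site semantics, based on the pgut_rmtree() implementation added below in src/utils/pgut.c:

    /* pgut_rmtree(path, rmtopdir, strict)
     *   rmtopdir = false: remove the contents of "path" but keep the directory itself;
     *   strict   = false: report failures at WARNING level and return false, so a
     *                     failed prefetch cleanup does not abort archive-get.
     */
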

src/backup.c

@@ -191,7 +191,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool
 	{
 		/* try to setup multi-timeline backup chain */
 		elog(WARNING, "Valid backup on current timeline %u is not found, "
-					"try to look up on previous timelines",
+					"trying to look up on previous timelines",
 				current.tli);

 		tli_list = catalog_get_timelines(&instance_config);

src/delete.c

@@ -985,7 +985,7 @@ do_delete_instance(void)
 	parray_free(backup_list);

 	/* Delete all wal files. */
-	rmtree(arclog_path, false);
+	pgut_rmtree(arclog_path, false, true);

 	/* Delete backup instance config file */
 	join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
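
Unlike the prefetch cleanup in do_archive_get() above, this call passes strict = true: presumably deleting an instance's WAL archive must either succeed completely or stop, so the first file that cannot be removed aborts via elog(ERROR) instead of leaving a half-deleted archive behind with only a WARNING.
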

src/pg_probackup.h

@@ -1038,6 +1038,8 @@ extern int fio_send_file(const char *from_fullpath, const char *to_fullpath, FIL
 extern void fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink,
 						 bool add_root, bool backup_logs, int external_dir_num);

+extern bool pgut_rmtree(const char *path, bool rmtopdir, bool strict);
+
 /* return codes for fio_send_pages() and fio_send_file() */
 #define SEND_OK       (0)
 #define FILE_MISSING  (-1)

src/utils/pgut.c (140 lines changed)

@@ -43,6 +43,9 @@ static void on_interrupt(void);
 static void on_cleanup(void);
 static pqsigfunc oldhandler = NULL;

+static char ** pgut_pgfnames(const char *path, bool strict);
+static void pgut_pgfnames_cleanup(char **filenames);
+
 void discard_response(PGconn *conn);

 void

@@ -1062,3 +1065,140 @@ discard_response(PGconn *conn)
 		PQclear(res);
 	} while (res);
 }
+
+/*
+ * pgfnames
+ *
+ * return a list of the names of objects in the argument directory.  Caller
+ * must call pgfnames_cleanup later to free the memory allocated by this
+ * function.
+ */
+char **
+pgut_pgfnames(const char *path, bool strict)
+{
+	DIR		   *dir;
+	struct dirent *file;
+	char	  **filenames;
+	int			numnames = 0;
+	int			fnsize = 200;	/* enough for many small dbs */
+
+	dir = opendir(path);
+	if (dir == NULL)
+	{
+		elog(strict ? ERROR : WARNING, "could not open directory \"%s\": %m", path);
+		return NULL;
+	}
+
+	filenames = (char **) palloc(fnsize * sizeof(char *));
+
+	while (errno = 0, (file = readdir(dir)) != NULL)
+	{
+		if (strcmp(file->d_name, ".") != 0 && strcmp(file->d_name, "..") != 0)
+		{
+			if (numnames + 1 >= fnsize)
+			{
+				fnsize *= 2;
+				filenames = (char **) repalloc(filenames,
+											   fnsize * sizeof(char *));
+			}
+			filenames[numnames++] = pstrdup(file->d_name);
+		}
+	}
+
+	if (errno)
+	{
+		elog(strict ? ERROR : WARNING, "could not read directory \"%s\": %m", path);
+		return NULL;
+	}
+
+	filenames[numnames] = NULL;
+
+	if (closedir(dir))
+	{
+		elog(strict ? ERROR : WARNING, "could not close directory \"%s\": %m", path);
+		return NULL;
+	}
+
+	return filenames;
+}
+
+/*
+ * pgfnames_cleanup
+ *
+ * deallocate memory used for filenames
+ */
+void
+pgut_pgfnames_cleanup(char **filenames)
+{
+	char	  **fn;
+
+	for (fn = filenames; *fn; fn++)
+		pfree(*fn);
+
+	pfree(filenames);
+}
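
A minimal usage sketch for these two helpers (list_dir_demo is a hypothetical function, not part of the patch): pgut_pgfnames() returns a NULL-terminated, palloc'd array of entry names, skipping "." and "..", and returns NULL on failure when strict is false.

    static void
    list_dir_demo(const char *path)
    {
        /* strict = false: failures are logged as WARNING and NULL is returned */
        char      **names = pgut_pgfnames(path, false);
        char      **name;

        if (names == NULL)
            return;             /* directory could not be opened or read */

        for (name = names; *name; name++)
            elog(INFO, "entry: %s", *name);

        /* pfree() each name and then the array itself */
        pgut_pgfnames_cleanup(names);
    }
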
+
+/* Shamelessly stolen from common/rmtree.c */
+bool
+pgut_rmtree(const char *path, bool rmtopdir, bool strict)
+{
+	bool		result = true;
+	char		pathbuf[MAXPGPATH];
+	char	  **filenames;
+	char	  **filename;
+	struct stat statbuf;
+
+	/*
+	 * we copy all the names out of the directory before we start modifying
+	 * it.
+	 */
+	filenames = pgut_pgfnames(path, strict);
+
+	if (filenames == NULL)
+		return false;
+
+	/* now we have the names we can start removing things */
+	for (filename = filenames; *filename; filename++)
+	{
+		snprintf(pathbuf, MAXPGPATH, "%s/%s", path, *filename);
+
+		if (lstat(pathbuf, &statbuf) != 0)
+		{
+			elog(strict ? ERROR : WARNING, "could not stat file or directory \"%s\": %m", pathbuf);
+			result = false;
+			break;
+		}
+
+		if (S_ISDIR(statbuf.st_mode))
+		{
+			/* call ourselves recursively for a directory */
+			if (!pgut_rmtree(pathbuf, true, strict))
+			{
+				result = false;
+				break;
+			}
+		}
+		else
+		{
+			if (unlink(pathbuf) != 0)
+			{
+				elog(strict ? ERROR : WARNING, "could not remove file or directory \"%s\": %m", pathbuf);
+				result = false;
+				break;
+			}
+		}
+	}
+
+	if (rmtopdir)
+	{
+		if (rmdir(path) != 0)
+		{
+			elog(strict ? ERROR : WARNING, "could not remove file or directory \"%s\": %m", path);
+			result = false;
+		}
+	}
+
+	pgut_pgfnames_cleanup(filenames);
+
+	return result;
+}
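
One behavioral detail worth noting: assuming pg_probackup's elog() does not return on ERROR, the result = false paths are reachable only in non-strict mode, so with strict = true the function either succeeds or aborts the process. The boolean return value therefore matters only to non-strict callers; a caller sketch (the WARNING text is illustrative, not from the patch):

    if (!pgut_rmtree(prefetch_dir, false, false))
        elog(WARNING, "could not fully discard prefetch directory \"%s\"", prefetch_dir);
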

tests/backup.py (133 lines changed)

@@ -147,10 +147,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                 "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                     repr(self.output), self.cmd))
         except ProbackupException as e:
-            self.assertIn(
-                "ERROR: Valid backup on current timeline 1 is not found. "
-                "Create new FULL backup before an incremental one.",
-                e.message,
+            self.assertTrue(
+                "WARNING: Valid backup on current timeline 1 is not found" in e.message and
+                "ERROR: Create new full backup before an incremental one" in e.message,
                 "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                     repr(e.message), self.cmd))

@@ -165,10 +164,9 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                 "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                     repr(self.output), self.cmd))
         except ProbackupException as e:
-            self.assertIn(
-                "ERROR: Valid backup on current timeline 1 is not found. "
-                "Create new FULL backup before an incremental one.",
-                e.message,
+            self.assertTrue(
+                "WARNING: Valid backup on current timeline 1 is not found" in e.message and
+                "ERROR: Create new full backup before an incremental one" in e.message,
                 "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                     repr(e.message), self.cmd))

@@ -176,6 +174,10 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             self.show_pb(backup_dir, 'node')[0]['status'],
             "ERROR")

+        self.assertEqual(
+            self.show_pb(backup_dir, 'node')[1]['status'],
+            "ERROR")
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)

@@ -315,10 +317,8 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
-    def test_page_corruption_heal_via_ptrack_1(self):
+    def test_page_detect_corruption(self):
         """make node, corrupt some page, check that backup failed"""
-        if not self.ptrack:
-            return unittest.skip('Skipped because ptrack support is disabled')

         fname = self.id().split('.')[3]
         node = self.make_simple_node(
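
The rename matches what the test verifies from here on: a corrupted page must be detected and fail the backup, rather than be healed via ptrack, which also explains why the ptrack guard is dropped; the heal-via-ptrack variants are removed below and in tests/delta.py.
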

@@ -380,98 +380,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    # @unittest.skip("skip")
-    def test_page_corruption_heal_via_ptrack_2(self):
-        """make node, corrupt some page, check that backup failed"""
-        if not self.ptrack:
-            return unittest.skip('Skipped because ptrack support is disabled')
-
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
-            set_replication=True,
-            ptrack_enable=True,
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        node.slow_start()
-
-        if node.major_version >= 12:
-            node.safe_psql(
-                "postgres",
-                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")
-
-        self.backup_node(
-            backup_dir, 'node', node, backup_type="full",
-            options=["-j", "4", "--stream"])
-
-        node.safe_psql(
-            "postgres",
-            "create table t_heap as select 1 as id, md5(i::text) as text, "
-            "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,1000) i")
-        node.safe_psql(
-            "postgres",
-            "CHECKPOINT;")
-
-        heap_path = node.safe_psql(
-            "postgres",
-            "select pg_relation_filepath('t_heap')").rstrip()
-        node.stop()
-
-        with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
-            f.seek(9000)
-            f.write(b"bla")
-            f.flush()
-            f.close
-        node.slow_start()
-
-        try:
-            self.backup_node(
-                backup_dir, 'node', node, backup_type="full",
-                options=["-j", "4", "--stream", '--log-level-console=LOG'])
-            # we should die here because exception is what we expect to happen
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of page "
-                "corruption in PostgreSQL instance.\n"
-                " Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            if self.remote:
-                self.assertTrue(
-                    "WARNING: File" in e.message and
-                    "try to fetch via shared buffer" in e.message and
-                    "WARNING: page verification failed, "
-                    "calculated checksum" in e.message and
-                    "ERROR: query failed: "
-                    "ERROR: invalid page in block" in e.message and
-                    "query was: SELECT pg_catalog.pg_ptrack_get_block" in e.message,
-                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                        repr(e.message), self.cmd))
-            else:
-                self.assertTrue(
-                    "LOG: File" in e.message and
-                    "blknum" in e.message and
-                    "have wrong checksum" in e.message and
-                    "try to fetch via shared buffer" in e.message and
-                    "WARNING: page verification failed, "
-                    "calculated checksum" in e.message and
-                    "ERROR: query failed: "
-                    "ERROR: invalid page in block" in e.message and
-                    "query was: SELECT pg_catalog.pg_ptrack_get_block" in e.message,
-                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                        repr(e.message), self.cmd))
-
-        self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
-            "Backup Status should be ERROR")
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
     def test_backup_detect_corruption(self):

@@ -495,6 +403,10 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             "postgres",
             "create extension ptrack")

+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type="full", options=["-j", "4", "--stream"])
+
         node.safe_psql(
             "postgres",
             "create table t_heap as select 1 as id, md5(i::text) as text, "

@@ -529,10 +441,6 @@ class BackupTest(ProbackupTest, unittest.TestCase):

         node.slow_start()

-        # self.backup_node(
-        #     backup_dir, 'node', node,
-        #     backup_type="full", options=["-j", "4", "--stream"])
-
         try:
             self.backup_node(
                 backup_dir, 'node', node,

@@ -608,12 +516,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                 "\n Output: {0} \n CMD: {1}".format(
                     repr(self.output), self.cmd))
         except ProbackupException as e:
-            self.assertTrue(
-                'WARNING: page verification failed, '
-                'calculated checksum' in e.message and
-                'ERROR: query failed: ERROR: '
-                'invalid page in block 1 of relation' in e.message and
-                'ERROR: Data files transferring failed' in e.message,
+            self.assertIn(
+                'ERROR: Corruption detected in file "{0}", block 1: '
+                'page verification failed, calculated checksum'.format(
+                    heap_fullpath),
+                e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
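
The assertion change mirrors the new failure mode: instead of a chain of WARNINGs ending in a generic "Data files transferring failed", the backup apparently fails with a single ERROR naming the corrupted file and block, which lets the test pin the exact message via heap_fullpath.
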

tests/delta.py (157 lines changed)

@@ -1050,163 +1050,6 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    # @unittest.skip("skip")
-    def test_delta_corruption_heal_via_ptrack(self):
-        """make node, corrupt some page, check that backup failed"""
-        if not self.ptrack:
-            return unittest.skip('Skipped because ptrack support is disabled')
-
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        node.slow_start()
-
-        if node.major_version >= 12:
-            node.safe_psql(
-                "postgres",
-                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")
-
-        self.backup_node(
-            backup_dir, 'node', node,
-            backup_type="full", options=["-j", "4", "--stream"])
-
-        node.safe_psql(
-            "postgres",
-            "create table t_heap as select 1 as id, md5(i::text) as text, "
-            "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,1000) i")
-        node.safe_psql(
-            "postgres",
-            "CHECKPOINT;")
-
-        heap_path = node.safe_psql(
-            "postgres",
-            "select pg_relation_filepath('t_heap')").rstrip()
-
-        with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
-            f.seek(9000)
-            f.write(b"bla")
-            f.flush()
-            f.close
-
-        self.backup_node(
-            backup_dir, 'node', node,
-            backup_type="delta",
-            options=["-j", "4", "--stream", '--log-level-file=verbose'])
-
-        # open log file and check
-        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
-            log_content = f.read()
-            self.assertIn('block 1, try to fetch via shared buffer', log_content)
-            self.assertIn('SELECT pg_catalog.pg_ptrack_get_block', log_content)
-            f.close
-
-        self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['status'] == 'OK',
-            "Backup Status should be OK")
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
-    # @unittest.skip("skip")
-    def test_page_corruption_heal_via_ptrack(self):
-        """make node, corrupt some page, check that backup failed"""
-        if not self.ptrack:
-            return unittest.skip('Skipped because ptrack support is disabled')
-
-        fname = self.id().split('.')[3]
-        node = self.make_simple_node(
-            base_dir=os.path.join(module_name, fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'])
-
-        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
-
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        node.slow_start()
-
-        if node.major_version >= 12:
-            node.safe_psql(
-                "postgres",
-                "CREATE EXTENSION ptrack WITH SCHEMA pg_catalog")
-
-        self.backup_node(
-            backup_dir, 'node', node, backup_type="full",
-            options=["-j", "4", "--stream"])
-
-        node.safe_psql(
-            "postgres",
-            "create table t_heap as select 1 as id, md5(i::text) as text, "
-            "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,1000) i")
-        node.safe_psql(
-            "postgres",
-            "CHECKPOINT;")
-
-        heap_path = node.safe_psql(
-            "postgres",
-            "select pg_relation_filepath('t_heap')").rstrip()
-        node.stop()
-
-        with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
-            f.seek(9000)
-            f.write(b"bla")
-            f.flush()
-            f.close
-        node.slow_start()
-
-        try:
-            self.backup_node(
-                backup_dir, 'node', node, backup_type="delta",
-                options=["-j", "4", "--stream", "--log-level-console=LOG"])
-            # we should die here because exception is what we expect to happen
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because of page "
-                "corruption in PostgreSQL instance.\n"
-                " Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
-        except ProbackupException as e:
-            if self.remote:
-                self.assertTrue(
-                    "LOG: File" in e.message and
-                    "try to fetch via shared buffer" in e.message and
-                    "WARNING: page verification failed, "
-                    "calculated checksum" in e.message and
-                    "ERROR: query failed: "
-                    "ERROR: invalid page in block" in e.message and
-                    "query was: SELECT pg_catalog.pg_ptrack_get_block" in e.message,
-                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                        repr(e.message), self.cmd))
-            else:
-                self.assertTrue(
-                    "WARNING: File" in e.message and
-                    "blknum" in e.message and
-                    "have wrong checksum" in e.message and
-                    "try to fetch via shared buffer" in e.message and
-                    "WARNING: page verification failed, "
-                    "calculated checksum" in e.message and
-                    "ERROR: query failed: "
-                    "ERROR: invalid page in block" in e.message and
-                    "query was: SELECT pg_catalog.pg_ptrack_get_block" in e.message,
-                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
-                        repr(e.message), self.cmd))
-
-        self.assertTrue(
-            self.show_pb(backup_dir, 'node')[1]['status'] == 'ERROR',
-            "Backup Status should be ERROR")
-
-        # Clean after yourself
-        self.del_test_dir(module_name, fname)
-
     def test_delta_nullified_heap_page_backup(self):
         """
         make node, take full backup, nullify some heap block,
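
Together with the backup.py changes, this removes all three heal-via-ptrack corruption tests: fetching a corrupted block from shared buffers via pg_catalog.pg_ptrack_get_block is apparently no longer the expected recovery path, and the surviving detect_corruption tests assert that such backups fail instead.
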

tests/exclude.py

@@ -153,3 +153,85 @@ class ExcludeTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_exclude_log_dir(self):
+        """
+        check that by default 'log' and 'pg_log' directories are not backed up
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'logging_collector': 'on',
+                'log_filename': 'postgresql.log'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='full', options=['--stream'])
+
+        log_dir = node.safe_psql(
+            'postgres',
+            'show log_directory').rstrip()
+
+        node.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node, options=["-j", "4"])
+
+        # check that PGDATA/log or PGDATA/pg_log do not exists
+        path = os.path.join(node.data_dir, log_dir)
+        log_file = os.path.join(path, 'postgresql.log')
+        self.assertTrue(os.path.exists(path))
+        self.assertFalse(os.path.exists(log_file))
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
+
+    # @unittest.skip("skip")
+    def test_exclude_log_dir_1(self):
+        """
+        check that "--backup-pg-log" works correctly
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'logging_collector': 'on',
+                'log_filename': 'postgresql.log'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        node.slow_start()
+
+        log_dir = node.safe_psql(
+            'postgres',
+            'show log_directory').rstrip()
+
+        self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='full', options=['--stream', '--backup-pg-log'])
+
+        node.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node, options=["-j", "4"])
+
+        # check that PGDATA/log or PGDATA/pg_log do not exists
+        path = os.path.join(node.data_dir, log_dir)
+        log_file = os.path.join(path, 'postgresql.log')
+        self.assertTrue(os.path.exists(path))
+        self.assertTrue(os.path.exists(log_file))
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
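
These two tests pin down the log-directory behavior: by default the directory itself is restored but its contents are excluded, while --backup-pg-log includes the contents as well (note that the second test reuses the "do not exists" comment even though it asserts that postgresql.log is present). A command-line sketch of the including variant, with backup_dir and node as placeholders:

    pg_probackup backup -B backup_dir --instance node -b FULL --stream --backup-pg-log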