Mirror of https://github.com/postgrespro/pg_probackup.git (synced 2025-01-07 13:40:17 +02:00)

commit 80bcb80ada
Merge branch 'issue_169' into issue_171
@@ -192,8 +192,8 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync)
 				"Create new FULL backup before an incremental one.",
 				current.tli);

-		pgBackupGetPath(prev_backup, prev_backup_filelist_path,
-						lengthof(prev_backup_filelist_path), DATABASE_FILE_LIST);
+		join_path_components(prev_backup_filelist_path, prev_backup->root_dir,
+							 DATABASE_FILE_LIST);
 		/* Files of previous backup needed by DELTA backup */
 		prev_backup_filelist = dir_read_file_list(NULL, NULL, prev_backup_filelist_path, FIO_BACKUP_HOST);

@@ -1450,8 +1450,8 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli,

 		if (!stream_wal && is_start_lsn && try_count == 30)
 			elog(WARNING, "By default pg_probackup assume WAL delivery method to be ARCHIVE. "
-				"If continius archiving is not set up, use '--stream' option to make autonomous backup. "
-				"Otherwise check that continius archiving works correctly.");
+				"If continuous archiving is not set up, use '--stream' option to make autonomous backup. "
+				"Otherwise check that continuous archiving works correctly.");

 		if (timeout > 0 && try_count > timeout)
 		{
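The second hunk only rewords the hint, but the surrounding control flow is worth spelling out: wait_wal_lsn() polls once per second, warns after 30 fruitless tries, and gives up after the configured timeout. A minimal sketch of that flow under simplified assumptions — wal_is_available() and wait_for_wal() are illustrative stubs, not pg_probackup functions:

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stub for illustration: in the real code this probes the WAL archive. */
static bool
wal_is_available(void)
{
	return false;
}

/* Simplified polling loop; names and structure are not the actual backup.c code. */
static bool
wait_for_wal(bool stream_wal, bool is_start_lsn, int timeout)
{
	int		try_count = 0;

	for (;;)
	{
		if (wal_is_available())
			return true;

		/* After 30 idle seconds, hint that '--stream' may be required. */
		if (!stream_wal && is_start_lsn && try_count == 30)
			fprintf(stderr, "WARNING: expecting WAL via continuous archiving; "
					"if archiving is not set up, use '--stream'\n");

		/* Give up once the configured timeout is exceeded. */
		if (timeout > 0 && try_count > timeout)
			return false;

		sleep(1);
		try_count++;
	}
}
```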
src/data.c (36 changed lines)
@@ -237,7 +237,7 @@ read_page_from_file(pgFile *file, BlockNumber blknum,
 {
 	int		i;
 	/* Check if the page is zeroed. */
-	for(i = 0; i < BLCKSZ && page[i] == 0; i++);
+	for (i = 0; i < BLCKSZ && page[i] == 0; i++);

 	/* Page is zeroed. No need to check header and checksum. */
 	if (i == BLCKSZ)
@@ -271,9 +271,10 @@ read_page_from_file(pgFile *file, BlockNumber blknum,
  * should be a pointer to allocated BLCKSZ of bytes.
  *
  * Prints appropriate warnings/errors/etc into log.
- * Returns 0 if page was successfully retrieved
- *         SkipCurrentPage(-3) if we need to skip this page
+ * Returns:
+ *     PageIsOk(0) if page was successfully retrieved
+ *     PageIsTruncated(-2) if the page was truncated
+ *     SkipCurrentPage(-3) if we need to skip this page
+ *     PageIsCorrupted(-4) if the page check mismatch
  */
 static int32
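Since the rewritten comment now defines a four-value return contract, a caller is expected to branch on each code. A minimal sketch, assuming the constant values given in the comment (PageIsOk = 0, PageIsTruncated = -2, SkipCurrentPage = -3, PageIsCorrupted = -4); handle_page_result is a hypothetical caller, not pg_probackup code:

```c
#include <stdio.h>
#include <stdint.h>

/* Values taken from the updated comment above. */
#define PageIsOk          0
#define PageIsTruncated  (-2)
#define SkipCurrentPage  (-3)
#define PageIsCorrupted  (-4)

/* Hypothetical caller: dispatch on the documented return codes. */
static void
handle_page_result(int32_t rc, uint32_t blknum)
{
	switch (rc)
	{
		case PageIsOk:
			/* page retrieved, go on and back it up */
			break;
		case PageIsTruncated:
			/* relation shrank: stop reading further blocks */
			break;
		case SkipCurrentPage:
			/* page intentionally skipped, count it and continue */
			break;
		case PageIsCorrupted:
			/* checksum/header mismatch: report and abort */
			fprintf(stderr, "block %u is corrupted\n", blknum);
			break;
	}
}
```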
@@ -312,9 +313,8 @@ prepare_page(ConnectionArgs *conn_arg,
 	switch (result)
 	{
 		case 2:
-			page_is_valid = true;
 			elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum);
-			break;
+			return PageIsOk;

 		case 1:
 			page_is_valid = true;
@@ -393,6 +393,12 @@ prepare_page(ConnectionArgs *conn_arg,
 		}
 	}

+	/* Get page via ptrack interface from PostgreSQL shared buffer.
+	 * We do this in following cases:
+	 * 1. PTRACK backup of 1.x versions
+	 * 2. During backup, regardless of backup mode, of PostgreSQL instance
+	 *    with ptrack support we encountered invalid page.
+	 */
 	if ((backup_mode == BACKUP_MODE_DIFF_PTRACK
 		&& (ptrack_version_num >= 15 && ptrack_version_num < 20))
 		|| !page_is_valid)
@@ -434,7 +440,6 @@ prepare_page(ConnectionArgs *conn_arg,
 		!parse_page(page, &page_lsn))
 		elog(ERROR, "Cannot parse page after pg_ptrack_get_block. "
 					"Possible risk of a memory corruption");
-
 	}

 	if (page_is_truncated)
@@ -641,6 +646,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
 	if (file->pagemap.bitmapsize == PageBitmapIsEmpty ||
 		file->pagemap_isabsent || !file->exists_in_prev)
 	{
+		/* remote FULL and DELTA */
 		if (fio_is_remote_file(in))
 		{
 			int rc = fio_send_pages(in, out, file,
@@ -664,6 +670,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
 		}
 		else
 		{
+			/* local FULL and DELTA */
 			RetryUsingPtrack:
 			for (blknum = 0; blknum < nblocks; blknum++)
 			{
@@ -684,7 +691,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
								page_state, curr_page, calg, clevel,
								from_fullpath, to_fullpath);
 				else
-					elog(ERROR, "Illegal page state: %i, file: %s, blknum %i",
+					elog(ERROR, "Invalid page state: %i, file: %s, blknum %i",
							page_state, file->rel_path, blknum);

 				n_blocks_read++;
@@ -713,6 +720,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
 			if (page_state == PageIsTruncated)
 				break;

+			/* TODO: PAGE and PTRACK should never get SkipCurrentPage */
 			else if (page_state == SkipCurrentPage)
 				n_blocks_skipped++;

@@ -721,7 +729,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
							page_state, curr_page, calg, clevel,
							from_fullpath, to_fullpath);
 			else
-				elog(ERROR, "Illegal page state: %i, file: %s, blknum %i",
+				elog(ERROR, "Invalid page state: %i, file: %s, blknum %i",
						page_state, file->rel_path, blknum);

 			n_blocks_read++;
@@ -1260,22 +1268,18 @@ create_empty_file(fio_location from_location, const char *to_root,
 	/* open file for write */
 	join_path_components(to_path, to_root, file->rel_path);
 	out = fio_fopen(to_path, PG_BINARY_W, to_location);

 	if (out == NULL)
 	{
-		elog(ERROR, "cannot open destination file \"%s\": %s",
+		elog(ERROR, "Cannot open destination file \"%s\": %s",
			 to_path, strerror(errno));
 	}

 	/* update file permission */
 	if (fio_chmod(to_path, file->mode, to_location) == -1)
 	{
 		fio_fclose(out);
-		elog(ERROR, "cannot change mode of \"%s\": %s", to_path,
+		elog(ERROR, "Cannot change mode of \"%s\": %s", to_path,
			 strerror(errno));
 	}

 	if (fio_fclose(out))
-		elog(ERROR, "cannot close \"%s\": %s", to_path, strerror(errno));
+		elog(ERROR, "Cannot close \"%s\": %s", to_path, strerror(errno));

 	return true;
 }
src/merge.c (51 changed lines)
@@ -403,6 +403,9 @@ do_merge(time_t backup_id)
  * full backup directory.
  * Remove unnecessary directories and files from full backup directory.
  * Update metadata of full backup to represent destination backup.
  *
+ * TODO: stop relying on caller to provide valid parent_chain, make sure
+ * that chain is ok.
  */
 void
 merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup)
@@ -481,17 +484,15 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup)
 		}
 	}

-	/* TODO: Should we keep relying on caller to provide valid parent_chain? */

-	/* If destination backup compression algorihtm differs from
-	 * full backup compression algorihtm, then in-place merge is
+	/* If destination backup compression algorithm differs from
+	 * full backup compression algorithm, then in-place merge is
	 * not possible.
	 */
 	if (full_backup->compress_alg == dest_backup->compress_alg)
 		compression_match = true;
 	else
 		elog(WARNING, "In-place merge is disabled because of compression "
-					"algorihtms mismatch");
+					"algorithms mismatch");

 	/*
	 * If current program version differs from destination backup version,
@@ -502,13 +503,19 @@ merge_chain(parray *parent_chain, pgBackup *full_backup, pgBackup *dest_backup)
 		program_version_match = true;
 	else
 		elog(WARNING, "In-place merge is disabled because of program "
-					"versions mismatch");
+					"versions mismatch: backup %s was produced by version %s, "
+					"but current program version is %s",
+					base36enc(dest_backup->start_time),
+					dest_backup->program_version, PROGRAM_VERSION);

 	/* Construct path to database dir: /backup_dir/instance_name/FULL/database */
 	join_path_components(full_database_dir, full_backup->root_dir, DATABASE_DIR);
 	/* Construct path to external dir: /backup_dir/instance_name/FULL/external */
 	join_path_components(full_external_prefix, full_backup->root_dir, EXTERNAL_DIR);

 	elog(INFO, "Validate parent chain for backup %s",
		 base36enc(dest_backup->start_time));

 	/*
	 * Validate or revalidate all members of parent chain
	 * with sole exception of FULL backup. If it has MERGING status
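Taken together, the two warnings above encode a single eligibility rule for in-place merge. A standalone restatement of that rule — a sketch with illustrative types and names, not the actual merge.c code:

```c
#include <stdbool.h>
#include <string.h>

/* Illustrative stand-ins for the real backup metadata. */
typedef struct
{
	int			compress_alg;
	const char *program_version;
} BackupMeta;

/*
 * In-place merge is possible only when the destination backup uses the
 * same compression algorithm as the full backup AND was produced by the
 * currently running program version.
 */
static bool
in_place_merge_possible(const BackupMeta *full, const BackupMeta *dest,
						const char *current_version)
{
	bool compression_match = (full->compress_alg == dest->compress_alg);
	bool program_version_match =
		(strcmp(dest->program_version, current_version) == 0);

	return compression_match && program_version_match;
}
```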
@@ -823,8 +830,9 @@ merge_files(void *arg)
 {
 	int			i;
 	merge_files_arg *arguments = (merge_files_arg *) arg;
+	size_t		n_files = parray_num(arguments->dest_backup->files);

-	for (i = 0; i < parray_num(arguments->dest_backup->files); i++)
+	for (i = 0; i < n_files; i++)
 	{
 		pgFile	   *dest_file = (pgFile *) parray_get(arguments->dest_backup->files, i);
 		pgFile	   *tmp_file;
@@ -849,8 +857,8 @@ merge_files(void *arg)
 			goto done;

 		if (progress)
-			elog(INFO, "Progress: (%d/%lu). Process file \"%s\"",
-				 i + 1, (unsigned long) parray_num(arguments->dest_backup->files), dest_file->rel_path);
+			elog(INFO, "Progress: (%d/%lu). Merging file \"%s\"",
+				 i + 1, n_files, dest_file->rel_path);

 		if (dest_file->is_datafile && !dest_file->is_cfs)
 			tmp_file->segno = dest_file->segno;
@@ -862,14 +870,13 @@ merge_files(void *arg)
 			tmp_file->crc = dest_file->crc;

 			tmp_file->write_size = 0;

 			goto done;
 		}

 		/*
		 * If file didn`t changed over the course of all incremental chain,
		 * then do in-place merge, unless destination backup has
-		 * different compression algorihtm.
+		 * different compression algorithm.
		 * In-place merge is also impossible, if program version of destination
		 * backup differs from PROGRAM_VERSION
		 */
@@ -1072,7 +1079,7 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external,

 /* Merge is usually happens as usual backup/restore via temp files, unless
  * file didn`t changed since FULL backup AND full a dest backup have the
- * same compression algorihtm. In this case file can be left as it is.
+ * same compression algorithm. In this case file can be left as it is.
  */
 void
 merge_data_file(parray *parent_chain, pgBackup *full_backup,
@@ -1111,14 +1118,28 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup,
					 dest_backup->compress_alg, dest_backup->compress_level,
					 dest_backup->checksum_version, 0, NULL, false);

+	/* drop restored temp file */
+	if (unlink(to_fullpath_tmp1) == -1)
+		elog(ERROR, "Cannot remove file \"%s\": %s", to_fullpath_tmp1,
+			 strerror(errno));

 	/*
	 * In old (=<2.2.7) versions of pg_probackup n_blocks attribute of files
	 * in PAGE and PTRACK wasn`t filled.
	 */
-	// Assert(tmp_file->n_blocks == dest_file->n_blocks);
+	//Assert(tmp_file->n_blocks == dest_file->n_blocks);

+	/* Backward compatibility kludge:
+	 * When merging old backups, it is possible that
+	 * to_fullpath_tmp2 size will be 0, and so it will be
+	 * truncated in backup_data_file().
+	 * TODO: remove in 3.0.0
+	 */
+	if (tmp_file->write_size == 0)
+		return;

 	if (fio_sync(to_fullpath_tmp2, FIO_BACKUP_HOST) != 0)
-		elog(ERROR, "Cannot fsync merge temp file \"%s\": %s",
+		elog(ERROR, "Cannot sync merge temp file \"%s\": %s",
			 to_fullpath_tmp2, strerror(errno));

 	/* Do atomic rename from second temp file to destination file */
@@ -1223,7 +1244,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup,

 	/* TODO: --no-sync support */
 	if (fio_sync(to_fullpath_tmp, FIO_BACKUP_HOST) != 0)
-		elog(ERROR, "Cannot fsync merge temp file \"%s\": %s",
+		elog(ERROR, "Cannot sync merge temp file \"%s\": %s",
			 to_fullpath_tmp, strerror(errno));

 	/* Do atomic rename from second temp file to destination file */
@@ -864,7 +864,8 @@ compress_init(void)
 	{
 		if (instance_config.compress_level != COMPRESS_LEVEL_DEFAULT
 			&& instance_config.compress_alg == NOT_DEFINED_COMPRESS)
-			elog(ERROR, "Cannot specify compress-level option without compress-alg option");
+			elog(ERROR, "Cannot specify compress-level option alone without "
+						"compress-algorithm option");
 	}

 	if (instance_config.compress_level < 0 || instance_config.compress_level > 9)
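The validation rule enforced here is simple: a non-default compression level is only meaningful together with an explicit algorithm, and the level must stay within zlib's 0–9 range. A standalone sketch of the same checks (constant values are placeholders for illustration, not the real definitions):

```c
#include <stdbool.h>
#include <stdio.h>

/* Placeholder values for illustration only. */
#define COMPRESS_LEVEL_DEFAULT 1
#define NOT_DEFINED_COMPRESS   0

/* Illustrative restatement of the option validation in compress_init(). */
static bool
validate_compress_options(int compress_alg, int compress_level)
{
	/* compress-level without compress-algorithm makes no sense */
	if (compress_level != COMPRESS_LEVEL_DEFAULT &&
		compress_alg == NOT_DEFINED_COMPRESS)
	{
		fprintf(stderr, "Cannot specify compress-level option alone without "
						"compress-algorithm option\n");
		return false;
	}

	/* zlib-style levels are limited to 0..9 */
	if (compress_level < 0 || compress_level > 9)
	{
		fprintf(stderr, "compress-level must be in range 0..9\n");
		return false;
	}

	return true;
}
```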
@@ -697,6 +697,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
 		if (S_ISDIR(dest_file->mode))
 			continue;

+		/* skip external files if ordered to do so */
+		if (dest_file->external_dir_num > 0 &&
+			params->skip_external_dirs)
+			continue;
+
 		/* construct fullpath */
 		if (dest_file->external_dir_num == 0)
 		{
@@ -708,7 +713,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
 		}
 		else
 		{
-			char	   *external_path = parray_get(external_dirs, dest_file->external_dir_num - 1);
+			char	*external_path = parray_get(external_dirs, dest_file->external_dir_num - 1);
 			join_path_components(to_fullpath, external_path, dest_file->rel_path);
 		}

@@ -831,8 +836,10 @@ restore_files(void *arg)
					 to_fullpath, strerror(errno_tmp));
 		}

-		if (!fio_is_remote_file(out))
-			setbuf(out, buffer);
+		/* update file permission */
+		if (fio_chmod(to_fullpath, dest_file->mode, FIO_DB_HOST) == -1)
+			elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
+				 strerror(errno));

 		if (!dest_file->is_datafile || dest_file->is_cfs)
 			elog(VERBOSE, "Restoring non-data file: \"%s\"", to_fullpath);
@@ -843,6 +850,9 @@ restore_files(void *arg)
 		if (dest_file->write_size == 0)
 			goto done;

+		if (!fio_is_remote_file(out))
+			setbuf(out, buffer);

 		/* Restore destination file */
 		if (dest_file->is_datafile && !dest_file->is_cfs)
 			/* Destination file is data file */
@@ -853,30 +863,7 @@ restore_files(void *arg)
 			arguments->restored_bytes += restore_non_data_file(arguments->parent_chain,
												arguments->dest_backup, dest_file, out, to_fullpath);

-		/*
-		 * Destination file is data file.
-		 * Iterate over incremental chain and lookup given destination file.
-		 * Apply changed blocks to destination file from every backup in parent chain.
-		 */
-
-done:
-
-		/* Truncate file up to n_blocks. NOTE: no need, we just should not write
-		 * blocks that are exceeding n_blocks.
-		 * But for this to work, n_blocks should be trusted.
-		 */
-
-		/* update file permission
-		 * TODO: chmod must be done right after fopen()
-		 */
-		if (fio_chmod(to_fullpath, dest_file->mode, FIO_DB_HOST) == -1)
-		{
-			int			errno_tmp = errno;
-			fio_fclose(out);
-			elog(ERROR, "Cannot change mode of \"%s\": %s", to_fullpath,
-				 strerror(errno_tmp));
-		}
-
+done:
 		/* close file */
 		if (fio_fclose(out) != 0)
 			elog(ERROR, "Cannot close file \"%s\": %s", to_fullpath,
@@ -255,7 +255,7 @@ pgBackupValidateFiles(void *arg)
 			continue;

 		if (progress)
-			elog(INFO, "Progress: (%d/%d). Process file \"%s\"",
+			elog(INFO, "Progress: (%d/%d). Validate file \"%s\"",
				 i + 1, num_files, file->path);

 		/*
@@ -1023,6 +1023,11 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         # FULL backup
         self.backup_node(backup_dir, 'node', node, options=['--stream'])

+        node.safe_psql(
+            "postgres",
+            "insert into t_heap select i"
+            " as id from generate_series(101,102) i")
+
         # PAGE backup
         gdb = self.backup_node(
             backup_dir, 'node', node, backup_type='page',
@@ -1039,11 +1044,10 @@ class BackupTest(ProbackupTest, unittest.TestCase):

         pgdata = self.pgdata_content(node.data_dir)

-        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
-            log_content = f.read()
-            self.assertTrue(
-                'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
-                'File "{0}" should be deleted but it`s not'.format(absolute_path))
+        backup_id = self.show_pb(backup_dir, 'node')[1]['id']
+
+        filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
+        self.assertNotIn(relative_path, filelist)

         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
@@ -1347,7 +1351,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'node', node, gdb=True,
             options=['--stream', '--log-level-file=LOG'])

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -1385,7 +1389,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'node', node, gdb=True,
             options=['--stream', '--log-level-file=LOG'])

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -1422,7 +1426,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True, options=['--stream'])

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -1513,7 +1517,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                'ERROR: cannot open file',
+                'ERROR: Cannot open file',
                 e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -85,8 +85,8 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench = node.pgbench(
             stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT,
-            options=["-c", "4", "-T", "20"]
-        )
+            options=["-c", "4", "-T", "20"])

         pgbench.wait()
         pgbench.stdout.close()

@@ -105,6 +105,44 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgdata_restored = self.pgdata_content(node_restored.data_dir)
         self.compare_pgdata(pgdata, pgdata_restored)

+        node.safe_psql(
+            'postgres',
+            'create table tmp as select * from pgbench_accounts where aid < 1000')
+
+        node.safe_psql(
+            'postgres',
+            'delete from pgbench_accounts')
+
+        node.safe_psql(
+            'postgres',
+            'VACUUM')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        node.safe_psql(
+            'postgres',
+            'insert into pgbench_accounts select * from pgbench_accounts')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)

@@ -118,8 +156,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
             base_dir=os.path.join(module_name, fname, 'node'),
             set_replication=True,
             initdb_params=['--data-checksums'],
-            pg_options={
-                'autovacuum': 'off'})
+            pg_options={'autovacuum': 'off'})

         self.init_pb(backup_dir, old_binary=True)
         self.show_pb(backup_dir)
@@ -189,8 +226,7 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgbench.wait()
         pgbench.stdout.close()

-        self.backup_node(
-            backup_dir, 'node', node, backup_type='delta')
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')

         if self.paranoia:
             pgdata = self.pgdata_content(node.data_dir)
@@ -204,6 +240,44 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
         pgdata_restored = self.pgdata_content(node_restored.data_dir)
         self.compare_pgdata(pgdata, pgdata_restored)

+        node.safe_psql(
+            'postgres',
+            'create table tmp as select * from pgbench_accounts where aid < 1000')
+
+        node.safe_psql(
+            'postgres',
+            'delete from pgbench_accounts')
+
+        node.safe_psql(
+            'postgres',
+            'VACUUM')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        node.safe_psql(
+            'postgres',
+            'insert into pgbench_accounts select * from pgbench_accounts')
+
+        self.backup_node(backup_dir, 'node', node, backup_type='delta')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node_restored.cleanup()
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)

@@ -530,3 +604,87 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_backward_compatibility_merge_1(self):
+        """
+        Create node, take FULL and PAGE backups with old binary,
+        merge them with new binary.
+        old binary version =< 2.2.7
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir, old_binary=True)
+        self.add_instance(backup_dir, 'node', node, old_binary=True)
+
+        self.set_archiving(backup_dir, 'node', node, old_binary=True)
+        node.slow_start()
+
+        node.pgbench_init(scale=1)
+
+        # FULL backup with OLD binary
+        self.backup_node(
+            backup_dir, 'node', node,
+            old_binary=True)
+
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            options=["-c", "4", "-T", "10"])
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        # PAGE1 backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', old_binary=True)
+
+        node.safe_psql(
+            'postgres',
+            'DELETE from pgbench_accounts')
+
+        node.safe_psql(
+            'postgres',
+            'VACUUM pgbench_accounts')
+
+        # PAGE2 backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', old_binary=True)
+
+        # PAGE3 backup with OLD binary
+        backup_id = self.backup_node(
+            backup_dir, 'node', node,
+            backup_type='page', old_binary=True)
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        # merge chain created by old binary with new binary
+        output = self.merge_backup(
+            backup_dir, "node", backup_id)
+
+        # check that in-place is disabled
+        self.assertIn(
+            "WARNING: In-place merge is disabled "
+            "because of program versions mismatch", output)
+
+        # restore merged backup
+        node_restored = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node_restored'))
+        node_restored.cleanup()
+
+        self.restore_node(
+            backup_dir, 'node', node_restored, options=["-j", "4"])
+
+        pgdata_restored = self.pgdata_content(node_restored.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+        # Clean after yourself
+        self.del_test_dir(module_name, fname)
@@ -833,6 +833,9 @@ class ProbackupTest(object):
         if backup_type:
             cmd_list += ['-b', backup_type]

+        if not old_binary:
+            cmd_list += ['--no-sync']
+
         return self.run_pb(cmd_list + options, asynchronous, gdb, old_binary, return_id)

     def checkdb_node(
@@ -889,6 +892,9 @@ class ProbackupTest(object):
         if backup_id:
             cmd_list += ['-i', backup_id]

+        if not old_binary:
+            cmd_list += ['--no-sync']
+
         return self.run_pb(cmd_list + options, old_binary=old_binary)

     def show_pb(
@@ -33,7 +33,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -87,7 +87,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -156,7 +156,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -253,7 +253,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
         gdb = self.backup_node(
             backup_dir, 'node', node, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()

         gdb.continue_execution_until_break(20)
@@ -399,7 +399,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
                 restore_id) in e.message and
             'is using backup {0} and still is running'.format(
                 backup_id) in e.message and
-            'ERROR: Cannot lock backup directory' in e.message,
+            'ERROR: Cannot lock backup' in e.message,
             '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                 repr(e.message), self.cmd))
tests/merge.py (210 changed lines)
@@ -31,7 +31,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         node.slow_start()

         # Do full backup
-        self.backup_node(backup_dir, "node", node)
+        self.backup_node(backup_dir, "node", node, options=['--compress'])
         show_backup = self.show_pb(backup_dir, "node")[0]

         self.assertEqual(show_backup["status"], "OK")
@@ -45,7 +45,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             conn.commit()

         # Do first page backup
-        self.backup_node(backup_dir, "node", node, backup_type="page")
+        self.backup_node(backup_dir, "node", node, backup_type="page", options=['--compress'])
         show_backup = self.show_pb(backup_dir, "node")[1]

         # sanity check
@@ -60,7 +60,9 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             conn.commit()

         # Do second page backup
-        self.backup_node(backup_dir, "node", node, backup_type="page")
+        self.backup_node(
+            backup_dir, "node", node,
+            backup_type="page", options=['--compress'])
         show_backup = self.show_pb(backup_dir, "node")[2]
         page_id = show_backup["id"]

@@ -1047,7 +1049,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):

         gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file_internal')
         gdb.run_until_break()

         gdb.continue_execution_until_break(5)
@@ -1068,7 +1070,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    @unittest.skip("skip")
+    # @unittest.skip("skip")
     def test_continue_failed_merge_with_corrupted_delta_backup(self):
         """
         Fail merge via gdb, corrupt DELTA backup, try to continue merge
@@ -1121,7 +1123,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):

         # Failed MERGE
         gdb = self.merge_backup(backup_dir, "node", backup_id, gdb=True)
-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file_internal')
         gdb.run_until_break()

         gdb.continue_execution_until_break(2)
@@ -1158,7 +1160,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Merging of backup {0} failed".format(
+                "ERROR: Backup {0} has status CORRUPT, merge is aborted".format(
                     backup_id) in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -1217,8 +1219,12 @@ class MergeTest(ProbackupTest, unittest.TestCase):

         gdb.run_until_break()

+        gdb._execute('thread apply all bt')
+
         gdb.continue_execution_until_break(20)

+        gdb._execute('thread apply all bt')
+
         gdb._execute('signal SIGKILL')

         print(self.show_pb(backup_dir, as_text=True, as_json=False))
@@ -1234,8 +1240,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):

     def test_continue_failed_merge_3(self):
         """
-        Check that failed MERGE can`t be continued after target backup deleting
-        Create FULL and 2 PAGE backups
+        Check that failed MERGE cannot be continued if intermediate
+        backup is missing.
         """
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -1297,14 +1303,14 @@ class MergeTest(ProbackupTest, unittest.TestCase):

         gdb = self.merge_backup(backup_dir, "node", backup_id_merge, gdb=True)

-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file_internal')
         gdb.run_until_break()
         gdb.continue_execution_until_break(2)

         gdb._execute('signal SIGKILL')

         print(self.show_pb(backup_dir, as_text=True, as_json=False))
-        print(os.path.join(backup_dir, "backups", "node", backup_id_delete))
+        # print(os.path.join(backup_dir, "backups", "node", backup_id_delete))

         # DELETE PAGE1
         shutil.rmtree(
@@ -1320,8 +1326,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Parent full backup for the given backup {0} was not found".format(
-                    backup_id_merge) in e.message,
+                "ERROR: Incremental chain is broken, "
+                "merge is impossible to finish" in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))

@@ -1545,7 +1551,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
             backup_dir, 'backups',
             'node', full_id, 'database', fsm_path)

-        print(file_to_remove)
+        # print(file_to_remove)

         os.remove(file_to_remove)

@@ -1701,9 +1707,6 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb.set_breakpoint('delete_backup_files')
         gdb.run_until_break()

-        gdb.set_breakpoint('parray_bsearch')
-        gdb.continue_execution_until_break()
-
         gdb.set_breakpoint('pgFileDelete')
         gdb.continue_execution_until_break(20)

@@ -1711,7 +1714,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):

         # backup half-merged
         self.assertEqual(
-            'MERGING', self.show_pb(backup_dir, 'node')[0]['status'])
+            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])

         self.assertEqual(
             full_id, self.show_pb(backup_dir, 'node')[0]['id'])
@@ -1731,9 +1734,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Parent full backup for the given "
-                "backup {0} was not found".format(
-                    page_id_2) in e.message,
+                "ERROR: Full backup {0} has unfinished merge with backup {1}".format(
+                    full_id, page_id) in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))

@@ -1764,7 +1766,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         page_1 = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        # Change FULL B backup status to ERROR
+        # Change PAGE1 backup status to ERROR
         self.change_backup_status(backup_dir, 'node', page_1, 'ERROR')

         pgdata = self.pgdata_content(node.data_dir)
@@ -1773,11 +1775,11 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum'])
         pgbench.wait()

-        # take PAGE backup
+        # take PAGE2 backup
         page_id = self.backup_node(
             backup_dir, 'node', node, backup_type='page')

-        # Change FULL B backup status to ERROR
+        # Change PAGE1 backup status to OK
         self.change_backup_status(backup_dir, 'node', page_1, 'OK')

         gdb = self.merge_backup(
@@ -1787,8 +1789,8 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         gdb.set_breakpoint('delete_backup_files')
         gdb.run_until_break()

-        gdb.set_breakpoint('parray_bsearch')
-        gdb.continue_execution_until_break()
+        # gdb.set_breakpoint('parray_bsearch')
+        # gdb.continue_execution_until_break()

         gdb.set_breakpoint('pgFileDelete')
         gdb.continue_execution_until_break(30)
@@ -1800,6 +1802,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
         # restore
         node.cleanup()
         try:
+            #self.restore_node(backup_dir, 'node', node, backup_id=page_1)
             self.restore_node(backup_dir, 'node', node)
             self.assertEqual(
                 1, 0,
@@ -1815,6 +1818,158 @@ class MergeTest(ProbackupTest, unittest.TestCase):

         self.del_test_dir(module_name, fname)

+    def test_failed_merge_after_delete_2(self):
+        """
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # take FULL backup
+        full_id = self.backup_node(
+            backup_dir, 'node', node, options=['--stream'])
+
+        node.pgbench_init(scale=1)
+
+        page_1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # add data
+        pgbench = node.pgbench(options=['-T', '10', '-c', '2', '--no-vacuum'])
+        pgbench.wait()
+
+        # take PAGE2 backup
+        page_2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.merge_backup(
+            backup_dir, 'node', page_2, gdb=True,
+            options=['--log-level-console=VERBOSE'])
+
+        gdb.set_breakpoint('pgFileDelete')
+        gdb.run_until_break()
+        gdb.continue_execution_until_break(2)
+        gdb._execute('signal SIGKILL')
+
+        self.delete_pb(backup_dir, 'node', backup_id=page_2)
+
+        # rerun merge
+        try:
+            #self.restore_node(backup_dir, 'node', node, backup_id=page_1)
+            self.merge_backup(backup_dir, 'node', page_1)
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of backup is missing.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertIn(
+                "ERROR: Full backup {0} has unfinished merge "
+                "with backup {1}".format(full_id, page_2),
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.del_test_dir(module_name, fname)
+
+    def test_failed_merge_after_delete_3(self):
+        """
+        """
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # add database
+        node.safe_psql(
+            'postgres',
+            'CREATE DATABASE testdb')
+
+        dboid = node.safe_psql(
+            "postgres",
+            "select oid from pg_database where datname = 'testdb'").rstrip()
+
+        # take FULL backup
+        full_id = self.backup_node(
+            backup_dir, 'node', node, options=['--stream'])
+
+        # drop database
+        node.safe_psql(
+            'postgres',
+            'DROP DATABASE testdb')
+
+        # take PAGE backup
+        page_id = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # create database
+        node.safe_psql(
+            'postgres',
+            'create DATABASE testdb')
+
+        page_id_2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        gdb = self.merge_backup(
+            backup_dir, 'node', page_id,
+            gdb=True, options=['--log-level-console=verbose'])
+
+        gdb.set_breakpoint('delete_backup_files')
+        gdb.run_until_break()
+
+        gdb.set_breakpoint('pgFileDelete')
+        gdb.continue_execution_until_break(20)
+
+        gdb._execute('signal SIGKILL')
+
+        # backup half-merged
+        self.assertEqual(
+            'MERGED', self.show_pb(backup_dir, 'node')[0]['status'])
+
+        self.assertEqual(
+            full_id, self.show_pb(backup_dir, 'node')[0]['id'])
+
+        db_path = os.path.join(
+            backup_dir, 'backups', 'node', full_id)
+
+        # FULL backup is missing now
+        shutil.rmtree(db_path)
+
+        try:
+            self.merge_backup(
+                backup_dir, 'node', page_id_2,
+                options=['--log-level-console=verbose'])
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because of missing parent.\n "
+                "Output: {0} \n CMD: {1}".format(
+                    repr(self.output), self.cmd))
+        except ProbackupException as e:
+            self.assertTrue(
+                "ERROR: Failed to find parent full backup for {0}".format(
+                    page_id_2) in e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        self.del_test_dir(module_name, fname)
+
     # @unittest.skip("skip")
     def test_merge_backup_from_future(self):
         """
@@ -2085,8 +2240,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertTrue(
-                "ERROR: Parent full backup for the given "
-                "backup {0} was not found".format(
+                "ERROR: Failed to find parent full backup for {0}".format(
                     page_id_a3) in e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -928,7 +928,8 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
             pg_options={
                 'autovacuum': 'off',
                 'checkpoint_timeout': '1h',
-                'wal_level': 'replica'})
+                'wal_level': 'replica',
+                'shared_buffers': '128MB'})

         if self.get_version(master) < self.version_to_num('9.6.0'):
             self.del_test_dir(module_name, fname)
@@ -966,14 +967,14 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
         self.switch_wal_segment(master)
         self.switch_wal_segment(master)

-        self.wait_until_replica_catch_with_master(master, replica)
-
         master.safe_psql(
             'postgres',
             'CREATE TABLE t1 AS '
             'SELECT i, repeat(md5(i::text),5006056) AS fat_attr '
             'FROM generate_series(0,10) i')

+        self.wait_until_replica_catch_with_master(master, replica)
+
         output = self.backup_node(
             backup_dir, 'replica', replica,
             options=[
@@ -585,46 +585,22 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
             options=['--retention-window=1', '--expired', '--merge-expired'])

         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_a, page_id_a2),
             output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_a1, backup_id_a), output)
+            "Rename merged full backup {0} to {1}".format(
+                backup_id_a, page_id_a2), output)

-        self.assertIn(
-            "Rename {0} to {1}".format(
-                backup_id_a, page_id_a1), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_a2, page_id_a1), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                page_id_a1, page_id_a2), output)
-
         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_b, page_id_b2),
             output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b1, backup_id_b), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                backup_id_b, page_id_b1), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b2, page_id_b1), output)
-
-        self.assertIn(
-            "Rename {0} to {1}".format(
-                page_id_b1, page_id_b2), output)
+            "Rename merged full backup {0} to {1}".format(
+                backup_id_b, page_id_b2), output)

         self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)

@@ -979,64 +955,295 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         output = self.delete_expired(
             backup_dir, 'node',
             options=[
-                '--retention-window=1', '--expired',
+                '--retention-window=1', '--delete-expired',
                 '--merge-expired', '--log-level-console=log'])

         self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)

         # Merging chain A
         self.assertIn(
             "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_a, page_id_a3),
             output)

         self.assertIn(
             "INFO: Rename merged full backup {0} to {1}".format(
                 backup_id_a, page_id_a3), output)

         # self.assertIn(
         #     "WARNING: Backup {0} has multiple valid descendants. "
         #     "Automatic merge is not possible.".format(
         #         page_id_a1), output)

         self.assertIn(
             "LOG: Consider backup {0} for purge".format(
                 page_id_a2), output)

         # Merge chain B
         self.assertIn(
             "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_b, page_id_b3),
             output)

         self.assertIn(
             "INFO: Rename merged full backup {0} to {1}".format(
                 backup_id_b, page_id_b3), output)

         self.assertIn(
             "Delete: {0}".format(page_id_a2), output)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[1]['id'],
             page_id_b3)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[0]['id'],
             page_id_a3)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[1]['backup-mode'],
             'FULL')

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[0]['backup-mode'],
             'FULL')

         # Clean after yourself
         self.del_test_dir(module_name, fname)

+    # @unittest.skip("skip")
+    def test_basic_window_merge_multiple_descendants_1(self):
+        """
+        PAGEb3
+          |          PAGEa3
+        -----------------------------retention window
+        PAGEb2            /
+          |          PAGEa2  /
+        PAGEb1   \       /
+          |          PAGEa1
+        FULLb     |
+                  FULLa
+        """
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(
+            base_dir=os.path.join(module_name, fname, 'node'),
+            initdb_params=['--data-checksums'],
+            pg_options={'autovacuum': 'off'})
+
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.pgbench_init(scale=3)
+
+        # Take FULL BACKUPs
+        backup_id_a = self.backup_node(backup_dir, 'node', node)
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        backup_id_b = self.backup_node(backup_dir, 'node', node)
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # Change FULLb backup status to ERROR
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        page_id_a1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # Change FULLb to OK
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        # Change PAGEa1 to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'ERROR')
+
+        # PAGEa1 ERROR
+        # FULLb  OK
+        # FULLa  OK
+
+        page_id_b1 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb1 OK
+        # PAGEa1 ERROR
+        # FULLb  OK
+        # FULLa  OK
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # Change PAGEa1 to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a1, 'OK')
+
+        # Change PAGEb1 and FULLb to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb  ERROR
+        # FULLa  OK
+
+        page_id_a2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # PAGEa2 OK
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb  ERROR
+        # FULLa  OK
+
+        # Change PAGEb1 and FULLb to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        # Change PAGEa2 and FULLa to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', backup_id_a, 'ERROR')
+
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb  OK
+        # FULLa  ERROR
+
+        page_id_b2 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # PAGEb2 OK
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb  OK
+        # FULLa  ERROR
+
+        # Change PAGEb2 and PAGEb1 to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_b2, 'ERROR')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'ERROR')
+
+        # and FULL stuff
+        self.change_backup_status(backup_dir, 'node', backup_id_a, 'OK')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
+
+        # PAGEb2 ERROR
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb  ERROR
+        # FULLa  OK
+
+        page_id_a3 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+        # pgbench = node.pgbench(options=['-T', '10', '-c', '2'])
+        # pgbench.wait()
+
+        # PAGEa3 OK
+        # PAGEb2 ERROR
+        # PAGEa2 ERROR
+        # PAGEb1 ERROR
+        # PAGEa1 OK
+        # FULLb  ERROR
+        # FULLa  OK
+
+        # Change PAGEa3 to ERROR
+        self.change_backup_status(backup_dir, 'node', page_id_a3, 'ERROR')
+
+        # Change PAGEb2, PAGEb1 and FULLb to OK
+        self.change_backup_status(backup_dir, 'node', page_id_b2, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+        self.change_backup_status(backup_dir, 'node', backup_id_b, 'OK')
+
+        page_id_b3 = self.backup_node(
+            backup_dir, 'node', node, backup_type='page')
+
+        # PAGEb3 OK
+        # PAGEa3 ERROR
+        # PAGEb2 OK
+        # PAGEa2 ERROR
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb  OK
+        # FULLa  OK
+
+        # Change PAGEa3, PAGEa2 and PAGEb1 status to OK
+        self.change_backup_status(backup_dir, 'node', page_id_a3, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_a2, 'OK')
+        self.change_backup_status(backup_dir, 'node', page_id_b1, 'OK')
+
+        # PAGEb3 OK
+        # PAGEa3 OK
+        # PAGEb2 OK
+        # PAGEa2 OK
+        # PAGEb1 OK
+        # PAGEa1 OK
+        # FULLb  OK
+        # FULLa  OK
+
+        # Check that page_id_a3 and page_id_a2 are both direct descendants of page_id_a1
+        self.assertEqual(
+            self.show_pb(
+                backup_dir, 'node', backup_id=page_id_a3)['parent-backup-id'],
+            page_id_a1)
+
+        self.assertEqual(
+            self.show_pb(
+                backup_dir, 'node', backup_id=page_id_a2)['parent-backup-id'],
+            page_id_a1)
+
+        # Purge backups
+        backups = os.path.join(backup_dir, 'backups', 'node')
+        for backup in os.listdir(backups):
+            if backup in [page_id_a3, page_id_b3, 'pg_probackup.conf']:
+                continue
+
+            with open(
+                    os.path.join(
+                        backups, backup, "backup.control"), "a") as conf:
+                conf.write("recovery_time='{:%Y-%m-%d %H:%M:%S}'\n".format(
+                    datetime.now() - timedelta(days=3)))
+
+        output = self.delete_expired(
+            backup_dir, 'node',
+            options=[
+                '--retention-window=1',
+                '--merge-expired', '--log-level-console=log'])
+
+        self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3)

         # Merging chain A
         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
+            "Merge incremental chain between full backup {0} and backup {1}".format(
                 backup_id_a, page_id_a3),
             output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_a1, backup_id_a), output)
+            "INFO: Rename merged full backup {0} to {1}".format(
+                backup_id_a, page_id_a3), output)

-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                backup_id_a, page_id_a1), output)
-
-        self.assertIn(
-            "WARNING: Backup {0} has multiple valid descendants. "
-            "Automatic merge is not possible.".format(
-                page_id_a1), output)
+        # self.assertIn(
+        #     "WARNING: Backup {0} has multiple valid descendants. "
+        #     "Automatic merge is not possible.".format(
+        #         page_id_a1), output)

         # Merge chain B
         self.assertIn(
-            "Merge incremental chain between FULL backup {0} and backup {1}".format(
-                backup_id_b, page_id_b3),
-            output)
+            "Merge incremental chain between full backup {0} and backup {1}".format(
+                backup_id_b, page_id_b3), output)

         self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b1, backup_id_b), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                backup_id_b, page_id_b1), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b2, page_id_b1), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                page_id_b1, page_id_b2), output)
-
-        self.assertIn(
-            "Merging backup {0} with backup {1}".format(
-                page_id_b3, page_id_b2), output)
-
-        self.assertIn(
-            "INFO: Rename {0} to {1}".format(
-                page_id_b2, page_id_b3), output)
-
-        # this backup deleted because it is not guarded by retention
-        self.assertIn(
-            "INFO: Delete: {0}".format(
-                page_id_a1), output)
+            "INFO: Rename merged full backup {0} to {1}".format(
+                backup_id_b, page_id_b3), output)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[2]['id'],
@@ -1048,7 +1255,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[0]['id'],
-            page_id_a1)
+            page_id_a2)

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[2]['backup-mode'],
@@ -1056,11 +1263,17 @@ class RetentionTest(ProbackupTest, unittest.TestCase):

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[1]['backup-mode'],
-            'PAGE')
+            'FULL')

         self.assertEqual(
             self.show_pb(backup_dir, 'node')[0]['backup-mode'],
-            'FULL')
+            'PAGE')

+        output = self.delete_expired(
+            backup_dir, 'node',
+            options=[
+                '--retention-window=1',
+                '--delete-expired', '--log-level-console=log'])
+
         # Clean after yourself
         self.del_test_dir(module_name, fname)
@@ -1596,7 +1809,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         # create pair of MERGING backup as a result of failed merge
         gdb = self.merge_backup(
             backup_dir, 'node', delta_id, gdb=True)
-        gdb.set_breakpoint('copy_file')
+        gdb.set_breakpoint('backup_non_data_file')
         gdb.run_until_break()
         gdb.continue_execution_until_break(2)
         gdb._execute('signal SIGKILL')
@@ -2491,10 +2704,14 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
         self.assertTrue(show_tli4_before)
         self.assertTrue(show_tli5_before)

+        sleep(5)
+
         output = self.delete_pb(
             backup_dir, 'node',
             options=['--delete-wal', '--wal-depth=2', '--log-level-console=verbose'])

+        # print(output)
+
         show_tli1_after = self.show_archive(backup_dir, 'node', tli=1)
         show_tli2_after = self.show_archive(backup_dir, 'node', tli=2)
         show_tli3_after = self.show_archive(backup_dir, 'node', tli=3)