mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-01-26 11:54:25 +02:00

Merge branch 'master' into issue_90

Grigory Smolkin 2019-07-04 19:35:02 +03:00
commit 080988bbda
17 changed files with 1330 additions and 637 deletions

File diff suppressed because it is too large.

View File

@@ -110,7 +110,7 @@ gen_probackup_project.pl C:\path_to_postgresql_source_tree
## Documentation
Currently the latest documentation can be found at [Postgres Pro Enterprise documentation](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup).
Currently the latest documentation can be found at [github](https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md) and [Postgres Pro Enterprise documentation](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup).
## Licence

View File

@@ -344,6 +344,9 @@ do_backup_instance(PGconn *backup_conn)
dir_list_file(backup_files_list, parray_get(external_dirs, i),
false, true, false, i+1, FIO_DB_HOST);
/* close ssh session in main thread */
fio_disconnect();
/* Sanity check for backup_files_list, thank you, Windows:
* https://github.com/postgrespro/pg_probackup/issues/48
*/
@@ -512,6 +515,9 @@ do_backup_instance(PGconn *backup_conn)
parray_free(prev_backup_filelist);
}
/* Notify end of backup */
pg_stop_backup(&current, pg_startbackup_conn);
/* In case of backup from replica >= 9.6 we must fix minRecPoint.
* First we must find pg_control in backup_files_list.
*/
@@ -532,13 +538,16 @@ do_backup_instance(PGconn *backup_conn)
break;
}
}
if (!pg_control)
elog(ERROR, "Failed to find file \"%s\" in backup filelist.",
pg_control_path);
set_min_recovery_point(pg_control, database_path, current.stop_lsn);
}
/* Notify end of backup */
pg_stop_backup(&current, pg_startbackup_conn);
if (current.from_replica && !exclusive_backup)
set_min_recovery_point(pg_control, database_path, current.stop_lsn);
/* close ssh session in main thread */
fio_disconnect();
/* Add archived xlog files into the list of files of this backup */
if (stream_wal)
@@ -2143,6 +2152,9 @@ backup_files(void *arg)
elog(WARNING, "unexpected file type %d", buf.st_mode);
}
/* ssh connection no longer needed */
fio_disconnect();
/* Close connection */
if (arguments->conn_arg.conn)
pgut_disconnect(arguments->conn_arg.conn);

View File

@@ -356,7 +356,7 @@ prepare_page(ConnectionArgs *arguments,
((strict && !is_ptrack_support) || !strict))
{
/* show this message for checkdb or backup without ptrack support */
elog(WARNING, "CORRUPTION in file %s, block %u",
elog(WARNING, "Corruption detected in file \"%s\", block %u",
file->path, blknum);
}
@@ -585,10 +585,7 @@ backup_data_file(backup_files_arg* arguments,
}
if (file->size % BLCKSZ != 0)
{
fio_fclose(in);
elog(WARNING, "File: %s, invalid file size %zu", file->path, file->size);
}
elog(WARNING, "File: \"%s\", invalid file size %zu", file->path, file->size);
/*
* Compute expected number of blocks in the file.
@@ -625,7 +622,7 @@ backup_data_file(backup_files_arg* arguments,
if (rc == PAGE_CHECKSUM_MISMATCH && is_ptrack_support)
goto RetryUsingPtrack;
if (rc < 0)
elog(ERROR, "Failed to read file %s: %s",
elog(ERROR, "Failed to read file \"%s\": %s",
file->path, rc == PAGE_CHECKSUM_MISMATCH ? "data file checksum mismatch" : strerror(-rc));
n_blocks_read = rc;
}
@@ -1212,10 +1209,7 @@ check_data_file(ConnectionArgs *arguments,
}
if (file->size % BLCKSZ != 0)
{
fclose(in);
elog(WARNING, "File: %s, invalid file size %zu", file->path, file->size);
}
elog(WARNING, "File: \"%s\", invalid file size %zu", file->path, file->size);
/*
* Compute expected number of blocks in the file.

View File

@@ -209,7 +209,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
time_t days_threshold = 0;
/* For fancy reporting */
float actual_window = 0;
uint32 actual_window = 0;
/* Get current time */
current_time = time(NULL);
@@ -252,7 +252,9 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
cur_full_backup_num++;
}
/* Check if backup is needed by retention policy */
/* Check if backup is needed by retention policy
* TODO: consider that an ERROR backup most likely has recovery_time == 0
*/
if ((days_threshold == 0 || (days_threshold > backup->recovery_time)) &&
(instance_config.retention_redundancy <= (n_full_backups - cur_full_backup_num)))
{
@@ -324,7 +326,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
}
/* Message about retention state of backups
* TODO: Float is ugly, rewrite somehow.
* TODO: message is ugly, rewrite it to something like the 'show' table output in stdout.
*/
cur_full_backup_num = 1;
@@ -340,9 +342,9 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
if (backup->recovery_time == 0)
actual_window = 0;
else
actual_window = ((float)current_time - (float)backup->recovery_time)/(60 * 60 * 24);
actual_window = (current_time - backup->recovery_time)/(60 * 60 * 24);
elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %.2fd/%ud. %s",
elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %ud/%ud. %s",
base36enc(backup->start_time),
pgBackupGetBackupMode(backup),
status2str(backup->status),
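A side note on the change above: the window is now computed with integer arithmetic, so C division truncates toward zero. A minimal standalone sketch with made-up timestamps:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
    time_t current_time = 1600000000;              /* illustrative "now" */
    time_t recovery_time = current_time - 250560;  /* ~2.9 days earlier */
    uint32_t actual_window = (current_time - recovery_time) / (60 * 60 * 24);
    printf("Time Window: %ud\n", (unsigned) actual_window);  /* prints "Time Window: 2d" */
    return 0;
}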
@@ -801,10 +803,13 @@ delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
int
do_delete_instance(void)
{
parray *backup_list;
int i;
parray *backup_list;
parray *xlog_files_list;
int i;
int rc;
char instance_config_path[MAXPGPATH];
/* Delete all backups. */
backup_list = catalog_get_backup_list(INVALID_BACKUP_ID);
@@ -821,23 +826,40 @@ do_delete_instance(void)
parray_free(backup_list);
/* Delete all wal files. */
delete_walfiles(InvalidXLogRecPtr, 0, instance_config.xlog_seg_size);
xlog_files_list = parray_new();
dir_list_file(xlog_files_list, arclog_path, false, false, false, 0, FIO_BACKUP_HOST);
for (i = 0; i < parray_num(xlog_files_list); i++)
{
pgFile *wal_file = (pgFile *) parray_get(xlog_files_list, i);
if (S_ISREG(wal_file->mode))
{
rc = unlink(wal_file->path);
if (rc != 0)
elog(WARNING, "Failed to remove file \"%s\": %s",
wal_file->path, strerror(errno));
}
}
/* Cleanup */
parray_walk(xlog_files_list, pgFileFree);
parray_free(xlog_files_list);
/* Delete backup instance config file */
join_path_components(instance_config_path, backup_instance_path, BACKUP_CATALOG_CONF_FILE);
if (remove(instance_config_path))
{
elog(ERROR, "can't remove \"%s\": %s", instance_config_path,
elog(ERROR, "Can't remove \"%s\": %s", instance_config_path,
strerror(errno));
}
/* Delete instance root directories */
if (rmdir(backup_instance_path) != 0)
elog(ERROR, "can't remove \"%s\": %s", backup_instance_path,
elog(ERROR, "Can't remove \"%s\": %s", backup_instance_path,
strerror(errno));
if (rmdir(arclog_path) != 0)
elog(ERROR, "can't remove \"%s\": %s", arclog_path,
elog(ERROR, "Can't remove \"%s\": %s", arclog_path,
strerror(errno));
elog(INFO, "Instance '%s' successfully deleted", instance_name);

View File

@@ -122,7 +122,7 @@ static int BlackListCompare(const void *str1, const void *str2);
static char dir_check_file(pgFile *file);
static void dir_list_file_internal(parray *files, pgFile *parent, bool exclude,
bool omit_symlink, parray *black_list,
bool follow_symlink, parray *black_list,
int external_dir_num, fio_location location);
static void opt_path_map(ConfigOption *opt, const char *arg,
TablespaceList *list, const char *type);
@@ -159,14 +159,14 @@ dir_create_dir(const char *dir, mode_t mode)
}
pgFile *
pgFileNew(const char *path, const char *rel_path, bool omit_symlink,
pgFileNew(const char *path, const char *rel_path, bool follow_symlink,
int external_dir_num, fio_location location)
{
struct stat st;
pgFile *file;
/* stat the file */
if (fio_stat(path, &st, omit_symlink, location) < 0)
if (fio_stat(path, &st, follow_symlink, location) < 0)
{
/* file not found is not an error case */
if (errno == ENOENT)
@@ -445,11 +445,11 @@ BlackListCompare(const void *str1, const void *str2)
* List files, symbolic links and directories in the directory "root" and add
* pgFile objects to "files". We add "root" to "files" if add_root is true.
*
* When omit_symlink is true, symbolic link is ignored and only file or
When follow_symlink is true, the symbolic link itself is ignored and only
the file or directory it points to is listed.
*/
void
dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink,
bool add_root, int external_dir_num, fio_location location)
{
pgFile *file;
@@ -490,7 +490,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
parray_qsort(black_list, BlackListCompare);
}
file = pgFileNew(root, "", omit_symlink, external_dir_num, location);
file = pgFileNew(root, "", follow_symlink, external_dir_num, location);
if (file == NULL)
{
/* For external directory this is not ok */
@@ -512,7 +512,7 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
if (add_root)
parray_append(files, file);
dir_list_file_internal(files, file, exclude, omit_symlink, black_list,
dir_list_file_internal(files, file, exclude, follow_symlink, black_list,
external_dir_num, location);
if (!add_root)
@@ -731,7 +731,7 @@ dir_check_file(pgFile *file)
*/
static void
dir_list_file_internal(parray *files, pgFile *parent, bool exclude,
bool omit_symlink, parray *black_list,
bool follow_symlink, parray *black_list,
int external_dir_num, fio_location location)
{
DIR *dir;
@@ -764,7 +764,7 @@ dir_list_file_internal(parray *files, pgFile *parent, bool exclude,
join_path_components(child, parent->path, dent->d_name);
join_path_components(rel_child, parent->rel_path, dent->d_name);
file = pgFileNew(child, rel_child, omit_symlink, external_dir_num,
file = pgFileNew(child, rel_child, follow_symlink, external_dir_num,
location);
if (file == NULL)
continue;
@@ -821,7 +821,7 @@ dir_list_file_internal(parray *files, pgFile *parent, bool exclude,
* recursively.
*/
if (S_ISDIR(file->mode))
dir_list_file_internal(files, file, exclude, omit_symlink,
dir_list_file_internal(files, file, exclude, follow_symlink,
black_list, external_dir_num, location);
}

View File

@@ -97,9 +97,11 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--format=format]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-C]\n"));
@@ -126,6 +128,7 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n"));
@@ -143,6 +146,7 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-i backup-id] [--progress] [-j num-threads]\n"));
@@ -151,22 +155,27 @@ help_pg_probackup(void)
printf(_(" [--recovery-target-timeline=timeline]\n"));
printf(_(" [--recovery-target-name=target-name]\n"));
printf(_(" [--skip-block-validation]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME);
printf(_(" [-D pgdata-path] [--progress] [-j num-threads]\n"));
printf(_(" [--amcheck] [--skip-block-validation]\n"));
printf(_(" [--heapallindexed]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME);
printf(_(" [--instance=instance_name [-i backup-id]]\n"));
printf(_(" [--format=format]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" [--wal] [-i backup-id | --expired | --merge-expired]\n"));
printf(_(" [--dry-run]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" -i backup-id [--progress] [-j num-threads]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n"));
@@ -174,9 +183,11 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s del-instance -B backup-path\n"), PROGRAM_NAME);
printf(_(" --instance=instance_name\n"));
printf(_(" [--help]\n"));
printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
@@ -188,6 +199,7 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
printf(_("\n %s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME);
printf(_(" --wal-file-path=wal-file-path\n"));
@@ -195,6 +207,7 @@ help_pg_probackup(void)
printf(_(" [--remote-proto] [--remote-host]\n"));
printf(_(" [--remote-port] [--remote-path] [--remote-user]\n"));
printf(_(" [--ssh-options]\n"));
printf(_(" [--help]\n"));
if ((PROGRAM_URL || PROGRAM_EMAIL))
{

View File

@@ -577,6 +577,7 @@ extern bool in_backup_list(parray *backup_list, pgBackup *target_backup);
extern int get_backup_index_number(parray *backup_list, pgBackup *backup);
extern bool launch_agent(void);
extern void launch_ssh(char* argv[]);
extern void wait_ssh(void);
#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
#define COMPRESS_LEVEL_DEFAULT 1
@@ -586,7 +587,7 @@ extern const char* deparse_compress_alg(int alg);
/* in dir.c */
extern void dir_list_file(parray *files, const char *root, bool exclude,
bool omit_symlink, bool add_root, int external_dir_num, fio_location location);
bool follow_symlink, bool add_root, int external_dir_num, fio_location location);
extern void create_data_directories(parray *dest_files,
const char *data_dir,
@@ -619,7 +620,7 @@ extern bool fileExists(const char *path, fio_location location);
extern size_t pgFileSize(const char *path);
extern pgFile *pgFileNew(const char *path, const char *rel_path,
bool omit_symlink, int external_dir_num,
bool follow_symlink, int external_dir_num,
fio_location location);
extern pgFile *pgFileInit(const char *path, const char *rel_path);
extern void pgFileDelete(pgFile *file);

View File

@@ -333,6 +333,21 @@ int fio_open(char const* path, int mode, fio_location location)
return fd;
}
/* Close ssh session */
void
fio_disconnect(void)
{
if (fio_stdin)
{
SYS_CHECK(close(fio_stdin));
SYS_CHECK(close(fio_stdout));
fio_stdin = 0;
fio_stdout = 0;
wait_ssh();
}
}
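The ordering here is deliberate: closing the pipe descriptors delivers EOF to the remote agent, letting the ssh child exit, and wait_ssh() then reaps it. A minimal standalone sketch of the same close-then-reap pattern (names and descriptors are illustrative, not pg_probackup's):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void
disconnect_child(int in_fd, int out_fd, pid_t child)
{
    int status;
    close(in_fd);
    close(out_fd);                /* child sees EOF on its stdin and exits */
    waitpid(child, &status, 0);   /* reap the child so no zombie is left */
    printf("child %d exited, status %d\n", (int) child, status);
}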
/* Open stdio file */
FILE* fio_fopen(char const* path, char const* mode, fio_location location)
{
@@ -340,14 +355,30 @@ FILE* fio_fopen(char const* path, char const* mode, fio_location location)
if (fio_is_remote(location))
{
int flags = O_RDWR|O_CREAT;
int flags = 0;
int fd;
if (strcmp(mode, PG_BINARY_W) == 0) {
flags |= O_TRUNC|PG_BINARY;
} else if (strncmp(mode, PG_BINARY_R, strlen(PG_BINARY_R)) == 0) {
flags |= PG_BINARY;
flags = O_TRUNC|PG_BINARY|O_RDWR|O_CREAT;
} else if (strcmp(mode, "w") == 0) {
flags = O_TRUNC|O_RDWR|O_CREAT;
} else if (strcmp(mode, PG_BINARY_R) == 0) {
flags = O_RDONLY|PG_BINARY;
} else if (strcmp(mode, "r") == 0) {
flags = O_RDONLY;
} else if (strcmp(mode, PG_BINARY_R "+") == 0) {
/* stdio fopen("rb+") actually doesn't create unexisted file, but probackup frequently
* needs to open existed file or create new one if not exists.
* In stdio it can be done using two fopen calls: fopen("r+") and if failed then fopen("w").
* But to eliminate extra call which especially critical in case of remote connection
* we change r+ semantic to create file if not exists.
*/
flags = O_RDWR|O_CREAT|PG_BINARY;
} else if (strcmp(mode, "r+") == 0) { /* see comment above */
flags |= O_RDWR|O_CREAT;
} else if (strcmp(mode, "a") == 0) {
flags |= O_APPEND;
flags |= O_CREAT|O_RDWR|O_APPEND;
} else {
Assert(false);
}
fd = fio_open(path, flags, location);
if (fd >= 0)
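For comparison, a minimal sketch of the two-call stdio pattern the comment above refers to (the helper name is hypothetical); fio_fopen avoids the second round trip by adding O_CREAT to the "r+" flags instead:

#include <errno.h>
#include <stdio.h>

static FILE *
open_or_create(const char *path)
{
    FILE *f = fopen(path, "r+b");   /* update an existing file; fails with ENOENT if missing */
    if (f == NULL && errno == ENOENT)
        f = fopen(path, "w+b");     /* second call creates the file */
    return f;
}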
@@ -632,7 +663,7 @@ int fio_fstat(int fd, struct stat* st)
}
/* Get information about file */
int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_location location)
int fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location location)
{
if (fio_is_remote(location))
{
@@ -641,7 +672,7 @@ int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_locati
hdr.cop = FIO_STAT;
hdr.handle = -1;
hdr.arg = follow_symlinks;
hdr.arg = follow_symlink;
hdr.size = path_len;
IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr));
@@ -660,7 +691,7 @@ int fio_stat(char const* path, struct stat* st, bool follow_symlinks, fio_locati
}
else
{
return follow_symlinks ? stat(path, st) : lstat(path, st);
return follow_symlink ? stat(path, st) : lstat(path, st);
}
}
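As a reminder of what the renamed follow_symlink flag selects between, a small standalone sketch (the path is illustrative): stat() reports the link target, lstat() the link itself.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    struct stat st;
    if (lstat("pg_wal", &st) == 0 && S_ISLNK(st.st_mode))
        printf("pg_wal is itself a symlink\n");      /* follow_symlink = false */
    if (stat("pg_wal", &st) == 0 && S_ISDIR(st.st_mode))
        printf("pg_wal resolves to a directory\n");  /* follow_symlink = true */
    return 0;
}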
@@ -1148,8 +1179,10 @@ int fio_send_pages(FILE* in, FILE* out, pgFile *file,
IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr));
Assert(hdr.cop == FIO_PAGE);
if (hdr.arg < 0) /* read error */
return hdr.arg;
if ((int)hdr.arg < 0) /* read error */
{
return (int)hdr.arg;
}
blknum = hdr.arg;
if (hdr.size == 0) /* end of segment */
@@ -1205,7 +1238,7 @@ static void fio_send_pages_impl(int fd, int out, fio_send_request* req)
{
hdr.arg = -errno;
hdr.size = 0;
Assert(hdr.arg < 0);
Assert((int)hdr.arg < 0);
IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
}
else

View File

@@ -90,6 +90,7 @@ extern int fio_seek(int fd, off_t offs);
extern int fio_fstat(int fd, struct stat* st);
extern int fio_truncate(int fd, off_t size);
extern int fio_close(int fd);
extern void fio_disconnect(void);
extern int fio_rename(char const* old_path, char const* new_path, fio_location location);
extern int fio_symlink(char const* target, char const* link_path, fio_location location);

View File

@@ -5,6 +5,12 @@
#include <sys/wait.h>
#include <signal.h>
#ifdef WIN32
#define __thread __declspec(thread)
#else
#include <pthread.h>
#endif
#include "pg_probackup.h"
#include "file.h"
@@ -52,7 +58,8 @@ static int split_options(int argc, char* argv[], int max_options, char* options)
return argc;
}
static int child_pid;
static __thread int child_pid;
#if 0
static void kill_child(void)
{
@@ -60,6 +67,14 @@ static void kill_child(void)
}
#endif
void wait_ssh(void)
{
int status;
waitpid(child_pid, &status, 0);
elog(LOG, "SSH process %d is terminated with status %d", child_pid, status);
}
#ifdef WIN32
void launch_ssh(char* argv[])
{

View File

@@ -12,8 +12,9 @@ from . import init, merge, option, show, compatibility, \
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
if os.environ['PG_PROBACKUP_TEST_BASIC'] == 'ON':
loader.testMethodPrefix = 'test_basic'
if 'PG_PROBACKUP_TEST_BASIC' in os.environ:
if os.environ['PG_PROBACKUP_TEST_BASIC'] == 'ON':
loader.testMethodPrefix = 'test_basic'
# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))

View File

@@ -452,22 +452,23 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
filename = filename_orig + '.partial'
file = os.path.join(wals_dir, filename)
# emulate stale .partial file
with open(file, 'ab') as f:
f.write(b"blahblah")
f.flush()
f.close()
self.switch_wal_segment(node)
sleep(15)
sleep(20)
# check that segment is archived
if self.archive_compress:
filename_orig = filename_orig + '.gz'
file = os.path.join(wals_dir, filename_orig)
self.assertTrue(os.path.isfile(file))
# successful validate means that archive-push reused stale wal segment
self.validate_pb(
backup_dir, 'node',
options=['--recovery-target-xid={0}'.format(xid)])

View File

@@ -423,6 +423,123 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_detect_corruption(self):
"""make node, corrupt some page, check that backup failed"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,1000) i")
node.safe_psql(
"postgres",
"CHECKPOINT;")
heap_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
f.seek(9000)
f.write(b"bla")
f.flush()
f.close()
try:
self.backup_node(
backup_dir, 'node', node,
backup_type="full", options=["-j", "4", "--stream"])
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because tablespace mapping is incorrect"
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd))
except ProbackupException as e:
if self.remote:
self.assertTrue(
"ERROR: Failed to read file" in e.message and
"data file checksum mismatch" in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
else:
self.assertIn(
'WARNING: Corruption detected in file',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
self.assertIn(
'ERROR: Data file corruption',
e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_truncate_misaligned(self):
"""
make node, truncate a file to a size that is not a multiple of BLCKSZ,
take backup
"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
"postgres",
"create table t_heap as select 1 as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,100000) i")
node.safe_psql(
"postgres",
"CHECKPOINT;")
heap_path = node.safe_psql(
"postgres",
"select pg_relation_filepath('t_heap')").rstrip()
heap_size = node.safe_psql(
"postgres",
"select pg_relation_size('t_heap')")
with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
f.truncate(int(heap_size) - 4096)
f.flush()
f.close()
output = self.backup_node(
backup_dir, 'node', node, backup_type="full",
options=["-j", "4", "--stream"], return_id=False)
self.assertIn("WARNING: File", output)
self.assertIn("invalid file size", output)
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_tablespace_in_pgdata_pgpro_1376(self):
"""PGPRO-1376 """
@@ -1365,3 +1482,113 @@ class BackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_with_least_privileges_role(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
# pg_options={'ptrack_enable': 'on'},
initdb_params=['--data-checksums'],
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
node.safe_psql(
'postgres',
'CREATE DATABASE backupdb')
node.safe_psql(
'backupdb',
"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
"REVOKE ALL ON SCHEMA public from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
"CREATE ROLE backup WITH LOGIN REPLICATION; "
"GRANT CONNECT ON DATABASE backupdb to backup; "
"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
# for partial restore, checkdb and ptrack
"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
# for exclusive backup for PG 9.5 and ptrack
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
)
# ptrack functions
# for fname in [
# 'oideq(oid, oid)',
# 'ptrack_version()',
# 'pg_ptrack_clear()',
# 'pg_ptrack_control_lsn()',
# 'pg_ptrack_get_and_clear_db(oid, oid)',
# 'pg_ptrack_get_and_clear(oid, oid)',
# 'pg_ptrack_get_block_2(oid, oid, oid, bigint)']:
# try:
# node.safe_psql(
# "backupdb",
# "GRANT EXECUTE ON FUNCTION pg_catalog.{0} "
# "TO backup".format(fname))
# except:
# pass
# FULL backup
self.backup_node(
backup_dir, 'node', node,
datname='backupdb', options=['--stream', '-U', 'backup'])
self.backup_node(
backup_dir, 'node', node,
datname='backupdb', options=['-U', 'backup'])
# PAGE
self.backup_node(
backup_dir, 'node', node, backup_type='page',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'node', node, backup_type='page', datname='backupdb',
options=['--stream', '-U', 'backup'])
# DELTA
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
datname='backupdb', options=['-U', 'backup'])
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
datname='backupdb', options=['--stream', '-U', 'backup'])
# PTRACK
# self.backup_node(
# backup_dir, 'node', node, backup_type='ptrack',
# datname='backupdb', options=['-U', 'backup'])
# self.backup_node(
# backup_dir, 'node', node, backup_type='ptrack',
# datname='backupdb', options=['--stream', '-U', 'backup'])
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@@ -532,49 +532,3 @@ class CompatibilityTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_concurrent_drop_table(self):
""""""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node, old_binary=True)
node.slow_start()
node.pgbench_init(scale=1)
# FULL backup
gdb = self.backup_node(
backup_dir, 'node', node,
options=['--stream', '--compress', '--log-level-file=VERBOSE'],
gdb=True, old_binary=True)
gdb.set_breakpoint('backup_data_file')
gdb.run_until_break()
node.safe_psql(
'postgres',
'DROP TABLE pgbench_accounts')
# do checkpoint to guarantee filenode removal
node.safe_psql(
'postgres',
'CHECKPOINT')
gdb.remove_all_breakpoints()
gdb.continue_execution_until_exit()
# show_backup = self.show_pb(backup_dir, 'node')[0]
# self.assertEqual(show_backup['status'], "OK")
# validate with fresh binary, it MUST be successful
self.validate_pb(backup_dir)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@@ -437,3 +437,91 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_replica_promote(self):
"""
start a backup from the replica, promote the replica during the backup,
check that the backup fails
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
master = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'archive_timeout': '10s',
'checkpoint_timeout': '30s',
'max_wal_size': '32MB'})
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
self.set_archiving(backup_dir, 'master', master)
master.slow_start()
replica = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'replica'))
replica.cleanup()
self.backup_node(backup_dir, 'master', master)
master.psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,165000) i")
self.restore_node(
backup_dir, 'master', replica, options=['-R'])
# Settings for Replica
self.add_instance(backup_dir, 'replica', replica)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
self.set_replica(
master, replica,
replica_name='replica', synchronous=True)
replica.slow_start(replica=True)
master.psql(
"postgres",
"create table t_heap_1 as select i as id, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,165000) i")
self.wait_until_replica_catch_with_master(master, replica)
# start backup from replica
gdb = self.backup_node(
backup_dir, 'replica', replica, gdb=True,
options=['--log-level-file=verbose'])
gdb.set_breakpoint('backup_data_file')
gdb.run_until_break()
gdb.continue_execution_until_break(20)
replica.promote()
gdb.remove_all_breakpoints()
gdb.continue_execution_until_exit()
backup_id = self.show_pb(
backup_dir, 'replica')[0]["id"]
# read log file content
with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
log_content = f.read()
f.close()
self.assertIn(
'ERROR: the standby was promoted during online backup',
log_content)
self.assertIn(
'WARNING: Backup {0} is running, '
'setting its status to ERROR'.format(backup_id),
log_content)
# Clean after yourself
self.del_test_dir(module_name, fname)

View File

@@ -2,6 +2,7 @@ import os
import unittest
from datetime import datetime, timedelta
from .helpers.ptrack_helpers import ProbackupTest
from time import sleep
module_name = 'retention'
@@ -1233,4 +1234,46 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
backup_dir, 'node', node, backup_type='page')
# Change FULLb backup status to ERROR
# self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR')
def test_retention_redundancy_overlapping_chains(self):
""""""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
initdb_params=['--data-checksums'])
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
self.set_config(
backup_dir, 'node', options=['--retention-redundancy=1'])
# Make backups to be purged
self.backup_node(backup_dir, 'node', node)
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Make backups to be kept
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('backup_files')
gdb.run_until_break()
sleep(1)
self.backup_node(backup_dir, 'node', node, backup_type="page")
gdb.remove_all_breakpoints()
gdb.continue_execution_until_exit()
self.backup_node(backup_dir, 'node', node, backup_type="page")
# Purge backups
log = self.delete_expired(
backup_dir, 'node', options=['--expired', '--wal'])
self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2)
# Clean after yourself
self.del_test_dir(module_name, fname)