mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-01-23 11:45:36 +02:00

Merge branch 'master' into PGPRO-421

Commit d59d7e74bc by Sergey Cherkashin, 2018-12-17 20:12:36 +03:00
15 changed files with 664 additions and 141 deletions

View File

@ -27,7 +27,7 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)
char backup_wal_file_path[MAXPGPATH];
char absolute_wal_file_path[MAXPGPATH];
char current_dir[MAXPGPATH];
int64 system_id;
uint64 system_id;
bool is_compress = false;
if (wal_file_name == NULL && wal_file_path == NULL)
@ -50,7 +50,7 @@ do_archive_push(char *wal_file_path, char *wal_file_name, bool overwrite)
if(system_id != instance_config.system_identifier)
elog(ERROR, "Refuse to push WAL segment %s into archive. Instance parameters mismatch."
"Instance '%s' should have SYSTEM_ID = %ld instead of %ld",
"Instance '%s' should have SYSTEM_ID = " UINT64_FORMAT " instead of " UINT64_FORMAT,
wal_file_name, instance_name, instance_config.system_identifier,
system_id);
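
For context, a minimal standalone sketch of why the new message is built by string concatenation rather than a "%ld" placeholder: UINT64_FORMAT is PostgreSQL's portable printf specifier for uint64, so it has to be spliced into the format string at compile time. The macro definition below is only a stand-in for the real one from PostgreSQL's headers.

#include <stdio.h>
#include <inttypes.h>

#define UINT64_FORMAT "%" PRIu64	/* stand-in for PostgreSQL's macro */

int main(void)
{
	uint64_t	system_id = 6551234567890123456ULL;

	/* Adjacent string literals are concatenated at compile time,
	 * producing one format string with the right conversion for uint64. */
	printf("Instance should have SYSTEM_ID = " UINT64_FORMAT "\n", system_id);
	return 0;
}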

View File

@ -847,23 +847,6 @@ pgBackupInit(pgBackup *backup)
backup->extra_dir_str = NULL;
}
/*
* Copy backup metadata from **src** into **dst**.
*/
void
pgBackupCopy(pgBackup *dst, pgBackup *src)
{
pfree(dst->primary_conninfo);
pfree(dst->extra_dir_str);
memcpy(dst, src, sizeof(pgBackup));
if (src->primary_conninfo)
dst->primary_conninfo = pstrdup(src->primary_conninfo);
if (src->extra_dir_str)
dst->extra_dir_str = pstrdup(src->extra_dir_str);
}
/* free pgBackup object */
void
pgBackupFree(void *backup)

View File

@ -232,8 +232,8 @@ read_page_from_file(pgFile *file, BlockNumber blknum,
return 0;
}
else
elog(WARNING, "File: %s, block %u, expected block size %d,"
"but read %lu, try again",
elog(WARNING, "File: %s, block %u, expected block size %u,"
"but read %zu, try again",
file->path, blknum, BLCKSZ, read_len);
}
@ -382,7 +382,7 @@ prepare_page(backup_files_arg *arguments,
else if (page_size != BLCKSZ)
{
free(ptrack_page);
elog(ERROR, "File: %s, block %u, expected block size %d, but read %lu",
elog(ERROR, "File: %s, block %u, expected block size %d, but read %zu",
file->path, absolute_blknum, BLCKSZ, page_size);
}
else
@ -574,7 +574,7 @@ backup_data_file(backup_files_arg* arguments,
if (file->size % BLCKSZ != 0)
{
fclose(in);
elog(ERROR, "File: %s, invalid file size %lu", file->path, file->size);
elog(ERROR, "File: %s, invalid file size %zu", file->path, file->size);
}
/*
@ -789,7 +789,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
read_len = fread(compressed_page.data, 1,
MAXALIGN(header.compressed_size), in);
if (read_len != MAXALIGN(header.compressed_size))
elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
elog(ERROR, "cannot read block %u of \"%s\" read %zu of %d",
blknum, file->path, read_len, header.compressed_size);
/*
@ -829,6 +829,8 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
if (write_header)
{
/* We uncompressed the page, so its size is BLCKSZ */
header.compressed_size = BLCKSZ;
if (fwrite(&header, 1, sizeof(header), out) != sizeof(header))
elog(ERROR, "cannot write header of block %u of \"%s\": %s",
blknum, file->path, strerror(errno));
@ -1592,19 +1594,23 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
if (read_len == 0 && feof(in))
break; /* EOF found */
else if (read_len != 0 && feof(in))
elog(ERROR,
elog(WARNING,
"odd size page found at block %u of \"%s\"",
blknum, file->path);
else
elog(ERROR, "cannot read header of block %u of \"%s\": %s",
elog(WARNING, "cannot read header of block %u of \"%s\": %s",
blknum, file->path, strerror(errno_tmp));
return false;
}
COMP_FILE_CRC32(use_crc32c, crc, &header, read_len);
if (header.block < blknum)
elog(ERROR, "backup is broken at file->path %s block %u",
{
elog(WARNING, "backup is broken at file->path %s block %u",
file->path, blknum);
return false;
}
blknum = header.block;
@ -1620,8 +1626,11 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
read_len = fread(compressed_page.data, 1,
MAXALIGN(header.compressed_size), in);
if (read_len != MAXALIGN(header.compressed_size))
elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
{
elog(WARNING, "cannot read block %u of \"%s\" read %zu of %d",
blknum, file->path, read_len, header.compressed_size);
return false;
}
COMP_FILE_CRC32(use_crc32c, crc, compressed_page.data, read_len);
@ -1648,11 +1657,13 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version,
is_valid = false;
continue;
}
elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
elog(WARNING, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
file->path, uncompressed_size);
return false;
}
if (validate_one_page(page.data, file, blknum,
stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
is_valid = false;
}
else

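A minimal standalone sketch of the pattern these hunks introduce: page-level problems are reported as warnings and make the check return false, so the caller can mark the backup corrupt and keep validating instead of aborting on the first bad block. The names below are illustrative only.

#include <stdbool.h>
#include <stdio.h>

static bool
check_one_page(int blkno, bool page_is_broken)
{
	if (page_is_broken)
	{
		fprintf(stderr, "WARNING: block %d is broken\n", blkno);
		return false;		/* let the caller decide what to do */
	}
	return true;
}

int main(void)
{
	bool	is_valid = true;
	int		blkno;

	for (blkno = 0; blkno < 4; blkno++)
		if (!check_one_page(blkno, blkno == 2))
			is_valid = false;	/* remember the failure, keep scanning */

	printf("file is %s\n", is_valid ? "valid" : "CORRUPT");
	return is_valid ? 0 : 1;
}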
View File

@ -211,7 +211,7 @@ pgFileInit(const char *path)
strcpy(file->path, path); /* enough buffer size guaranteed */
/* Get file name from the path */
file_name = strrchr(file->path, '/');
file_name = last_dir_separator(file->path);
if (file_name == NULL)
file->name = file->path;
else
@ -474,8 +474,8 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list,
extra_dir_num);
if (black_list)
free_dir_list(black_list);
if (!add_root)
pgFileFree(file);
}
/*
@ -1242,18 +1242,8 @@ print_file_list(FILE *out, const parray *files, const char *root,
/* omit root directory portion */
if (root && strstr(path, root) == path)
path = GetRelativePath(path, root);
else if (file->extra_dir_num)
{
if (extra_prefix)
{
char extra_root[MAXPGPATH];
makeExtraDirPathByNum(extra_root, extra_prefix,
file->extra_dir_num);
path = GetRelativePath(path, extra_root);
}
else
path = GetRelativePath(path, file->extradir);
}
else if (file->extra_dir_num && !extra_prefix)
path = GetRelativePath(path, file->extradir);
fprintf(out, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", "
"\"mode\":\"%u\", \"is_datafile\":\"%u\", "
@ -1469,17 +1459,15 @@ dir_read_file_list(const char *root, const char *extra_prefix, const char *file_
get_control_value(buf, "compress_alg", compress_alg_string, NULL, false);
get_control_value(buf, "extra_dir_num", NULL, &extra_dir_num, false);
if (root)
if (extra_dir_num)
{
char temp[MAXPGPATH];
if (extra_dir_num && extra_prefix)
{
char temp[MAXPGPATH];
Assert(extra_prefix);
makeExtraDirPathByNum(temp, extra_prefix, extra_dir_num);
join_path_components(filepath, temp, path);
}
else
join_path_components(filepath, root, path);
makeExtraDirPathByNum(temp, extra_prefix, extra_dir_num);
join_path_components(filepath, temp, path);
}
else if (root)
join_path_components(filepath, root, path);
else
strcpy(filepath, path);
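
A short standalone sketch of why strrchr(path, '/') was replaced: on Windows a path may use backslashes, and PostgreSQL's last_dir_separator() accepts the platform's directory separators instead of a hard-coded '/'. The helper below only illustrates the idea and is not the real implementation.

#include <stdio.h>

static char *
my_last_dir_separator(char *path)
{
	char   *last = NULL;
	char   *p;

	for (p = path; *p; p++)
		if (*p == '/' || *p == '\\')
			last = p;
	return last;
}

int main(void)
{
	char	path[] = "C:\\pgdata\\base\\1234";
	char   *sep = my_last_dir_separator(path);

	printf("file name: %s\n", sep ? sep + 1 : path);
	return 0;
}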

View File

@ -97,7 +97,8 @@ do_merge(time_t backup_id)
}
else
{
Assert(dest_backup);
if (dest_backup == NULL)
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
if (backup->start_time != prev_parent)
continue;
@ -221,11 +222,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
*/
pgBackupGetPath(to_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
to_files = dir_read_file_list(from_database_path, /* Use from_database_path
* so root path will be
* equal with 'files' */
from_extra_prefix,
control_file);
to_files = dir_read_file_list(NULL, NULL, control_file);
/* To delete from leaf, sort in reversed order */
parray_qsort(to_files, pgFileComparePathDesc);
/*
@ -233,7 +230,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
*/
pgBackupGetPath(from_backup, control_file, lengthof(control_file),
DATABASE_FILE_LIST);
files = dir_read_file_list(from_database_path, from_extra_prefix, control_file);
files = dir_read_file_list(NULL, NULL, control_file);
/* sort by size for load balancing */
parray_qsort(files, pgFileCompareSize);
@ -277,17 +274,11 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
if (file->extra_dir_num && S_ISDIR(file->mode))
{
char dirpath[MAXPGPATH];
char *dir_name;
char old_container[MAXPGPATH];
char new_container[MAXPGPATH];
makeExtraDirPathByNum(old_container, from_extra_prefix,
file->extra_dir_num);
makeExtraDirPathByNum(new_container, to_extra_prefix,
file->extra_dir_num);
dir_name = GetRelativePath(file->path, old_container);
elog(VERBOSE, "Create directory \"%s\"", dir_name);
join_path_components(dirpath, new_container, dir_name);
join_path_components(dirpath, new_container, file->path);
dir_create_dir(dirpath, DIR_PERMISSION);
}
pg_atomic_init_flag(&file->lock);
@ -385,11 +376,37 @@ delete_source_backup:
if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
{
char to_file_path[MAXPGPATH];
char *prev_path;
/* We need full path, file object has relative path */
join_path_components(to_file_path, to_database_path, file->path);
prev_path = file->path;
file->path = to_file_path;
pgFileDelete(file);
elog(VERBOSE, "Deleted \"%s\"", file->path);
file->path = prev_path;
}
}
/*
* Rename FULL backup directory.
*/
elog(INFO, "Rename %s to %s", to_backup_id, from_backup_id);
if (rename(to_backup_path, from_backup_path) == -1)
elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
to_backup_path, from_backup_path, strerror(errno));
/*
* Merging finished, now we can safely update ID of the destination backup.
*/
to_backup->start_time = from_backup->start_time;
if (from_backup->extra_dir_str)
to_backup->extra_dir_str = from_backup->extra_dir_str;
write_backup(to_backup);
/* Cleanup */
if (threads)
{
@ -418,13 +435,14 @@ merge_files(void *arg)
pgBackup *from_backup = argument->from_backup;
int i,
num_files = parray_num(argument->files);
int to_root_len = strlen(argument->to_root);
for (i = 0; i < num_files; i++)
{
pgFile *file = (pgFile *) parray_get(argument->files, i);
pgFile *to_file;
pgFile **res_file;
char from_file_path[MAXPGPATH];
char *prev_file_path;
if (!pg_atomic_test_set_flag(&file->lock))
continue;
@ -469,19 +487,33 @@ merge_files(void *arg)
continue;
}
/* We need to make full path, file object has relative path */
if (file->extra_dir_num)
{
char temp[MAXPGPATH];
makeExtraDirPathByNum(temp, argument->from_extra_prefix,
file->extra_dir_num);
join_path_components(from_file_path, temp, file->path);
}
else
join_path_components(from_file_path, argument->from_root,
file->path);
prev_file_path = file->path;
file->path = from_file_path;
/*
* Move the file. We need to decompress it and compress again if
* necessary.
*/
elog(VERBOSE, "Moving file \"%s\", is_datafile %d, is_cfs %d",
elog(VERBOSE, "Merging file \"%s\", is_datafile %d, is_cfs %d",
file->path, file->is_database, file->is_cfs);
if (file->is_datafile && !file->is_cfs)
{
char to_path_tmp[MAXPGPATH]; /* Path of target file */
char to_file_path[MAXPGPATH]; /* Path of target file */
join_path_components(to_path_tmp, argument->to_root,
file->path + to_root_len + 1);
join_path_components(to_file_path, argument->to_root, prev_file_path);
/*
* We need a more complicated algorithm if the target file should be
@ -493,7 +525,7 @@ merge_files(void *arg)
char tmp_file_path[MAXPGPATH];
char *prev_path;
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_path_tmp);
snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_file_path);
/* Start the magic */
@ -519,7 +551,7 @@ merge_files(void *arg)
* need the file in directory to_root.
*/
prev_path = to_file->path;
to_file->path = to_path_tmp;
to_file->path = to_file_path;
/* Decompress target file into temporary one */
restore_data_file(tmp_file_path, to_file, false, false,
parse_program_version(to_backup->program_version));
@ -534,7 +566,7 @@ merge_files(void *arg)
false,
parse_program_version(from_backup->program_version));
elog(VERBOSE, "Compress file and save it to the directory \"%s\"",
elog(VERBOSE, "Compress file and save it into the directory \"%s\"",
argument->to_root);
/* Again we need to change path */
@ -544,7 +576,7 @@ merge_files(void *arg)
file->size = pgFileSize(file->path);
/* Now we can compress the file */
backup_data_file(NULL, /* We shouldn't need 'arguments' here */
to_path_tmp, file,
to_file_path, file,
to_backup->start_lsn,
to_backup->backup_mode,
to_backup->compress_alg,
@ -563,7 +595,7 @@ merge_files(void *arg)
else
{
/* We can merge in-place here */
restore_data_file(to_path_tmp, file,
restore_data_file(to_file_path, file,
from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
true,
parse_program_version(from_backup->program_version));
@ -572,8 +604,8 @@ merge_files(void *arg)
* We need to calculate write_size, restore_data_file() doesn't
* do that.
*/
file->write_size = pgFileSize(to_path_tmp);
file->crc = pgFileGetCRC(to_path_tmp, true, true, NULL);
file->write_size = pgFileSize(to_file_path);
file->crc = pgFileGetCRC(to_file_path, true, true, NULL);
}
}
else if (strcmp(file->name, "pg_control") == 0)
@ -596,9 +628,18 @@ merge_files(void *arg)
else
copy_file(argument->from_root, argument->to_root, file);
/*
* Record the compression algorithm of the target backup so the file can
* be restored later.
*/
file->compress_alg = to_backup->compress_alg;
if (file->write_size != BYTES_INVALID)
elog(LOG, "Moved file \"%s\": " INT64_FORMAT " bytes",
elog(LOG, "Merged file \"%s\": " INT64_FORMAT " bytes",
file->path, file->write_size);
/* Restore relative path */
file->path = prev_file_path;
}
/* Data files merging is successful */
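
A minimal standalone sketch of the path-swapping pattern used throughout this hunk: file objects now carry relative paths, so before calling a helper that needs an absolute path the code temporarily substitutes the full path and restores the relative one afterwards. Types and names below are illustrative, not pg_probackup's API.

#include <stdio.h>

#define MY_MAXPATH 1024

typedef struct { char *path; } my_file;

static void
delete_file(const my_file *file)
{
	printf("deleting %s\n", file->path);
}

int main(void)
{
	char	full_path[MY_MAXPATH];
	my_file	file = { "base/16384/2608" };	/* relative path from the catalog */
	char   *prev_path;

	snprintf(full_path, sizeof(full_path), "%s/%s",
			 "/backups/FULL/database", file.path);

	prev_path = file.path;		/* remember the relative path */
	file.path = full_path;		/* helpers below expect a full path */
	delete_file(&file);
	file.path = prev_path;		/* restore; later code still needs the relative form */
	return 0;
}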

View File

@ -334,15 +334,6 @@ main(int argc, char *argv[])
if (rc != -1 && !S_ISDIR(stat_buf.st_mode))
elog(ERROR, "-B, --backup-path must be a path to directory");
/* command was initialized for a few commands */
if (command)
{
elog_file(INFO, "command: %s", command);
pfree(command);
command = NULL;
}
/* Option --instance is required for all commands except init and show */
if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD &&
backup_subcmd != VALIDATE_CMD)
@ -375,8 +366,9 @@ main(int argc, char *argv[])
}
/*
* Read options from env variables or from config file,
* unless we're going to set them via set-config.
* Command-line options have been read; now read the remaining options from
* the configuration file, since we have the backup path and instance name.
* For some commands the instance option isn't required, see above.
*/
if (instance_name)
{
@ -393,6 +385,15 @@ main(int argc, char *argv[])
/* Initialize logger */
init_logger(backup_path, &instance_config.logger);
/* command was initialized for a few commands */
if (command)
{
elog_file(INFO, "command: %s", command);
pfree(command);
command = NULL;
}
/*
* We have read pgdata path from command line or from configuration file.
* Ensure that pgdata is an absolute path.
@ -584,7 +585,7 @@ compress_init(void)
if (backup_subcmd == BACKUP_CMD || backup_subcmd == ARCHIVE_PUSH_CMD)
{
#ifndef HAVE_LIBZ
if (compress_alg == ZLIB_COMPRESS)
if (instance_config.compress_alg == ZLIB_COMPRESS)
elog(ERROR, "This build does not support zlib compression");
else
#endif

View File

@ -484,7 +484,6 @@ extern void pgBackupGetPath2(const pgBackup *backup, char *path, size_t len,
const char *subdir1, const char *subdir2);
extern int pgBackupCreateDir(pgBackup *backup);
extern void pgBackupInit(pgBackup *backup);
extern void pgBackupCopy(pgBackup *dst, pgBackup *src);
extern void pgBackupFree(void *backup);
extern int pgBackupCompareId(const void *f1, const void *f2);
extern int pgBackupCompareIdDesc(const void *f1, const void *f2);

View File

@ -520,7 +520,7 @@ restore_backup(pgBackup *backup, const char *extra_dir_str)
/* By default assume there is an error */
threads_args[i].ret = 1;
elog(LOG, "Start thread for num:%li", parray_num(files));
elog(LOG, "Start thread for num:%zu", parray_num(files));
pthread_create(&threads[i], NULL, restore_files, arg);
}

View File

@ -1,4 +1,4 @@
/*-------------------------------------------------------------------------
/*-------------------------------------------------------------------------
*
* configuration.c: - function implementations to work with pg_probackup
* configurations.

View File

@ -40,10 +40,10 @@ typedef enum
void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3);
static void elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
pg_attribute_printf(3, 0);
static void elog_internal(int elevel, bool file_only, const char *message);
static void elog_stderr(int elevel, const char *fmt, ...)
pg_attribute_printf(2, 3);
static char *get_log_message(const char *fmt, va_list args) pg_attribute_printf(1, 0);
/* Functions to work with log files */
static void open_logfile(FILE **file, const char *filename_format);
@ -74,7 +74,7 @@ init_logger(const char *root_path, LoggerConfig *config)
/* Set log path */
if (config->log_directory == NULL)
{
config->log_directory = palloc(MAXPGPATH);
config->log_directory = pgut_malloc(MAXPGPATH);
join_path_components(config->log_directory,
root_path, LOG_DIRECTORY_DEFAULT);
}
@ -148,13 +148,11 @@ exit_if_necessary(int elevel)
* Actual implementation for elog() and pg_log().
*/
static void
elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
elog_internal(int elevel, bool file_only, const char *message)
{
bool write_to_file,
write_to_error_log,
write_to_stderr;
va_list error_args,
std_args;
time_t log_time = (time_t) time(NULL);
char strfbuf[128];
@ -165,22 +163,8 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
write_to_stderr = elevel >= logger_config.log_level_console && !file_only;
pthread_lock(&log_file_mutex);
#ifdef WIN32
std_args = NULL;
error_args = NULL;
#endif
loggin_in_progress = true;
/* We need copy args only if we need write to error log file */
if (write_to_error_log)
va_copy(error_args, args);
/*
* We need copy args only if we need write to stderr. But do not copy args
* if we need to log only to stderr.
*/
if (write_to_stderr && write_to_file)
va_copy(std_args, args);
if (write_to_file || write_to_error_log)
strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z",
localtime(&log_time));
@ -203,8 +187,7 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
fprintf(log_file, "%s: ", strfbuf);
write_elevel(log_file, elevel);
vfprintf(log_file, fmt, args);
fputc('\n', log_file);
fprintf(log_file, "%s\n", message);
fflush(log_file);
}
@ -221,11 +204,8 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
fprintf(error_log_file, "%s: ", strfbuf);
write_elevel(error_log_file, elevel);
vfprintf(error_log_file, fmt, error_args);
fputc('\n', error_log_file);
fprintf(error_log_file, "%s\n", message);
fflush(error_log_file);
va_end(error_args);
}
/*
@ -235,15 +215,9 @@ elog_internal(int elevel, bool file_only, const char *fmt, va_list args)
if (write_to_stderr)
{
write_elevel(stderr, elevel);
if (write_to_file)
vfprintf(stderr, fmt, std_args);
else
vfprintf(stderr, fmt, args);
fputc('\n', stderr);
fflush(stderr);
if (write_to_file)
va_end(std_args);
fprintf(stderr, "%s\n", message);
fflush(stderr);
}
exit_if_necessary(elevel);
@ -280,12 +254,44 @@ elog_stderr(int elevel, const char *fmt, ...)
exit_if_necessary(elevel);
}
/*
* Formats text data under the control of fmt and returns it in an allocated
* buffer.
*/
static char *
get_log_message(const char *fmt, va_list args)
{
size_t len = 256; /* initial assumption about buffer size */
for (;;)
{
char *result;
size_t newlen;
va_list copy_args;
result = (char *) pgut_malloc(len);
/* Try to format the data */
va_copy(copy_args, args);
newlen = pvsnprintf(result, len, fmt, copy_args);
va_end(copy_args);
if (newlen < len)
return result; /* success */
/* Release buffer and loop around to try again with larger len. */
pfree(result);
len = newlen;
}
}
/*
* Logs to stderr or to log file and exit if ERROR.
*/
void
elog(int elevel, const char *fmt, ...)
{
char *message;
va_list args;
/*
@ -297,8 +303,11 @@ elog(int elevel, const char *fmt, ...)
return;
va_start(args, fmt);
elog_internal(elevel, false, fmt, args);
message = get_log_message(fmt, args);
va_end(args);
elog_internal(elevel, false, message);
pfree(message);
}
/*
@ -307,6 +316,7 @@ elog(int elevel, const char *fmt, ...)
void
elog_file(int elevel, const char *fmt, ...)
{
char *message;
va_list args;
/*
@ -317,8 +327,11 @@ elog_file(int elevel, const char *fmt, ...)
return;
va_start(args, fmt);
elog_internal(elevel, true, fmt, args);
message = get_log_message(fmt, args);
va_end(args);
elog_internal(elevel, true, message);
pfree(message);
}
/*
@ -327,6 +340,7 @@ elog_file(int elevel, const char *fmt, ...)
void
pg_log(eLogType type, const char *fmt, ...)
{
char *message;
va_list args;
int elevel = INFO;
@ -359,8 +373,11 @@ pg_log(eLogType type, const char *fmt, ...)
return;
va_start(args, fmt);
elog_internal(elevel, false, fmt, args);
message = get_log_message(fmt, args);
va_end(args);
elog_internal(elevel, false, message);
pfree(message);
}
/*
@ -445,7 +462,7 @@ logfile_getname(const char *format, time_t timestamp)
logger_config.log_directory[0] == '\0')
elog_stderr(ERROR, "logging path is not set");
filename = (char *) palloc(MAXPGPATH);
filename = (char *) pgut_malloc(MAXPGPATH);
snprintf(filename, MAXPGPATH, "%s/", logger_config.log_directory);
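
A standalone sketch of the pattern this refactoring introduces: the message is formatted once into an allocated string (growing the buffer until it fits) and the same plain string is then written to every sink, which avoids juggling several copies of the va_list. Names below are illustrative, not the project's API; the real code uses pvsnprintf() and pgut_malloc().

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *
format_message(const char *fmt, va_list args)
{
	size_t	len = 256;			/* initial guess, grown on demand */

	for (;;)
	{
		char	   *buf = malloc(len);
		va_list		copy;
		int			need;

		va_copy(copy, args);
		need = vsnprintf(buf, len, fmt, copy);
		va_end(copy);

		if (need >= 0 && (size_t) need < len)
			return buf;			/* the message fit */

		free(buf);
		len = (need >= 0) ? (size_t) need + 1 : len * 2;
	}
}

static void
log_message(FILE *sink, const char *fmt, ...)
{
	va_list	args;
	char   *message;

	va_start(args, fmt);
	message = format_message(fmt, args);
	va_end(args);

	/* the same string could now go to stderr, the log file, etc. */
	fprintf(sink, "%s\n", message);
	free(message);
}

int main(void)
{
	log_message(stderr, "merge of backup %s failed at block %d", "Q8XIFX", 42);
	return 0;
}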

View File

@ -703,7 +703,10 @@ on_interrupt(void)
/* Set interrupted flag */
interrupted = true;
/* User promts password, call on_cleanup() byhand */
/*
* User is prompted for a password, so call on_cleanup() by hand. Unless we do
* that we will get stuck forever until the user enters a password.
*/
if (in_password)
{
on_cleanup();

View File

@ -53,6 +53,14 @@ pgBackupValidate(pgBackup *backup)
validate_files_arg *threads_args;
int i;
/* Check backup version */
if (backup->program_version &&
parse_program_version(backup->program_version) > parse_program_version(PROGRAM_VERSION))
elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. "
"pg_probackup do not guarantee to be forward compatible. "
"Please upgrade pg_probackup binary.",
PROGRAM_VERSION, base36enc(backup->start_time), backup->program_version);
/* Revalidation is attempted for DONE, ORPHAN and CORRUPT backups */
if (backup->status != BACKUP_STATUS_OK &&
backup->status != BACKUP_STATUS_DONE &&
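
A standalone sketch of the forward-compatibility guard added here: both version strings are packed into comparable integers and validation refuses to proceed when the backup was written by a newer binary. version_to_int() is only a stand-in; pg_probackup's parse_program_version() may pack the digits differently.

#include <stdio.h>

static long
version_to_int(const char *version)
{
	int		major = 0, minor = 0, patch = 0;

	sscanf(version, "%d.%d.%d", &major, &minor, &patch);
	return major * 10000L + minor * 100L + patch;
}

int main(void)
{
	const char *binary_version = "2.0.24";
	const char *backup_version = "2.0.25";	/* written by a newer binary */

	if (version_to_int(backup_version) > version_to_int(binary_version))
	{
		fprintf(stderr, "binary version is %s, but backup version is %s; "
				"please upgrade the binary\n",
				binary_version, backup_version);
		return 1;
	}
	return 0;
}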

View File

@ -1047,7 +1047,7 @@ class ProbackupTest(object):
except:
pass
def pgdata_content(self, directory, ignore_ptrack=True):
def pgdata_content(self, pgdata, ignore_ptrack=True):
""" return dict with directory content. "
" TAKE IT AFTER CHECKPOINT or BACKUP"""
dirs_to_ignore = [
@ -1064,9 +1064,10 @@ class ProbackupTest(object):
# '_ptrack'
# )
directory_dict = {}
directory_dict['pgdata'] = directory
directory_dict['pgdata'] = pgdata
directory_dict['files'] = {}
for root, dirs, files in os.walk(directory, followlinks=True):
directory_dict['dirs'] = []
for root, dirs, files in os.walk(pgdata, followlinks=True):
dirs[:] = [d for d in dirs if d not in dirs_to_ignore]
for file in files:
if (
@ -1076,7 +1077,7 @@ class ProbackupTest(object):
continue
file_fullpath = os.path.join(root, file)
file_relpath = os.path.relpath(file_fullpath, directory)
file_relpath = os.path.relpath(file_fullpath, pgdata)
directory_dict['files'][file_relpath] = {'is_datafile': False}
directory_dict['files'][file_relpath]['md5'] = hashlib.md5(
open(file_fullpath, 'rb').read()).hexdigest()
@ -1089,12 +1090,51 @@ class ProbackupTest(object):
file_fullpath, size_in_pages
)
for root, dirs, files in os.walk(pgdata, topdown=False, followlinks=True):
for directory in dirs:
directory_path = os.path.join(root, directory)
directory_relpath = os.path.relpath(directory_path, pgdata)
found = False
for d in dirs_to_ignore:
if d in directory_relpath:
found = True
break
# check if this directory is already covered by a longer path in the list
if not found:
for d in directory_dict['dirs']:
# print("OLD dir {0}".format(d))
if directory_relpath in d:
found = True
break
if not found:
directory_dict['dirs'].append(directory_relpath)
return directory_dict
def compare_pgdata(self, original_pgdata, restored_pgdata):
""" return dict with directory content. DO IT BEFORE RECOVERY"""
fail = False
error_message = 'Restored PGDATA is not equal to original!\n'
# Compare directories
for directory in restored_pgdata['dirs']:
if directory not in original_pgdata['dirs']:
fail = True
error_message += '\nDirectory was not present'
error_message += ' in original PGDATA: {0}\n'.format(
os.path.join(restored_pgdata['pgdata'], directory))
for directory in original_pgdata['dirs']:
if directory not in restored_pgdata['dirs']:
fail = True
error_message += '\nDirectory disappeared'
error_message += ' in restored PGDATA: {0}\n'.format(
os.path.join(restored_pgdata['pgdata'], directory))
for file in restored_pgdata['files']:
# File is present in RESTORED PGDATA
# but not present in ORIGINAL

View File

@ -3,6 +3,7 @@
import unittest
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
import shutil
module_name = "merge"
@ -316,11 +317,323 @@ class MergeTest(ProbackupTest, unittest.TestCase):
node.cleanup()
self.del_test_dir(module_name, fname)
def test_merge_compressed_and_uncompressed_backups_1(self):
"""
Test MERGE command with compressed and uncompressed backups
"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=["--data-checksums"],
pg_options={
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
self.set_archiving(backup_dir, "node", node)
node.slow_start()
# Fill with data
node.pgbench_init(scale=5)
# Do compressed FULL backup
self.backup_node(backup_dir, "node", node, options=[
'--compress-algorithm=zlib', '--stream'])
show_backup = self.show_pb(backup_dir, "node")[0]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "FULL")
# Change data
pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
pgbench.wait()
# Do uncompressed DELTA backup
self.backup_node(
backup_dir, "node", node, backup_type="delta",
options=['--stream'])
# Change data
pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
pgbench.wait()
# Do compressed PAGE backup
self.backup_node(
backup_dir, "node", node, backup_type="page",
options=['--compress-algorithm=zlib'])
pgdata = self.pgdata_content(node.data_dir)
show_backup = self.show_pb(backup_dir, "node")[2]
page_id = show_backup["id"]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
show_backups = self.show_pb(backup_dir, "node")
self.assertEqual(len(show_backups), 1)
self.assertEqual(show_backups[0]["status"], "OK")
self.assertEqual(show_backups[0]["backup-mode"], "FULL")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
def test_merge_compressed_and_uncompressed_backups_2(self):
"""
Test MERGE command with compressed and uncompressed backups
"""
fname = self.id().split(".")[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup")
# Initialize instance and backup directory
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=["--data-checksums"],
pg_options={
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, "node", node)
self.set_archiving(backup_dir, "node", node)
node.slow_start()
# Fill with data
node.pgbench_init(scale=5)
# Do uncompressed FULL backup
self.backup_node(backup_dir, "node", node)
show_backup = self.show_pb(backup_dir, "node")[0]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "FULL")
# Change data
pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
pgbench.wait()
# Do compressed DELTA backup
self.backup_node(
backup_dir, "node", node, backup_type="delta",
options=['--compress-algorithm=zlib', '--stream'])
# Change data
pgbench = node.pgbench(options=['-T', '20', '-c', '2', '--no-vacuum'])
pgbench.wait()
# Do uncompressed PAGE backup
self.backup_node(
backup_dir, "node", node, backup_type="page")
pgdata = self.pgdata_content(node.data_dir)
show_backup = self.show_pb(backup_dir, "node")[2]
page_id = show_backup["id"]
self.assertEqual(show_backup["status"], "OK")
self.assertEqual(show_backup["backup-mode"], "PAGE")
# Merge all backups
self.merge_backup(backup_dir, "node", page_id)
show_backups = self.show_pb(backup_dir, "node")
self.assertEqual(len(show_backups), 1)
self.assertEqual(show_backups[0]["status"], "OK")
self.assertEqual(show_backups[0]["backup-mode"], "FULL")
# Drop node and restore it
node.cleanup()
self.restore_node(backup_dir, 'node', node)
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
node.cleanup()
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_merge_tablespaces(self):
"""
Create tablespace with table, take FULL backup,
create another tablespace with another table and drop previous
tablespace, take page backup, merge it and restore
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
node.safe_psql(
"postgres",
"create table t_heap tablespace somedata as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,100) i"
)
# FULL backup
self.backup_node(backup_dir, 'node', node)
# Create new tablespace
self.create_tblspace_in_node(node, 'somedata1')
node.safe_psql(
"postgres",
"create table t_heap1 tablespace somedata1 as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,100) i"
)
node.safe_psql(
"postgres",
"drop table t_heap"
)
# Drop old tablespace
node.safe_psql(
"postgres",
"drop tablespace somedata"
)
# PAGE backup
backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page")
pgdata = self.pgdata_content(node.data_dir)
node.stop()
shutil.rmtree(
self.get_tblspace_path(node, 'somedata'),
ignore_errors=True)
shutil.rmtree(
self.get_tblspace_path(node, 'somedata1'),
ignore_errors=True)
node.cleanup()
self.merge_backup(backup_dir, 'node', backup_id)
self.restore_node(
backup_dir, 'node', node, options=["-j", "4"])
pgdata_restored = self.pgdata_content(node.data_dir)
# this compare should fail because we lost some directories
self.compare_pgdata(pgdata, pgdata_restored)
# @unittest.skip("skip")
def test_merge_tablespaces_1(self):
"""
Create tablespace with table, take FULL backup,
create another tablespace with another table, take page backup,
drop first tablespace and take delta backup,
merge it and restore
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
self.create_tblspace_in_node(node, 'somedata')
# FULL backup
self.backup_node(backup_dir, 'node', node)
node.safe_psql(
"postgres",
"create table t_heap tablespace somedata as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,100) i"
)
# CREATE NEW TABLESPACE
self.create_tblspace_in_node(node, 'somedata1')
node.safe_psql(
"postgres",
"create table t_heap1 tablespace somedata1 as select i as id,"
" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
" from generate_series(0,100) i"
)
# PAGE backup
self.backup_node(backup_dir, 'node', node, backup_type="page")
node.safe_psql(
"postgres",
"drop table t_heap"
)
node.safe_psql(
"postgres",
"drop tablespace somedata"
)
# DELTA backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type="delta")
pgdata = self.pgdata_content(node.data_dir)
node.stop()
shutil.rmtree(
self.get_tblspace_path(node, 'somedata'),
ignore_errors=True)
shutil.rmtree(
self.get_tblspace_path(node, 'somedata1'),
ignore_errors=True)
node.cleanup()
self.merge_backup(backup_dir, 'node', backup_id)
self.restore_node(
backup_dir, 'node', node,
options=["-j", "4"])
pgdata_restored = self.pgdata_content(node.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname)
def test_merge_page_truncate(self):
"""
@ -895,3 +1208,4 @@ class MergeTest(ProbackupTest, unittest.TestCase):
# FULL MERGING
# 3. Need new test with corrupted FULL backup
# 4. different compression levels

View File

@ -1,6 +1,7 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import QueryException
from datetime import datetime, timedelta
import subprocess
import gzip
@ -1030,3 +1031,120 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_create_db(self):
"""
Make node, take full backup, create database db1, take page backup,
restore database and check its presence
"""
self.maxDiff = None
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'max_wal_size': '10GB',
'max_wal_senders': '2',
'checkpoint_timeout': '5min',
'autovacuum': 'off'
}
)
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
self.backup_node(
backup_dir, 'node', node)
# CREATE DATABASE DB1
node.safe_psql("postgres", "create database db1")
node.safe_psql(
"db1",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,1000) i")
# PAGE BACKUP
backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
)
node_restored.cleanup()
self.restore_node(
backup_dir, 'node', node_restored,
backup_id=backup_id, options=["-j", "4"])
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
node_restored.safe_psql('db1', 'select 1')
node_restored.cleanup()
# DROP DATABASE DB1
node.safe_psql(
"postgres", "drop database db1")
# SECOND PAGE BACKUP
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='page')
if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)
# RESTORE SECOND PAGE BACKUP
self.restore_node(
backup_dir, 'node', node_restored,
backup_id=backup_id, options=["-j", "4"]
)
# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(
node_restored.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.slow_start()
try:
node_restored.safe_psql('db1', 'select 1')
# we should die here because exception is what we expect to happen
self.assertEqual(
1, 0,
"Expecting Error because we are connecting to deleted database"
"\n Output: {0} \n CMD: {1}".format(
repr(self.output), self.cmd)
)
except QueryException as e:
self.assertTrue(
'FATAL: database "db1" does not exist' in e.message,
'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd)
)
# Clean after yourself
self.del_test_dir(module_name, fname)