2018-08-02 11:57:39 +03:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* merge.c: merge FULL and incremental backups
|
|
|
|
*
|
|
|
|
* Copyright (c) 2018, Postgres Professional
|
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
2018-10-18 15:43:30 +03:00
|
|
|
#include "pg_probackup.h"
|
|
|
|
|
2018-08-02 11:57:39 +03:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#include "utils/thread.h"
|
|
|
|
|
|
|
|
/*
 * Arguments handed to each merge_files() worker thread.  All pointers
 * reference data owned by merge_backups(); the lists are shared between
 * threads, and work items are claimed via the per-file atomic lock flag.
 */
typedef struct
{
	/* File list of the destination (to) backup, sorted by path descending */
	parray	   *to_files;
	/* File list of the source (from) backup; threads claim entries from it */
	parray	   *files;

	pgBackup   *to_backup;		/* destination backup (the older, FULL one) */
	pgBackup   *from_backup;	/* source backup being merged into the target */

	const char *to_root;		/* database directory of the destination backup */
	const char *from_root;		/* database directory of the source backup */

	/*
	 * Return value from the thread.
	 * 0 means there is no error, 1 - there is an error.
	 */
	int			ret;
} merge_files_arg;
|
|
|
|
|
|
|
|
/* Merge one pair of adjacent backups: fold next_backup into backup */
static void merge_backups(pgBackup *backup, pgBackup *next_backup);
/* Worker thread routine for merge_backups(); arg is a merge_files_arg */
static void *merge_files(void *arg);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Implementation of MERGE command.
|
|
|
|
*
|
|
|
|
* - Find target and its parent full backup
|
|
|
|
* - Merge data files of target, parent and and intermediate backups
|
|
|
|
* - Remove unnecessary files, which doesn't exist in the target backup anymore
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
do_merge(time_t backup_id)
|
|
|
|
{
|
|
|
|
parray *backups;
|
|
|
|
pgBackup *dest_backup = NULL;
|
|
|
|
pgBackup *full_backup = NULL;
|
|
|
|
time_t prev_parent = INVALID_BACKUP_ID;
|
|
|
|
int i;
|
|
|
|
int dest_backup_idx = 0;
|
|
|
|
int full_backup_idx = 0;
|
|
|
|
|
|
|
|
if (backup_id == INVALID_BACKUP_ID)
|
|
|
|
elog(ERROR, "required parameter is not specified: --backup-id");
|
|
|
|
|
|
|
|
if (instance_name == NULL)
|
|
|
|
elog(ERROR, "required parameter is not specified: --instance");
|
|
|
|
|
2018-11-09 18:32:37 +03:00
|
|
|
elog(INFO, "Merge started");
|
2018-08-02 11:57:39 +03:00
|
|
|
|
|
|
|
/* Get list of all backups sorted in order of descending start time */
|
|
|
|
backups = catalog_get_backup_list(INVALID_BACKUP_ID);
|
|
|
|
|
|
|
|
/* Find destination and parent backups */
|
|
|
|
for (i = 0; i < parray_num(backups); i++)
|
|
|
|
{
|
|
|
|
pgBackup *backup = (pgBackup *) parray_get(backups, i);
|
|
|
|
|
|
|
|
if (backup->start_time > backup_id)
|
|
|
|
continue;
|
|
|
|
else if (backup->start_time == backup_id && !dest_backup)
|
|
|
|
{
|
2018-10-15 15:43:20 +03:00
|
|
|
if (backup->status != BACKUP_STATUS_OK &&
|
|
|
|
/* It is possible that previous merging was interrupted */
|
2018-10-16 18:13:27 +03:00
|
|
|
backup->status != BACKUP_STATUS_MERGING &&
|
|
|
|
backup->status != BACKUP_STATUS_DELETING)
|
2018-08-02 11:57:39 +03:00
|
|
|
elog(ERROR, "Backup %s has status: %s",
|
|
|
|
base36enc(backup->start_time), status2str(backup->status));
|
|
|
|
|
|
|
|
if (backup->backup_mode == BACKUP_MODE_FULL)
|
2018-10-08 17:25:50 +03:00
|
|
|
elog(ERROR, "Backup %s is full backup",
|
2018-08-02 11:57:39 +03:00
|
|
|
base36enc(backup->start_time));
|
|
|
|
|
|
|
|
dest_backup = backup;
|
|
|
|
dest_backup_idx = i;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2018-12-12 15:10:25 +03:00
|
|
|
if (dest_backup == NULL)
|
|
|
|
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
|
2018-08-02 11:57:39 +03:00
|
|
|
|
|
|
|
if (backup->start_time != prev_parent)
|
|
|
|
continue;
|
|
|
|
|
2018-10-15 15:43:20 +03:00
|
|
|
if (backup->status != BACKUP_STATUS_OK &&
|
|
|
|
/* It is possible that previous merging was interrupted */
|
|
|
|
backup->status != BACKUP_STATUS_MERGING)
|
|
|
|
elog(ERROR, "Backup %s has status: %s",
|
2018-08-02 11:57:39 +03:00
|
|
|
base36enc(backup->start_time), status2str(backup->status));
|
|
|
|
|
|
|
|
/* If we already found dest_backup, look for full backup */
|
|
|
|
if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
|
|
|
|
{
|
|
|
|
full_backup = backup;
|
|
|
|
full_backup_idx = i;
|
|
|
|
|
|
|
|
/* Found target and full backups, so break the loop */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
prev_parent = backup->parent_backup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dest_backup == NULL)
|
|
|
|
elog(ERROR, "Target backup %s was not found", base36enc(backup_id));
|
|
|
|
if (full_backup == NULL)
|
|
|
|
elog(ERROR, "Parent full backup for the given backup %s was not found",
|
|
|
|
base36enc(backup_id));
|
|
|
|
|
|
|
|
Assert(full_backup_idx != dest_backup_idx);
|
|
|
|
|
2018-12-18 10:49:14 +03:00
|
|
|
catalog_lock_backup_list(backups, full_backup_idx, dest_backup_idx);
|
|
|
|
|
2018-08-02 11:57:39 +03:00
|
|
|
/*
|
|
|
|
* Found target and full backups, merge them and intermediate backups
|
|
|
|
*/
|
|
|
|
for (i = full_backup_idx; i > dest_backup_idx; i--)
|
|
|
|
{
|
|
|
|
pgBackup *from_backup = (pgBackup *) parray_get(backups, i - 1);
|
|
|
|
|
2018-11-09 18:32:37 +03:00
|
|
|
merge_backups(full_backup, from_backup);
|
2018-08-02 11:57:39 +03:00
|
|
|
}
|
|
|
|
|
2018-11-09 18:32:37 +03:00
|
|
|
pgBackupValidate(full_backup);
|
|
|
|
if (full_backup->status == BACKUP_STATUS_CORRUPT)
|
|
|
|
elog(ERROR, "Merging of backup %s failed", base36enc(backup_id));
|
|
|
|
|
2018-08-02 11:57:39 +03:00
|
|
|
/* cleanup */
|
|
|
|
parray_walk(backups, pgBackupFree);
|
|
|
|
parray_free(backups);
|
|
|
|
|
2018-11-09 18:32:37 +03:00
|
|
|
elog(INFO, "Merge of backup %s completed", base36enc(backup_id));
|
2018-08-02 11:57:39 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Merge two backups data files using threads.
 * - move instance files from from_backup to to_backup
 * - remove unnecessary directories and files from to_backup
 * - update metadata of from_backup, it becames FULL backup
 *
 * Resumable: if a previous merge was interrupted while deleting the source
 * backup, we jump straight to the deletion phase (see delete_source_backup).
 */
static void
merge_backups(pgBackup *to_backup, pgBackup *from_backup)
{
	char	   *to_backup_id = base36enc_dup(to_backup->start_time),
			   *from_backup_id = base36enc_dup(from_backup->start_time);
	char		to_backup_path[MAXPGPATH],
				to_database_path[MAXPGPATH],
				from_backup_path[MAXPGPATH],
				from_database_path[MAXPGPATH],
				control_file[MAXPGPATH];
	parray	   *files,
			   *to_files;
	pthread_t  *threads = NULL;
	merge_files_arg *threads_args = NULL;
	int			i;
	time_t		merge_time;
	bool		merge_isok = true;

	/* Record when this merge attempt started; stored in metadata below */
	merge_time = time(NULL);
	elog(INFO, "Merging backup %s with backup %s", from_backup_id, to_backup_id);

	/*
	 * Validate to_backup only if it is BACKUP_STATUS_OK. If it has
	 * BACKUP_STATUS_MERGING status then it isn't valid backup until merging
	 * finished.
	 */
	if (to_backup->status == BACKUP_STATUS_OK)
	{
		pgBackupValidate(to_backup);
		if (to_backup->status == BACKUP_STATUS_CORRUPT)
			elog(ERROR, "Interrupt merging");
	}

	/*
	 * It is OK to validate from_backup if it has BACKUP_STATUS_OK or
	 * BACKUP_STATUS_MERGING status.
	 */
	Assert(from_backup->status == BACKUP_STATUS_OK ||
		   from_backup->status == BACKUP_STATUS_MERGING);
	pgBackupValidate(from_backup);
	if (from_backup->status == BACKUP_STATUS_CORRUPT)
		elog(ERROR, "Interrupt merging");

	/*
	 * Make backup paths.
	 */
	pgBackupGetPath(to_backup, to_backup_path, lengthof(to_backup_path), NULL);
	pgBackupGetPath(to_backup, to_database_path, lengthof(to_database_path),
					DATABASE_DIR);
	pgBackupGetPath(from_backup, from_backup_path, lengthof(from_backup_path), NULL);
	pgBackupGetPath(from_backup, from_database_path, lengthof(from_database_path),
					DATABASE_DIR);

	/*
	 * Get list of files which will be modified or removed.
	 */
	pgBackupGetPath(to_backup, control_file, lengthof(control_file),
					DATABASE_FILE_LIST);
	to_files = dir_read_file_list(NULL, control_file);
	/* To delete from leaf, sort in reversed order */
	parray_qsort(to_files, pgFileComparePathDesc);
	/*
	 * Get list of files which need to be moved.
	 */
	pgBackupGetPath(from_backup, control_file, lengthof(control_file),
					DATABASE_FILE_LIST);
	files = dir_read_file_list(NULL, control_file);
	/* sort by size for load balancing */
	parray_qsort(files, pgFileCompareSize);

	/*
	 * Previous merging was interrupted during deleting source backup. It is
	 * safe just to delete it again.
	 */
	if (from_backup->status == BACKUP_STATUS_DELETING)
		goto delete_source_backup;

	/* Mark both backups MERGING so an interrupted merge can be resumed */
	write_backup_status(to_backup, BACKUP_STATUS_MERGING);
	write_backup_status(from_backup, BACKUP_STATUS_MERGING);

	/* Recreate the directory skeleton of the source inside the target */
	create_data_directories(to_database_path, from_backup_path, false);

	threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads);
	threads_args = (merge_files_arg *) palloc(sizeof(merge_files_arg) * num_threads);

	/* Setup threads: clear each file's claim flag before workers start */
	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, i);

		pg_atomic_init_flag(&file->lock);
	}

	for (i = 0; i < num_threads; i++)
	{
		merge_files_arg *arg = &(threads_args[i]);

		arg->to_files = to_files;
		arg->files = files;
		arg->to_backup = to_backup;
		arg->from_backup = from_backup;
		arg->to_root = to_database_path;
		arg->from_root = from_database_path;
		/* By default there are some error */
		arg->ret = 1;

		elog(VERBOSE, "Start thread: %d", i);

		pthread_create(&threads[i], NULL, merge_files, arg);
	}

	/* Wait threads */
	for (i = 0; i < num_threads; i++)
	{
		pthread_join(threads[i], NULL);
		if (threads_args[i].ret == 1)
			merge_isok = false;
	}
	if (!merge_isok)
		elog(ERROR, "Data files merging failed");

	/*
	 * Update to_backup metadata.
	 */
	to_backup->status = BACKUP_STATUS_OK;
	StrNCpy(to_backup->program_version, PROGRAM_VERSION,
			sizeof(to_backup->program_version));
	/* The merged backup becomes a FULL backup with no parent */
	to_backup->parent_backup = INVALID_BACKUP_ID;
	to_backup->start_lsn = from_backup->start_lsn;
	to_backup->stop_lsn = from_backup->stop_lsn;
	to_backup->recovery_time = from_backup->recovery_time;
	to_backup->recovery_xid = from_backup->recovery_xid;
	to_backup->merge_time = merge_time;
	to_backup->end_time = time(NULL);

	/*
	 * Target backup must inherit wal mode too.
	 */
	to_backup->stream = from_backup->stream;
	/* Compute summary of size of regular files in the backup */
	to_backup->data_bytes = 0;
	for (i = 0; i < parray_num(files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(files, i);

		/* Directories are accounted with a nominal 4 KB each */
		if (S_ISDIR(file->mode))
			to_backup->data_bytes += 4096;
		/* Count the amount of the data actually copied */
		else if (S_ISREG(file->mode))
			to_backup->data_bytes += file->write_size;
	}
	/* compute size of wal files of this backup stored in the archive */
	if (!to_backup->stream)
		to_backup->wal_bytes = instance_config.xlog_seg_size *
			(to_backup->stop_lsn / instance_config.xlog_seg_size -
			 to_backup->start_lsn / instance_config.xlog_seg_size + 1);
	else
		to_backup->wal_bytes = BYTES_INVALID;

	write_backup_filelist(to_backup, files, from_database_path);
	write_backup(to_backup);

delete_source_backup:
	/*
	 * Files were copied into to_backup. It is time to remove source backup
	 * entirely.
	 */
	delete_backup_files(from_backup);

	/*
	 * Delete files which are not in from_backup file list.
	 */
	parray_qsort(files, pgFileComparePathDesc);
	for (i = 0; i < parray_num(to_files); i++)
	{
		pgFile	   *file = (pgFile *) parray_get(to_files, i);

		if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL)
		{
			char		to_file_path[MAXPGPATH];
			char	   *prev_path;

			/* We need full path, file object has relative path */
			join_path_components(to_file_path, to_database_path, file->path);
			prev_path = file->path;
			/* Temporarily swap in the absolute path for deletion/logging */
			file->path = to_file_path;

			pgFileDelete(file);
			elog(VERBOSE, "Deleted \"%s\"", file->path);

			/* Restore the relative path before the list is freed */
			file->path = prev_path;
		}
	}

	/*
	 * Rename FULL backup directory.
	 */
	elog(INFO, "Rename %s to %s", to_backup_id, from_backup_id);
	if (rename(to_backup_path, from_backup_path) == -1)
		elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
			 to_backup_path, from_backup_path, strerror(errno));

	/*
	 * Merging finished, now we can safely update ID of the destination backup.
	 */
	to_backup->start_time = from_backup->start_time;
	write_backup(to_backup);

	/* Cleanup */
	if (threads)
	{
		pfree(threads_args);
		pfree(threads);
	}

	parray_walk(to_files, pgFileFree);
	parray_free(to_files);

	parray_walk(files, pgFileFree);
	parray_free(files);

	pfree(to_backup_id);
	pfree(from_backup_id);
}
|
|
|
|
|
|
|
|
/*
 * Thread worker of merge_backups().
 *
 * Iterates over the shared source-backup file list; each file is claimed by
 * at most one thread via its atomic lock flag.  Sets argument->ret to 0 on
 * success (it was pre-set to 1 by the caller, so an elog(ERROR) exit leaves
 * the failure flag in place).
 */
static void *
merge_files(void *arg)
{
	merge_files_arg *argument = (merge_files_arg *) arg;
	pgBackup   *to_backup = argument->to_backup;
	pgBackup   *from_backup = argument->from_backup;
	int			i,
				num_files = parray_num(argument->files);

	for (i = 0; i < num_files; i++)
	{
		pgFile	   *file = (pgFile *) parray_get(argument->files, i);
		pgFile	   *to_file;
		pgFile	  **res_file;
		char		to_file_path[MAXPGPATH];	/* Path of target file */
		char		from_file_path[MAXPGPATH];
		char	   *prev_file_path;

		/* Another thread already claimed this file */
		if (!pg_atomic_test_set_flag(&file->lock))
			continue;

		/* check for interrupt */
		if (interrupted)
			elog(ERROR, "Interrupted during merging backups");

		/* Directories were created before */
		if (S_ISDIR(file->mode))
			continue;

		if (progress)
			elog(INFO, "Progress: (%d/%d). Process file \"%s\"",
				 i + 1, num_files, file->path);

		/* Look up the same file in the target backup's list, if present */
		res_file = parray_bsearch(argument->to_files, file,
								  pgFileComparePathDesc);
		to_file = (res_file) ? *res_file : NULL;

		join_path_components(to_file_path, argument->to_root, file->path);

		/*
		 * Skip files which haven't changed since previous backup. But in case
		 * of DELTA backup we should consider n_blocks to truncate the target
		 * backup.
		 */
		if (file->write_size == BYTES_INVALID && file->n_blocks == -1)
		{
			elog(VERBOSE, "Skip merging file \"%s\", the file didn't change",
				 file->path);

			/*
			 * If the file wasn't changed in PAGE backup, retrieve its
			 * write_size from previous FULL backup.
			 */
			if (to_file)
			{
				file->compress_alg = to_file->compress_alg;
				file->write_size = to_file->write_size;

				/*
				 * Recalculate crc for backup prior to 2.0.25.
				 */
				if (parse_program_version(from_backup->program_version) < 20025)
					file->crc = pgFileGetCRC(to_file_path, true, true, NULL);
				/* Otherwise just get it from the previous file */
				else
					file->crc = to_file->crc;
			}

			continue;
		}

		/* We need to make full path, file object has relative path */
		join_path_components(from_file_path, argument->from_root, file->path);
		prev_file_path = file->path;
		file->path = from_file_path;

		/*
		 * Move the file. We need to decompress it and compress again if
		 * necessary.
		 */
		/*
		 * NOTE(review): format text says "is_datafile" but the argument is
		 * file->is_database — verify which field was intended.
		 */
		elog(VERBOSE, "Merging file \"%s\", is_datafile %d, is_cfs %d",
			 file->path, file->is_database, file->is_cfs);

		if (file->is_datafile && !file->is_cfs)
		{
			/*
			 * We need more complicate algorithm if target file should be
			 * compressed.
			 */
			if (to_backup->compress_alg == PGLZ_COMPRESS ||
				to_backup->compress_alg == ZLIB_COMPRESS)
			{
				char		tmp_file_path[MAXPGPATH];
				char	   *prev_path;

				snprintf(tmp_file_path, MAXPGPATH, "%s_tmp", to_file_path);

				/* Start the magic */

				/*
				 * Merge files:
				 * - if target file exists restore and decompress it to the temp
				 *   path
				 * - decompress source file if necessary and merge it with the
				 *   target decompressed file
				 * - compress result file
				 */

				/*
				 * We need to decompress target file if it exists.
				 */
				if (to_file)
				{
					elog(VERBOSE, "Merge target and source files into the temporary path \"%s\"",
						 tmp_file_path);

					/*
					 * file->path points to the file in from_root directory. But we
					 * need the file in directory to_root.
					 */
					prev_path = to_file->path;
					to_file->path = to_file_path;
					/* Decompress target file into temporary one */
					restore_data_file(tmp_file_path, to_file, false, false,
									  parse_program_version(to_backup->program_version));
					to_file->path = prev_path;
				}
				else
					elog(VERBOSE, "Restore source file into the temporary path \"%s\"",
						 tmp_file_path);
				/* Merge source file with target file */
				restore_data_file(tmp_file_path, file,
								  from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
								  false,
								  parse_program_version(from_backup->program_version));

				elog(VERBOSE, "Compress file and save it into the directory \"%s\"",
					 argument->to_root);

				/* Again we need to change path */
				prev_path = file->path;
				file->path = tmp_file_path;
				/* backup_data_file() requires file size to calculate nblocks */
				file->size = pgFileSize(file->path);
				/* Now we can compress the file */
				backup_data_file(NULL, /* We shouldn't need 'arguments' here */
								 to_file_path, file,
								 to_backup->start_lsn,
								 to_backup->backup_mode,
								 to_backup->compress_alg,
								 to_backup->compress_level);

				file->path = prev_path;

				/* We can remove temporary file now */
				if (unlink(tmp_file_path))
					elog(ERROR, "Could not remove temporary file \"%s\": %s",
						 tmp_file_path, strerror(errno));
			}
			/*
			 * Otherwise merging algorithm is simpler.
			 */
			else
			{
				/* We can merge in-place here */
				restore_data_file(to_file_path, file,
								  from_backup->backup_mode == BACKUP_MODE_DIFF_DELTA,
								  true,
								  parse_program_version(from_backup->program_version));

				/*
				 * We need to calculate write_size, restore_data_file() doesn't
				 * do that.
				 */
				file->write_size = pgFileSize(to_file_path);
				file->crc = pgFileGetCRC(to_file_path, true, true, NULL);
			}
		}
		/* pg_control must be copied with its own routine */
		else if (strcmp(file->name, "pg_control") == 0)
			copy_pgcontrol_file(argument->from_root, argument->to_root, file);
		else
			copy_file(argument->from_root, argument->to_root, file);

		/*
		 * We need to save compression algorithm type of the target backup to be
		 * able to restore in the future.
		 */
		file->compress_alg = to_backup->compress_alg;

		if (file->write_size != BYTES_INVALID)
			elog(LOG, "Merged file \"%s\": " INT64_FORMAT " bytes",
				 file->path, file->write_size);

		/* Restore relative path */
		file->path = prev_file_path;
	}

	/* Data files merging is successful */
	argument->ret = 0;

	return NULL;
}
|