
[PBCKP-129] change catchup logging levels (#473)

* [PBCKP-129] change catchup logging level verbosity:
INFO – common information
LOG – same as INFO + info about files
VERBOSE – same as LOG + info about blocks and SQL queries
Author: Mikhail A. Kulagin, 2022-08-04 17:16:17 +03:00 (committed by GitHub)
parent a2811effa4
commit 2e1950a7ff
8 changed files with 93 additions and 96 deletions
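
The three levels named in the commit message form a strict verbosity hierarchy in pg_probackup's elog() calls, which is the pattern visible throughout the diff below. Here is a minimal, self-contained sketch of that filtering idea; the enum values, the demo_elog() helper, and the log_threshold variable are illustrative stand-ins, not pg_probackup's actual logger internals.

#include <stdio.h>
#include <stdarg.h>

/* Illustrative severity ordering only -- not pg_probackup's real enum values. */
typedef enum { VERBOSE = 1, LOG, INFO, WARNING, ERROR_LEVEL } elevel_t;

/* Messages at or above this threshold are printed (think --log-level-console). */
static elevel_t log_threshold = INFO;

static void
demo_elog(elevel_t elevel, const char *fmt, ...)
{
    va_list args;

    if (elevel < log_threshold)
        return;                 /* suppressed: too detailed for current setting */

    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    fputc('\n', stderr);
    va_end(args);
}

int
main(void)
{
    /* With threshold = INFO only the first message appears;
     * LOG additionally shows per-file details; VERBOSE adds per-block details. */
    demo_elog(INFO,    "Database catchup start");
    demo_elog(LOG,     "File \"%s\". Copied %lld bytes", "base/1/1249", 8192LL);
    demo_elog(VERBOSE, "Using pagemap for file \"%s\"", "base/1/1249");
    return 0;
}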

archive.c

@ -3,7 +3,7 @@
* archive.c: - pg_probackup specific archive commands for archive backups.
*
*
* Portions Copyright (c) 2018-2021, Postgres Professional
* Portions Copyright (c) 2018-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -361,7 +361,7 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir,
canonicalize_path(wal_file_ready);
canonicalize_path(wal_file_done);
/* It is ok to rename status file in archive_status directory */
elog(VERBOSE, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done);
elog(LOG, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done);
/* do not error out, if rename failed */
if (fio_rename(wal_file_ready, wal_file_done, FIO_DB_HOST) < 0)
@ -505,7 +505,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d
}
part_opened:
elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_part);
elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_part);
/* Check if possible to skip copying */
if (fileExists(to_fullpath, FIO_BACKUP_HOST))
{
@ -595,7 +595,7 @@ part_opened:
to_fullpath_part, strerror(errno));
}
elog(VERBOSE, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath);
elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath);
//copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true);
@ -752,7 +752,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir,
}
part_opened:
elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part);
elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part);
/* Check if possible to skip copying,
*/
if (fileExists(to_fullpath_gz, FIO_BACKUP_HOST))
@ -844,7 +844,7 @@ part_opened:
to_fullpath_gz_part, strerror(errno));
}
elog(VERBOSE, "Rename \"%s\" to \"%s\"",
elog(LOG, "Rename \"%s\" to \"%s\"",
to_fullpath_gz_part, to_fullpath_gz);
//copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true);
@ -1155,7 +1155,7 @@ do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const cha
if (get_wal_file(wal_file_name, backup_wal_file_path, absolute_wal_file_path, false))
{
fail_count = 0;
elog(INFO, "pg_probackup archive-get copied WAL file %s", wal_file_name);
elog(LOG, "pg_probackup archive-get copied WAL file %s", wal_file_name);
n_fetched++;
break;
}
@ -1511,7 +1511,7 @@ get_wal_file_internal(const char *from_path, const char *to_path, FILE *out,
char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */
int exit_code = 0;
elog(VERBOSE, "Attempting to %s WAL file '%s'",
elog(LOG, "Attempting to %s WAL file '%s'",
is_decompress ? "open compressed" : "open", from_path);
/* open source file for read */

backup.c

@ -3,7 +3,7 @@
* backup.c: backup DB cluster, archived WAL
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
* Portions Copyright (c) 2015-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -116,7 +116,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
char pretty_time[20];
char pretty_bytes[20];
elog(LOG, "Database backup start");
elog(INFO, "Database backup start");
if(current.external_dir_str)
{
external_dirs = make_external_directory_list(current.external_dir_str,
@ -336,11 +336,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
/* Extract information about files in backup_list parsing their names:*/
parse_filelist_filenames(backup_files_list, instance_config.pgdata);
elog(LOG, "Current Start LSN: %X/%X, TLI: %X",
elog(INFO, "Current Start LSN: %X/%X, TLI: %X",
(uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn),
current.tli);
if (current.backup_mode != BACKUP_MODE_FULL)
elog(LOG, "Parent Start LSN: %X/%X, TLI: %X",
elog(INFO, "Parent Start LSN: %X/%X, TLI: %X",
(uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn),
prev_backup->tli);
@ -412,7 +412,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
else
join_path_components(dirpath, current.database_dir, file->rel_path);
elog(VERBOSE, "Create directory '%s'", dirpath);
elog(LOG, "Create directory '%s'", dirpath);
fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST);
}
@ -673,7 +673,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo)
nodeInfo->checksum_version = current.checksum_version;
if (current.checksum_version)
elog(LOG, "This PostgreSQL instance was initialized with data block checksums. "
elog(INFO, "This PostgreSQL instance was initialized with data block checksums. "
"Data block corruption will be detected");
else
elog(WARNING, "This PostgreSQL instance was initialized without data block checksums. "
@ -1513,7 +1513,7 @@ wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBa
stop_lsn_exists = true;
}
elog(LOG, "stop_lsn: %X/%X",
elog(INFO, "stop_lsn: %X/%X",
(uint32) (stop_lsn >> 32), (uint32) (stop_lsn));
/*
@ -1902,7 +1902,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb
backup->recovery_xid = stop_backup_result.snapshot_xid;
elog(LOG, "Getting the Recovery Time from WAL");
elog(INFO, "Getting the Recovery Time from WAL");
/* iterate over WAL from stop_backup lsn to start_backup lsn */
if (!read_recovery_info(xlog_path, backup->tli,
@ -1910,7 +1910,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb
backup->start_lsn, backup->stop_lsn,
&backup->recovery_time))
{
elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp");
elog(INFO, "Failed to find Recovery Time in WAL, forced to trust current_timestamp");
backup->recovery_time = stop_backup_result.invocation_time;
}
@ -1992,9 +1992,8 @@ backup_files(void *arg)
if (interrupted || thread_interrupted)
elog(ERROR, "interrupted during backup");
if (progress)
elog(INFO, "Progress: (%d/%d). Process file \"%s\"",
i + 1, n_backup_files_list, file->rel_path);
elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"",
i + 1, n_backup_files_list, file->rel_path);
/* Handle zero sized files */
if (file->size == 0)
@ -2064,11 +2063,11 @@ backup_files(void *arg)
if (file->write_size == BYTES_INVALID)
{
elog(VERBOSE, "Skipping the unchanged file: \"%s\"", from_fullpath);
elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath);
continue;
}
elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
elog(LOG, "File \"%s\". Copied "INT64_FORMAT " bytes",
from_fullpath, file->write_size);
}
@ -2186,26 +2185,26 @@ set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i)
elog(ERROR, "Out of memory");
len = strlen("/pg_compression");
cfs_tblspc_path[strlen(cfs_tblspc_path) - len] = 0;
elog(VERBOSE, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative);
elog(LOG, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative);
for (p = (int) i; p >= 0; p--)
{
prev_file = (pgFile *) parray_get(files, (size_t) p);
elog(VERBOSE, "Checking file in cfs tablespace %s", prev_file->rel_path);
elog(LOG, "Checking file in cfs tablespace %s", prev_file->rel_path);
if (strstr(prev_file->rel_path, cfs_tblspc_path) != NULL)
{
if (S_ISREG(prev_file->mode) && prev_file->is_datafile)
{
elog(VERBOSE, "Setting 'is_cfs' on file %s, name %s",
elog(LOG, "Setting 'is_cfs' on file %s, name %s",
prev_file->rel_path, prev_file->name);
prev_file->is_cfs = true;
}
}
else
{
elog(VERBOSE, "Breaking on %s", prev_file->rel_path);
elog(LOG, "Breaking on %s", prev_file->rel_path);
break;
}
}

catchup.c

@ -2,7 +2,7 @@
*
* catchup.c: sync DB cluster
*
* Copyright (c) 2022, Postgres Professional
* Copyright (c) 2021-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -203,7 +203,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
/* fill dest_redo.lsn and dest_redo.tli */
get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo);
elog(VERBOSE, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X",
elog(LOG, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X",
current.tli, (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn, dest_redo.tli);
if (current.tli != 1)
@ -398,9 +398,8 @@ catchup_thread_runner(void *arg)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during catchup");
if (progress)
elog(INFO, "Progress: (%d/%d). Process file \"%s\"",
i + 1, n_files, file->rel_path);
elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"",
i + 1, n_files, file->rel_path);
/* construct destination filepath */
Assert(file->external_dir_num == 0);
@ -447,12 +446,12 @@ catchup_thread_runner(void *arg)
if (file->write_size == BYTES_INVALID)
{
elog(VERBOSE, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size);
elog(LOG, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size);
continue;
}
arguments->transfered_bytes += file->write_size;
elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",
elog(LOG, "File \"%s\". Copied "INT64_FORMAT " bytes",
from_fullpath, file->write_size);
}
@ -607,7 +606,7 @@ filter_filelist(parray *filelist, const char *pgdata,
&& parray_bsearch(exclude_relative_paths_list, file->rel_path, pgPrefixCompareString)!= NULL)
)
{
elog(LOG, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path);
elog(INFO, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path);
file->excluded = true;
}
}
@ -650,7 +649,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
if (exclude_relative_paths_list != NULL)
parray_qsort(exclude_relative_paths_list, pgCompareString);
elog(LOG, "Database catchup start");
elog(INFO, "Database catchup start");
if (current.backup_mode != BACKUP_MODE_FULL)
{
@ -697,7 +696,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
/* Call pg_start_backup function in PostgreSQL connect */
pg_start_backup(label, smooth_checkpoint, &current, &source_node_info, source_conn);
elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));
elog(INFO, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn));
}
/* Sanity: source cluster must be "in future" relatively to dest cluster */
@ -772,11 +771,11 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, pretty_bytes);
}
elog(LOG, "Start LSN (source): %X/%X, TLI: %X",
elog(INFO, "Start LSN (source): %X/%X, TLI: %X",
(uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn),
current.tli);
if (current.backup_mode != BACKUP_MODE_FULL)
elog(LOG, "LSN in destination: %X/%X, TLI: %X",
elog(INFO, "LSN in destination: %X/%X, TLI: %X",
(uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn),
dest_redo.tli);
@ -829,7 +828,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
char dirpath[MAXPGPATH];
join_path_components(dirpath, dest_pgdata, file->rel_path);
elog(VERBOSE, "Create directory '%s'", dirpath);
elog(LOG, "Create directory '%s'", dirpath);
if (!dry_run)
fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
}
@ -859,7 +859,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
join_path_components(to_path, dest_pgdata, file->rel_path);
elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
elog(INFO, "Create directory \"%s\" and symbolic link \"%s\"",
linked_path, to_path);
if (!dry_run)
@ -946,7 +946,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
{
fio_delete(file->mode, fullpath, FIO_LOCAL_HOST);
}
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
elog(LOG, "Deleted file \"%s\"", fullpath);
/* shrink dest pgdata list */
pgFileFree(file);

data.c

@ -3,7 +3,7 @@
* data.c: utils to parse and backup data pages
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
* Portions Copyright (c) 2015-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -696,7 +696,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa
use_pagemap = true;
if (use_pagemap)
elog(VERBOSE, "Using pagemap for file \"%s\"", file->rel_path);
elog(LOG, "Using pagemap for file \"%s\"", file->rel_path);
/* Remote mode */
if (fio_is_remote(FIO_DB_HOST))
@ -795,7 +795,7 @@ backup_non_data_file(pgFile *file, pgFile *prev_file,
}
/*
* If nonedata file exists in previous backup
* If non-data file exists in previous backup
* and its mtime is less than parent backup start time ... */
if ((pg_strcasecmp(file->name, RELMAPPER_FILENAME) != 0) &&
(prev_file && file->exists_in_prev &&
@ -1197,7 +1197,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers
datapagemap_add(map, blknum);
}
elog(VERBOSE, "Copied file \"%s\": %lu bytes", from_fullpath, write_len);
elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, write_len);
return write_len;
}
@ -1220,7 +1220,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file,
/* check for interrupt */
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during nonedata file restore");
elog(ERROR, "Interrupted during non-data file restore");
read_len = fread(buf, 1, STDIO_BUFSIZE, in);
@ -1241,7 +1241,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file,
pg_free(buf);
elog(VERBOSE, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size);
elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size);
}
size_t
@ -1286,7 +1286,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup,
*/
if (!tmp_file)
{
elog(ERROR, "Failed to locate nonedata file \"%s\" in backup %s",
elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s",
dest_file->rel_path, base36enc(tmp_backup->start_time));
continue;
}
@ -1311,14 +1311,14 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup,
/* sanity */
if (!tmp_backup)
elog(ERROR, "Failed to locate a backup containing full copy of nonedata file \"%s\"",
elog(ERROR, "Failed to locate a backup containing full copy of non-data file \"%s\"",
to_fullpath);
if (!tmp_file)
elog(ERROR, "Failed to locate a full copy of nonedata file \"%s\"", to_fullpath);
elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", to_fullpath);
if (tmp_file->write_size <= 0)
elog(ERROR, "Full copy of nonedata file has invalid size: %li. "
elog(ERROR, "Full copy of non-data file has invalid size: %li. "
"Metadata corruption in backup %s in file: \"%s\"",
tmp_file->write_size, base36enc(tmp_backup->start_time),
to_fullpath);
@ -1331,7 +1331,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup,
if (file_crc == tmp_file->crc)
{
elog(VERBOSE, "Already existing nonedata file \"%s\" has the same checksum, skip restore",
elog(LOG, "Already existing non-data file \"%s\" has the same checksum, skip restore",
to_fullpath);
return 0;
}
@ -1359,7 +1359,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup,
elog(ERROR, "Cannot open backup file \"%s\": %s", from_fullpath,
strerror(errno));
/* disable stdio buffering for nonedata files */
/* disable stdio buffering for non-data files */
setvbuf(in, NULL, _IONBF, BUFSIZ);
/* do actual work */
@ -1683,7 +1683,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
int n_hdr = -1;
off_t cur_pos_in = 0;
elog(VERBOSE, "Validate relation blocks for file \"%s\"", fullpath);
elog(LOG, "Validate relation blocks for file \"%s\"", fullpath);
/* should not be possible */
Assert(!(backup_version >= 20400 && file->n_headers <= 0));
@ -1742,7 +1742,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
elog(ERROR, "Cannot seek block %u of \"%s\": %s",
blknum, fullpath, strerror(errno));
else
elog(INFO, "Seek to %u", headers[n_hdr].pos);
elog(VERBOSE, "Seek to %u", headers[n_hdr].pos);
cur_pos_in = headers[n_hdr].pos;
}
@ -1766,7 +1766,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
/* backward compatibility kludge TODO: remove in 3.0 */
if (compressed_size == PageIsTruncated)
{
elog(INFO, "Block %u of \"%s\" is truncated",
elog(VERBOSE, "Block %u of \"%s\" is truncated",
blknum, fullpath);
continue;
}
@ -1837,10 +1837,10 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn,
switch (rc)
{
case PAGE_IS_NOT_FOUND:
elog(LOG, "File \"%s\", block %u, page is NULL", file->rel_path, blknum);
elog(VERBOSE, "File \"%s\", block %u, page is NULL", file->rel_path, blknum);
break;
case PAGE_IS_ZEROED:
elog(LOG, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum);
elog(VERBOSE, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum);
break;
case PAGE_HEADER_IS_INVALID:
elog(WARNING, "Page header is looking insane: %s, block %i", file->rel_path, blknum);

dir.c

@ -3,7 +3,7 @@
* dir.c: directory operation utility.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
* Portions Copyright (c) 2015-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -640,7 +640,7 @@ dir_check_file(pgFile *file, bool backup_logs)
pgdata_exclude_files_non_exclusive[i]) == 0)
{
/* Skip */
elog(VERBOSE, "Excluding file: %s", file->name);
elog(LOG, "Excluding file: %s", file->name);
return CHECK_FALSE;
}
}
@ -649,7 +649,7 @@ dir_check_file(pgFile *file, bool backup_logs)
if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0)
{
/* Skip */
elog(VERBOSE, "Excluding file: %s", file->name);
elog(LOG, "Excluding file: %s", file->name);
return CHECK_FALSE;
}
}
@ -669,7 +669,7 @@ dir_check_file(pgFile *file, bool backup_logs)
/* exclude by dirname */
if (strcmp(file->name, pgdata_exclude_dir[i]) == 0)
{
elog(VERBOSE, "Excluding directory content: %s", file->rel_path);
elog(LOG, "Excluding directory content: %s", file->rel_path);
return CHECK_EXCLUDE_FALSE;
}
}
@ -679,7 +679,7 @@ dir_check_file(pgFile *file, bool backup_logs)
if (strcmp(file->rel_path, PG_LOG_DIR) == 0)
{
/* Skip */
elog(VERBOSE, "Excluding directory content: %s", file->rel_path);
elog(LOG, "Excluding directory content: %s", file->rel_path);
return CHECK_EXCLUDE_FALSE;
}
}
@ -1166,7 +1166,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
join_path_components(to_path, data_dir, dir->rel_path);
elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
elog(LOG, "Create directory \"%s\" and symbolic link \"%s\"",
linked_path, to_path);
/* create tablespace directory */
@ -1183,7 +1183,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
}
/* This is not symlink, create directory */
elog(VERBOSE, "Create directory \"%s\"", dir->rel_path);
elog(LOG, "Create directory \"%s\"", dir->rel_path);
join_path_components(to_path, data_dir, dir->rel_path);
@ -1934,7 +1934,7 @@ cleanup_tablespace(const char *path)
join_path_components(fullpath, path, file->rel_path);
fio_delete(file->mode, fullpath, FIO_DB_HOST);
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
elog(LOG, "Deleted file \"%s\"", fullpath);
}
parray_walk(files, pgFileFree);

merge.c

@ -2,7 +2,7 @@
*
* merge.c: merge FULL and incremental backups
*
* Copyright (c) 2018-2019, Postgres Professional
* Copyright (c) 2018-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -171,7 +171,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool
elog(ERROR, "Merge target is full backup and has multiple direct children, "
"you must specify child backup id you want to merge with");
elog(LOG, "Looking for closest incremental backup to merge with");
elog(INFO, "Looking for closest incremental backup to merge with");
/* Look for closest child backup */
for (i = 0; i < parray_num(backups); i++)
@ -810,7 +810,7 @@ merge_chain(InstanceState *instanceState,
join_path_components(full_file_path, full_database_dir, full_file->rel_path);
pgFileDelete(full_file->mode, full_file_path);
elog(VERBOSE, "Deleted \"%s\"", full_file_path);
elog(LOG, "Deleted \"%s\"", full_file_path);
}
}
@ -956,9 +956,8 @@ merge_files(void *arg)
if (S_ISDIR(dest_file->mode))
goto done;
if (progress)
elog(INFO, "Progress: (%d/%lu). Merging file \"%s\"",
i + 1, n_files, dest_file->rel_path);
elog(progress ? INFO : LOG, "Progress: (%d/%lu). Merging file \"%s\"",
i + 1, n_files, dest_file->rel_path);
if (dest_file->is_datafile && !dest_file->is_cfs)
tmp_file->segno = dest_file->segno;
@ -1063,7 +1062,7 @@ merge_files(void *arg)
{
BackupPageHeader2 *headers = NULL;
elog(VERBOSE, "The file didn`t changed since FULL backup, skip merge: \"%s\"",
elog(LOG, "The file didn`t changed since FULL backup, skip merge: \"%s\"",
file->rel_path);
tmp_file->crc = file->crc;
@ -1144,7 +1143,7 @@ remove_dir_with_files(const char *path)
join_path_components(full_path, path, file->rel_path);
pgFileDelete(file->mode, full_path);
elog(VERBOSE, "Deleted \"%s\"", full_path);
elog(LOG, "Deleted \"%s\"", full_path);
}
/* cleanup */
@ -1193,7 +1192,7 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external,
char new_path[MAXPGPATH];
makeExternalDirPathByNum(old_path, externaldir_template, i + 1);
makeExternalDirPathByNum(new_path, externaldir_template, from_num);
elog(VERBOSE, "Rename %s to %s", old_path, new_path);
elog(LOG, "Rename %s to %s", old_path, new_path);
if (rename (old_path, new_path) == -1)
elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s",
old_path, new_path, strerror(errno));
@ -1346,7 +1345,7 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup,
*/
if (!from_file)
{
elog(ERROR, "Failed to locate nonedata file \"%s\" in backup %s",
elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s",
dest_file->rel_path, base36enc(from_backup->start_time));
continue;
}
@ -1357,11 +1356,11 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup,
/* sanity */
if (!from_backup)
elog(ERROR, "Failed to found a backup containing full copy of nonedata file \"%s\"",
elog(ERROR, "Failed to found a backup containing full copy of non-data file \"%s\"",
dest_file->rel_path);
if (!from_file)
elog(ERROR, "Failed to locate a full copy of nonedata file \"%s\"", dest_file->rel_path);
elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", dest_file->rel_path);
/* set path to source file */
if (from_file->external_dir_num)
@ -1450,4 +1449,4 @@ is_forward_compatible(parray *parent_chain)
}
return true;
}
}

restore.c

@ -3,7 +3,7 @@
* restore.c: restore DB cluster and archived WAL.
*
* Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
* Portions Copyright (c) 2015-2022, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -843,7 +843,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
external_path = parray_get(external_dirs, file->external_dir_num - 1);
join_path_components(dirpath, external_path, file->rel_path);
elog(VERBOSE, "Create external directory \"%s\"", dirpath);
elog(LOG, "Create external directory \"%s\"", dirpath);
fio_mkdir(dirpath, file->mode, FIO_DB_HOST);
}
}
@ -923,7 +923,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain,
join_path_components(fullpath, pgdata_path, file->rel_path);
fio_delete(file->mode, fullpath, FIO_DB_HOST);
elog(VERBOSE, "Deleted file \"%s\"", fullpath);
elog(LOG, "Deleted file \"%s\"", fullpath);
/* shrink pgdata list */
pgFileFree(file);
@ -1131,9 +1131,8 @@ restore_files(void *arg)
if (interrupted || thread_interrupted)
elog(ERROR, "Interrupted during restore");
if (progress)
elog(INFO, "Progress: (%d/%lu). Restore file \"%s\"",
i + 1, n_files, dest_file->rel_path);
elog(progress ? INFO : LOG, "Progress: (%d/%lu). Restore file \"%s\"",
i + 1, n_files, dest_file->rel_path);
/* Only files from pgdata can be skipped by partial restore */
if (arguments->dbOid_exclude_list && dest_file->external_dir_num == 0)
@ -1149,7 +1148,7 @@ restore_files(void *arg)
create_empty_file(FIO_BACKUP_HOST,
arguments->to_root, FIO_DB_HOST, dest_file);
elog(VERBOSE, "Skip file due to partial restore: \"%s\"",
elog(LOG, "Skip file due to partial restore: \"%s\"",
dest_file->rel_path);
continue;
}
@ -1159,7 +1158,7 @@ restore_files(void *arg)
if ((dest_file->external_dir_num == 0) &&
strcmp(PG_TABLESPACE_MAP_FILE, dest_file->rel_path) == 0)
{
elog(VERBOSE, "Skip tablespace_map");
elog(LOG, "Skip tablespace_map");
continue;
}
@ -1167,7 +1166,7 @@ restore_files(void *arg)
if ((dest_file->external_dir_num == 0) &&
strcmp(DATABASE_MAP, dest_file->rel_path) == 0)
{
elog(VERBOSE, "Skip database_map");
elog(LOG, "Skip database_map");
continue;
}
@ -1239,9 +1238,9 @@ restore_files(void *arg)
strerror(errno));
if (!dest_file->is_datafile || dest_file->is_cfs)
elog(VERBOSE, "Restoring nonedata file: \"%s\"", to_fullpath);
elog(LOG, "Restoring non-data file: \"%s\"", to_fullpath);
else
elog(VERBOSE, "Restoring data file: \"%s\"", to_fullpath);
elog(LOG, "Restoring data file: \"%s\"", to_fullpath);
// If destination file is 0 sized, then just close it and go for the next
if (dest_file->write_size == 0)
@ -1261,10 +1260,10 @@ restore_files(void *arg)
}
else
{
/* disable stdio buffering for local destination nonedata file */
/* disable stdio buffering for local destination non-data file */
if (!fio_is_remote_file(out))
setvbuf(out, NULL, _IONBF, BUFSIZ);
/* Destination file is nonedata file */
/* Destination file is non-data file */
arguments->restored_bytes += restore_non_data_file(arguments->parent_chain,
arguments->dest_backup, dest_file, out, to_fullpath,
already_exists);
@ -1773,7 +1772,7 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict
}
if (fd && (ferror(fd)))
elog(ERROR, "Failed to read from file: \"%s\"", path);
elog(ERROR, "Failed to read from file: \"%s\"", path);
if (fd)
fclose(fd);
@ -2188,7 +2187,7 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier,
* data files content, because based on pg_control information we will
* choose a backup suitable for lsn based incremental restore.
*/
elog(INFO, "Trying to read pg_control file in destination directory");
elog(LOG, "Trying to read pg_control file in destination directory");
system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false);

util.c

@ -3,7 +3,7 @@
* util.c: log messages to log file or stderr, and misc code.
*
* Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
* Portions Copyright (c) 2015-2019, Postgres Professional
* Portions Copyright (c) 2015-2021, Postgres Professional
*
*-------------------------------------------------------------------------
*/
@ -589,7 +589,7 @@ datapagemap_print_debug(datapagemap_t *map)
iter = datapagemap_iterate(map);
while (datapagemap_next(iter, &blocknum))
elog(INFO, " block %u", blocknum);
elog(VERBOSE, " block %u", blocknum);
pg_free(iter);
}
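
A second pattern that recurs in backup.c, catchup.c, merge.c and restore.c above is the change from "if (progress) elog(INFO, "Progress: ...")" to "elog(progress ? INFO : LOG, "Progress: ...")": the per-file progress message is now always emitted, promoted to INFO when --progress is requested and kept at the quieter LOG level otherwise. A self-contained sketch of that idiom follows; demo_elog() and the level constants are stand-ins, not pg_probackup code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative level constants -- not pg_probackup's real values. */
enum { LOG_LEVEL = 2, INFO_LEVEL = 3 };

static void
demo_elog(int elevel, const char *msg)
{
    /* In the real tool this would go through the level filter sketched earlier;
     * here we simply tag the message with its level. */
    printf("[level %d] %s\n", elevel, msg);
}

int
main(void)
{
    bool progress = false;   /* true when --progress is passed on the command line */

    /* Before this commit: emitted only under "if (progress)".
     * After this commit: always emitted, but only at the user-visible INFO
     * level when progress reporting was actually requested. */
    demo_elog(progress ? INFO_LEVEL : LOG_LEVEL,
              "Progress: (1/42). Process file \"base/1/1249\"");
    return 0;
}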