Mirror of https://github.com/postgrespro/pg_probackup.git (synced 2025-02-08 14:28:36 +02:00)
[Issue #101] Multiple spelling fixes. Reported by Alexander Lakhin
parent 44923e7fb7
commit db73f84057
@@ -112,9 +112,9 @@ gen_probackup_project.pl C:\path_to_postgresql_source_tree

 Currently the latest documentation can be found at [github](https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md) and [Postgres Pro Enterprise documentation](https://postgrespro.com/docs/postgrespro/current/app-pgprobackup).

-## Licence
+## License

-This module available under the [license](LICENSE) similar to [PostgreSQL](https://www.postgresql.org/about/licence/).
+This module available under the [license](LICENSE) similar to [PostgreSQL](https://www.postgresql.org/about/license/).

 ## Feedback

@@ -5,7 +5,7 @@ our $pgsrc;
 our $currpath;

 BEGIN {
-# path to the pg_pprobackup dir
+# path to the pg_probackup dir
 $currpath = File::Basename::dirname(Cwd::abs_path($0));
 use Cwd;
 use File::Basename;
src/backup.c (16 changed lines)
@@ -28,7 +28,7 @@

 /*
  * Macro needed to parse ptrack.
- * NOTE Keep those values syncronised with definitions in ptrack.h
+ * NOTE Keep those values synchronized with definitions in ptrack.h
  */
 #define PTRACK_BITS_PER_HEAPBLOCK 1
 #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
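The macros in this hunk encode the ptrack layout: one bit per heap block, so each byte of the map covers eight pages. A minimal sketch of the resulting lookup arithmetic (the helper name is illustrative, not taken from backup.c):

```c
#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_BYTE             8
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE       (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)

/* Illustrative helper: test whether heap block `blkno` is marked as changed
 * in a raw ptrack map laid out as the macros above describe. */
static bool
ptrack_block_is_changed(const uint8_t *map, uint32_t blkno)
{
	uint32_t byteno = blkno / HEAPBLOCKS_PER_BYTE;	/* byte that holds the bit */
	uint32_t bitno  = blkno % HEAPBLOCKS_PER_BYTE;	/* bit position inside it */

	return (map[byteno] & (1 << bitno)) != 0;
}
```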
@@ -39,7 +39,7 @@ static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr;

 /*
  * How long we should wait for streaming end in seconds.
- * Retreived as checkpoint_timeout + checkpoint_timeout * 0.1
+ * Retrieved as checkpoint_timeout + checkpoint_timeout * 0.1
  */
 static uint32 stream_stop_timeout = 0;
 /* Time in which we started to wait for streaming end */
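The corrected comment documents how stream_stop_timeout is derived: the checkpoint_timeout GUC plus a 10% margin. A sketch of that calculation, assuming the timeout is already available in seconds (the helper below is illustrative):

```c
#include <stdint.h>

/* Illustrative: wait for streaming end for checkpoint_timeout plus a 10%
 * safety margin, as the comment above describes. */
static uint32_t
compute_stream_stop_timeout(uint32_t checkpoint_timeout_sec)
{
	return checkpoint_timeout_sec + checkpoint_timeout_sec * 0.1;
}
```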
@@ -451,7 +451,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)

 /* Run threads */
 thread_interrupted = false;
-elog(INFO, "Start transfering data files");
+elog(INFO, "Start transferring data files");
 for (i = 0; i < num_threads; i++)
 {
 backup_files_arg *arg = &(threads_args[i]);
@@ -468,7 +468,7 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo)
 backup_isok = false;
 }
 if (backup_isok)
-elog(INFO, "Data files are transfered");
+elog(INFO, "Data files are transferred");
 else
 elog(ERROR, "Data files transferring failed");

@@ -686,7 +686,7 @@ do_backup(time_t start_time, bool no_validate)
 /* below perform checks specific for backup command */
 #if PG_VERSION_NUM >= 110000
 if (!RetrieveWalSegSize(backup_conn))
-elog(ERROR, "Failed to retreive wal_segment_size");
+elog(ERROR, "Failed to retrieve wal_segment_size");
 #endif

 is_ptrack_support = pg_ptrack_support(backup_conn);
@@ -1346,7 +1346,7 @@ wait_wal_lsn(XLogRecPtr lsn, bool is_start_lsn, bool wait_prev_segment)

 tli = get_current_timeline(false);

-/* Compute the name of the WAL file containig requested LSN */
+/* Compute the name of the WAL file containing requested LSN */
 GetXLogSegNo(lsn, targetSegNo, instance_config.xlog_seg_size);
 if (wait_prev_segment)
 targetSegNo--;
@@ -1862,7 +1862,7 @@ pg_stop_backup(pgBackup *backup, PGconn *pg_startbackup_conn,
 }

 /*
- * Retreive checkpoint_timeout GUC value in seconds.
+ * Retrieve checkpoint_timeout GUC value in seconds.
  */
 static int
 checkpoint_timeout(PGconn *backup_conn)
@@ -2360,7 +2360,7 @@ make_pagemap_from_ptrack(parray *files, PGconn *backup_conn)
 if (ptrack_nonparsed != NULL)
 {
 /*
- * pg_ptrack_get_and_clear() returns ptrack with VARHDR cutted out.
+ * pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out.
  * Compute the beginning of the ptrack map related to this segment
  *
  * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
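The last hunk above touches the comment in make_pagemap_from_ptrack() that locates a single segment's slice of the relation-wide ptrack map. As a hedged sketch, with HEAPBLOCKS_PER_BYTE = 8 and RELSEG_SIZE blocks per 1 GB segment file, the slice for segment `segno` starts at the byte offset computed below (the helper name and the standalone defines are illustrative):

```c
#include <stddef.h>

#define RELSEG_SIZE         131072	/* blocks per 1 GB segment in a default build */
#define HEAPBLOCKS_PER_BYTE 8		/* heap pages tracked by one ptrack byte */

/* Illustrative: byte offset into the whole-relation ptrack map where the
 * part describing segment `segno` begins. */
static size_t
ptrack_segment_map_offset(unsigned int segno)
{
	return (size_t) segno * (RELSEG_SIZE / HEAPBLOCKS_PER_BYTE);
}
```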
@@ -260,7 +260,7 @@ lock_backup(pgBackup *backup)

 fio_unlink(lock_file, FIO_BACKUP_HOST);
 errno = save_errno;
-elog(ERROR, "Culd not write lock file \"%s\": %s",
+elog(ERROR, "Could not write lock file \"%s\": %s",
 lock_file, strerror(errno));
 }

@@ -1022,7 +1022,7 @@ parse_compress_alg(const char *arg)
 len = strlen(arg);

 if (len == 0)
-elog(ERROR, "compress algrorithm is empty");
+elog(ERROR, "compress algorithm is empty");

 if (pg_strncasecmp("zlib", arg, len) == 0)
 return ZLIB_COMPRESS;
@@ -1231,7 +1231,7 @@ find_parent_full_backup(pgBackup *current_backup)
 }

 /*
- * Interate over parent chain and look for any problems.
+ * Iterate over parent chain and look for any problems.
  * Return 0 if chain is broken.
  * result_backup must contain oldest existing backup after missing backup.
  * we have no way to know if there are multiple missing backups.
@@ -1262,7 +1262,7 @@ scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup)
 target_backup = target_backup->parent_backup_link;
 }

-/* Prevous loop will skip FULL backup because his parent_backup_link is NULL */
+/* Previous loop will skip FULL backup because his parent_backup_link is NULL */
 if (target_backup->backup_mode == BACKUP_MODE_FULL &&
 (target_backup->status != BACKUP_STATUS_OK &&
 target_backup->status != BACKUP_STATUS_DONE))
@@ -81,7 +81,7 @@ typedef struct pg_indexEntry
 char *name;
 char *namespace;
 bool heapallindexed_is_supported;
-/* schema where amcheck extention is located */
+/* schema where amcheck extension is located */
 char *amcheck_nspname;
 /* lock for synchronization of parallel threads */
 volatile pg_atomic_flag lock;
@@ -408,7 +408,7 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,
 PQgetvalue(res, 0, 2), PQgetvalue(res, 0, 1));

 if (!heapallindexed_is_supported && heapallindexed)
-elog(WARNING, "Extension '%s' verion %s in schema '%s'"
+elog(WARNING, "Extension '%s' version %s in schema '%s'"
 "do not support 'heapallindexed' option",
 PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 2),
 PQgetvalue(res, 0, 1));
src/data.c (10 changed lines)
@@ -159,7 +159,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version)
 return false;
 }
 #endif
-/* otherwize let's try to decompress the page */
+/* otherwise let's try to decompress the page */
 return true;
 }
 return false;
@@ -396,7 +396,7 @@ prepare_page(ConnectionArgs *arguments,
 {
 /*
  * We need to copy the page that was successfully
- * retreieved from ptrack into our output "page" parameter.
+ * retrieved from ptrack into our output "page" parameter.
  * We must set checksum here, because it is outdated
  * in the block recieved from shared buffers.
  */
@@ -482,7 +482,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
 compressed_page, header.compressed_size);
 write_buffer_size += MAXALIGN(header.compressed_size);
 }
-/* Nonpositive value means that compression failed. Write it as is. */
+/* Non-positive value means that compression failed. Write it as is. */
 else
 {
 header.compressed_size = BLCKSZ;
@@ -754,7 +754,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
 DataPage page;
 int32 uncompressed_size = 0;

-/* File didn`t changed. Nothig to copy */
+/* File didn`t changed. Nothing to copy */
 if (file->write_size == BYTES_INVALID)
 break;

@@ -887,7 +887,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
  * DELTA backup have no knowledge about truncated blocks as PAGE or PTRACK do
  * But during DELTA backup we read every file in PGDATA and thus DELTA backup
  * knows exact size of every file at the time of backup.
- * So when restoring file from DELTA backup we, knowning it`s size at
+ * So when restoring file from DELTA backup we, knowing it`s size at
  * a time of a backup, can truncate file to this size.
  */
 if (allow_truncate && file->n_blocks != BLOCKNUM_INVALID && !need_truncate)
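The comment fixed in the last hunk explains why a DELTA restore may truncate: the backup recorded the file's block count, so after the backed-up blocks are replayed the file can be cut back to that length. A hedged sketch of that final step (names are illustrative; restore_data_file() works through pg_probackup's own file layer rather than a raw descriptor):

```c
#include <sys/types.h>
#include <unistd.h>

#define BLCKSZ 8192	/* PostgreSQL block size in a default build */

/* Illustrative: shrink a restored data file to the block count recorded
 * at backup time (file->n_blocks in the real code). */
static int
truncate_to_backup_size(int fd, unsigned int n_blocks_at_backup)
{
	off_t target_size = (off_t) n_blocks_at_backup * BLCKSZ;

	return ftruncate(fd, target_size);
}
```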
@@ -116,7 +116,7 @@ do_delete(time_t backup_id)
  *
  * Invalid backups handled in Oracle style, so invalid backups are ignored
  * for the purpose of retention fulfillment,
- * i.e. CORRUPT full backup do not taken in account when deteremine
+ * i.e. CORRUPT full backup do not taken in account when determine
  * which FULL backup should be keeped for redundancy obligation(only valid do),
  * but if invalid backup is not guarded by retention - it is removed
  */
@@ -491,7 +491,7 @@ do_retention_merge(parray *backup_list, parray *to_keep_list, parray *to_purge_l
  * 2 PAGE1
  * 3 FULL
  *
- * Сonsequentially merge incremental backups from PAGE1 to PAGE3
+ * Consequentially merge incremental backups from PAGE1 to PAGE3
  * into FULL.
  */

@@ -1075,7 +1075,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba
 }

 /*
- * Read names of symbolik names of tablespaces with links to directories from
+ * Read names of symbolic names of tablespaces with links to directories from
  * tablespace_map or tablespace_map.txt.
  */
 void
@@ -1568,7 +1568,7 @@ pgFileSize(const char *path)
 }

 /*
- * Construct parray containing remmaped external directories paths
+ * Construct parray containing remapped external directories paths
  * from string like /path1:/path2
  */
 parray *
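The corrected comment in the last hunk documents the input format for remapped external directories: one string with paths separated by colons, such as /path1:/path2. A small standalone sketch of splitting such a list (dir.c itself builds a parray of copies; the function here is illustrative):

```c
#include <stdio.h>
#include <string.h>

/* Illustrative: walk a colon-separated list like "/path1:/path2" in place.
 * Note that strtok_r() modifies the string it is given. */
static void
print_external_dirs(char *dir_list)
{
	char *saveptr = NULL;

	for (char *dir = strtok_r(dir_list, ":", &saveptr);
		 dir != NULL;
		 dir = strtok_r(NULL, ":", &saveptr))
		printf("external directory: %s\n", dir);
}
```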
@@ -540,7 +540,7 @@ help_show(void)
 printf(_(" [--format=format]\n\n"));

 printf(_(" -B, --backup-path=backup-path location of the backup storage area\n"));
-printf(_(" --instance=instance_name show info about specific intstance\n"));
+printf(_(" --instance=instance_name show info about specific instance\n"));
 printf(_(" -i, --backup-id=backup-id show info about specific backups\n"));
 printf(_(" --format=format show format=PLAIN|JSON\n\n"));
 }
@@ -270,7 +270,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
 false);

 /*
- * Rename external directoties in to_backup (if exists)
+ * Rename external directories in to_backup (if exists)
  * according to numeration of external dirs in from_backup.
  */
 if (to_external)
@@ -594,6 +594,8 @@ merge_files(void *arg)
 elog(VERBOSE, "Merge target and source files into the temporary path \"%s\"",
 merge_to_file_path);

+// TODO: truncate merge_to_file_path just in case?
+
 /*
  * file->path is relative, to_file_path - is absolute.
  * Substitute them.
@@ -228,7 +228,7 @@ static XLogRecPtr wal_target_lsn = InvalidXLogRecPtr;
  * Read WAL from the archive directory, from 'startpoint' to 'endpoint' on the
  * given timeline. Collect data blocks touched by the WAL records into a page map.
  *
- * Pagemap extracting is processed using threads. Eeach thread reads single WAL
+ * Pagemap extracting is processed using threads. Each thread reads single WAL
  * file.
  */
 void
@@ -1491,7 +1491,7 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data,
 if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
 continue;

-/* We only care about the main fork; others are copied in toto */
+/* We only care about the main fork; others are copied as is */
 if (forknum != MAIN_FORKNUM)
 continue;

@@ -490,7 +490,7 @@ main(int argc, char *argv[])

 /* Usually checkdb for file logging requires log_directory
  * to be specified explicitly, but if backup_dir and instance name are provided,
- * checkdb can use the tusual default values or values from config
+ * checkdb can use the usual default values or values from config
  */
 if (backup_subcmd == CHECKDB_CMD &&
 (instance_config.logger.log_level_file != LOG_OFF &&
@@ -66,7 +66,7 @@ extern const char *PROGRAM_EMAIL;
 #define ARCHIVE_TIMEOUT_DEFAULT 300
 #define REPLICA_TIMEOUT_DEFAULT 300

-/* Direcotry/File permission */
+/* Directory/File permission */
 #define DIR_PERMISSION (0700)
 #define FILE_PERMISSION (0600)

@@ -264,7 +264,7 @@ struct pgBackup
 time_t backup_id; /* Identifier of the backup.
  * Currently it's the same as start_time */
 BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/
-TimeLineID tli; /* timeline of start and stop baskup lsns */
+TimeLineID tli; /* timeline of start and stop backup lsns */
 XLogRecPtr start_lsn; /* backup's starting transaction log location */
 XLogRecPtr stop_lsn; /* backup's finishing transaction log location */
 time_t start_time; /* since this moment backup has status
@@ -298,7 +298,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 {
 check_tablespace_mapping(dest_backup);

-/* no point in checking external directories if their restore is not resquested */
+/* no point in checking external directories if their restore is not requested */
 if (!skip_external_dirs)
 check_external_dir_mapping(dest_backup);
 }
@@ -377,7 +377,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
 rt->target_xid, rt->target_lsn,
 base_full_backup->tli, instance_config.xlog_seg_size);
 }
-/* Orphinize every OK descendant of corrupted backup */
+/* Orphanize every OK descendant of corrupted backup */
 else
 {
 char *corrupted_backup_id;
@@ -1064,7 +1064,7 @@ parseRecoveryTargetOptions(const char *target_time,
 if (parse_lsn(target_lsn, &dummy_lsn))
 rt->target_lsn = dummy_lsn;
 else
-elog(ERROR, "Invalid value of --ecovery-target-lsn option %s",
+elog(ERROR, "Invalid value of --recovery-target-lsn option %s",
 target_lsn);
 }

@@ -37,7 +37,7 @@ typedef struct
 /* Convert FIO pseudo handle to index in file descriptor array */
 #define fio_fileno(f) (((size_t)f - 1) | FIO_PIPE_MARKER)

-/* Use specified file descriptors as stding/stdout for FIO functions */
+/* Use specified file descriptors as stdin/stdout for FIO functions */
 void fio_redirect(int in, int out)
 {
 fio_stdin = in;
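The context lines in the last hunk show how the FIO layer disguises remote files: the pseudo handle appears to be a 1-based slot number, and fio_fileno() tags it with FIO_PIPE_MARKER so it cannot be mistaken for a real descriptor. A sketch of that round trip; the marker value and the decoding macro are assumptions for illustration, the real definitions live in src/utils/file.h:

```c
#include <stddef.h>
#include <stdio.h>

#define FIO_PIPE_MARKER 0x40000000	/* stand-in tag bit, not the verified constant */

/* As in the diff: pseudo handle -> tagged descriptor index. */
#define fio_fileno(f)  (((size_t)(f) - 1) | FIO_PIPE_MARKER)
/* Hypothetical inverse, shown only to make the encoding visible. */
#define fio_slot(no)   ((no) & ~(size_t)FIO_PIPE_MARKER)

int main(void)
{
	size_t handle = 3;					/* pseudo handle for slot 2, assuming 1-based handles */
	size_t fileno = fio_fileno(handle);	/* tagged value, e.g. 0x40000002 */

	printf("fileno=%#zx slot=%zu\n", fileno, fio_slot(fileno));
	return 0;
}
```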
@@ -726,7 +726,7 @@ int fio_access(char const* path, int mode, fio_location location)
 }
 }

-/* Create symbolink link */
+/* Create symbolic link */
 int fio_symlink(char const* target, char const* link_path, fio_location location)
 {
 if (fio_is_remote(location))
@@ -822,7 +822,7 @@ int fio_mkdir(char const* path, int mode, fio_location location)
 }
 }

-/* Checnge file mode */
+/* Change file mode */
 int fio_chmod(char const* path, int mode, fio_location location)
 {
 if (fio_is_remote(location))
@@ -954,7 +954,7 @@ fio_gzread(gzFile f, void *buf, unsigned size)

 while (1)
 {
-if (gz->strm.avail_in != 0) /* If there is some data in receiver buffer, then decmpress it */
+if (gz->strm.avail_in != 0) /* If there is some data in receiver buffer, then decompress it */
 {
 rc = inflate(&gz->strm, Z_NO_FLUSH);
 if (rc == Z_STREAM_END)
@@ -1021,7 +1021,7 @@ fio_gzwrite(gzFile f, void const* buf, unsigned size)
 {
 rc = deflate(&gz->strm, Z_NO_FLUSH);
 Assert(rc == Z_OK);
-gz->strm.next_out = gz->buf; /* Reset pointer to the beginning of bufer */
+gz->strm.next_out = gz->buf; /* Reset pointer to the beginning of buffer */
 }
 else
 {
@@ -1429,7 +1429,7 @@ void fio_communicate(int in, int out)
 case FIO_UNLINK: /* Remove file or directory (TODO: Win32) */
 SYS_CHECK(remove_file_or_dir(buf));
 break;
-case FIO_MKDIR: /* Create direcory */
+case FIO_MKDIR: /* Create directory */
 hdr.size = 0;
 hdr.arg = dir_create_dir(buf, hdr.arg);
 IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr));
@@ -15,7 +15,7 @@
 /* members of struct parray are hidden from client. */
 struct parray
 {
-void **data; /* poiter array, expanded if necessary */
+void **data; /* pointer array, expanded if necessary */
 size_t alloced; /* number of elements allocated */
 size_t used; /* number of elements in use */
 };
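struct parray above is pg_probackup's growable pointer array; callers only see the accessor functions. A short usage sketch, assuming the accessors declared in src/utils/parray.h (parray_new, parray_append, parray_num, parray_get, parray_free):

```c
#include <stdio.h>
#include "parray.h"	/* src/utils/parray.h */

/* Sketch: collect a few strings and walk them through the opaque parray API. */
static void
parray_demo(void)
{
	parray *modes = parray_new();

	parray_append(modes, "FULL");
	parray_append(modes, "PAGE");
	parray_append(modes, "DELTA");

	for (size_t i = 0; i < parray_num(modes); i++)
		printf("backup mode: %s\n", (char *) parray_get(modes, i));

	parray_free(modes);	/* frees the array itself, not the elements */
}
```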
@@ -97,7 +97,7 @@ parray_insert(parray *array, size_t index, void *elem)
 }

 /*
- * Concatinate two parray.
+ * Concatenate two parray.
  * parray_concat() appends the copy of the content of src to the end of dest.
  */
 parray *
@@ -757,7 +757,7 @@ on_interrupt(void)
 interrupted = true;

 /*
- * User promts password, call on_cleanup() byhand. Unless we do that we will
+ * User prompts password, call on_cleanup() byhand. Unless we do that we will
  * get stuck forever until a user enters a password.
  */
 if (in_password)
@@ -371,7 +371,7 @@ do_validate_all(void)
 /* TODO: Probably we should have different exit code for every condition
  * and they combination:
  * 0 - all backups are valid
- * 1 - some backups are corrup
+ * 1 - some backups are corrupt
  * 2 - some backups where skipped due to concurrent locks
  * 3 - some backups are corrupt and some are skipped due to concurrent locks
  */
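The TODO in the last hunk proposes exit codes for validation where 3 is simply 1 and 2 combined, so the two failure conditions compose as bit flags. A hypothetical illustration of that mapping (this is not how do_validate_all() currently reports its result):

```c
#include <stdbool.h>

/* Hypothetical mapping of the TODO's codes:
 * 0 - all backups are valid, 1 - some corrupt,
 * 2 - some skipped due to concurrent locks, 3 - both. */
static int
validation_exit_code(bool corruption_detected, bool skipped_due_to_lock)
{
	int code = 0;

	if (corruption_detected)
		code |= 1;
	if (skipped_due_to_lock)
		code |= 2;

	return code;
}
```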
@@ -547,7 +547,7 @@ do_validate_instance(void)
 /* For every OK backup we try to revalidate all his ORPHAN descendants. */
 if (current_backup->status == BACKUP_STATUS_OK)
 {
-/* revalidate all ORPHAN descendats
+/* revalidate all ORPHAN descendants
  * be very careful not to miss a missing backup
  * for every backup we must check that he is descendant of current_backup
  */
@@ -592,7 +592,7 @@ do_validate_instance(void)
 skipped_due_to_lock = true;
 continue;
 }
-/* Revaliate backup files*/
+/* Revalidate backup files*/
 pgBackupValidate(backup);

 if (backup->status == BACKUP_STATUS_OK)
@@ -1,11 +1,11 @@
 [see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup)

 ```
-Note: For now these are works on Linix and "kinda" works on Windows
+Note: For now these are works on Linux and "kinda" works on Windows
 ```

 ```
-Windows Note: For tablespaceses tests to work on Windows, you should explicitly(!) grant current user full access to tmp_dirs
+Windows Note: For tablespaces tests to work on Windows, you should explicitly(!) grant current user full access to tmp_dirs
 ```

@@ -23,7 +23,7 @@ Enable compatibility tests:
 Specify path to pg_probackup binary file. By default tests use <Path to Git repository>/pg_probackup/
 export PGPROBACKUPBIN=<path to pg_probackup>

-Remote backup depends on key authentithication to local machine via ssh as current user.
+Remote backup depends on key authentication to local machine via ssh as current user.
 export PGPROBACKUP_SSH_REMOTE=ON

 Run suit of basic simple tests:
@@ -47,7 +47,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 backup_dir, 'node', node)
 node.slow_start()

-# Recreate backup calagoue
+# Recreate backup catalog
 self.init_pb(backup_dir)
 self.add_instance(backup_dir, 'node', node)

@@ -350,7 +350,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 self.del_test_dir(module_name, fname)

 # @unittest.skip("skip")
-def test_arhive_push_file_exists(self):
+def test_archive_push_file_exists(self):
 """Archive-push if file exists"""
 fname = self.id().split('.')[3]
 backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -359,8 +359,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 set_replication=True,
 initdb_params=['--data-checksums'],
 pg_options={
-'checkpoint_timeout': '30s'}
-)
+'checkpoint_timeout': '30s'})

 self.init_pb(backup_dir)
 self.add_instance(backup_dir, 'node', node)
 self.set_archiving(backup_dir, 'node', node)
@@ -442,7 +442,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 self.del_test_dir(module_name, fname)

 # @unittest.skip("skip")
-def test_arhive_push_file_exists_overwrite(self):
+def test_archive_push_file_exists_overwrite(self):
 """Archive-push if file exists"""
 fname = self.id().split('.')[3]
 backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -451,8 +451,8 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
 set_replication=True,
 initdb_params=['--data-checksums'],
 pg_options={
-'checkpoint_timeout': '30s'}
-)
+'checkpoint_timeout': '30s'})

 self.init_pb(backup_dir)
 self.add_instance(backup_dir, 'node', node)
 self.set_archiving(backup_dir, 'node', node)
@@ -24,9 +24,9 @@ except ImportError:
 class SimpleAuthTest(ProbackupTest, unittest.TestCase):

 # @unittest.skip("skip")
-def test_backup_via_unpriviledged_user(self):
+def test_backup_via_unprivileged_user(self):
 """
-Make node, create unpriviledged user, try to
+Make node, create unprivileged user, try to
 run a backups without EXECUTE rights on
 certain functions
 """
@@ -643,7 +643,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
 path = os.path.join(root, file)
 list = list + [path]

-# We expect that relfilenode occures only once
+# We expect that relfilenode can be encountered only once
 if len(list) > 1:
 message = ""
 for string in list:
@@ -2019,7 +2019,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
 backup_dir, 'replica', replica, datname='backupdb',
 options=['-U', 'backup', '--log-level-file=verbose'])

-# PAGE
+# PAGE backup from replica
 self.backup_node(
 backup_dir, 'replica', replica, backup_type='page',
 datname='backupdb', options=['-U', 'backup'])
@@ -2027,7 +2027,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
 backup_dir, 'replica', replica, backup_type='page',
 datname='backupdb', options=['--stream', '-U', 'backup'])

-# DELTA
+# DELTA backup from replica
 self.backup_node(
 backup_dir, 'replica', replica, backup_type='delta',
 datname='backupdb', options=['-U', 'backup'])
@@ -330,7 +330,7 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):

 # corruption of both indexes in db1 and db2 must be detected
 # also the that amcheck is not installed in 'postgres'
-# musted be logged
+# should be logged
 with open(log_file_path) as f:
 log_file_content = f.read()
 self.assertIn(
@@ -472,9 +472,9 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
 self.del_test_dir(module_name, fname)

 # @unittest.skip("skip")
-def test_uncompressable_pages(self):
+def test_incompressible_pages(self):
 """
-make archive node, create table with uncompressable toast pages,
+make archive node, create table with incompressible toast pages,
 take backup with compression, make sure that page was not compressed,
 restore backup and check data correctness
 """
@@ -1418,7 +1418,7 @@ class ProbackupTest(object):

 else:
 error_message += (
-'\nFile dissappearance.\n '
+'\nFile disappearance.\n '
 'File: {0}\n').format(
 os.path.join(restored_pgdata['pgdata'], file)
 )
@@ -15,7 +15,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
 """
 make node, take full backup, stop it in the middle
 run validate, expect it to successfully executed,
-concurrect RUNNING backup with pid file and active process is legal
+concurrent RUNNING backup with pid file and active process is legal
 """
 fname = self.id().split('.')[3]
 node = self.make_simple_node(
@@ -406,7 +406,7 @@ class LockingTest(ProbackupTest, unittest.TestCase):
 # Clean after yourself
 self.del_test_dir(module_name, fname)

-def test_locking_concurrent_vaidate_and_backup(self):
+def test_locking_concurrent_validate_and_backup(self):
 """
 make node, take full backup, launch validate
 and stop it in the middle, take page backup.
@@ -1345,7 +1345,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):

 def test_merge_different_compression_algo(self):
 """
-Check that backups with different compression algorihtms can be merged
+Check that backups with different compression algorithms can be merged
 """
 fname = self.id().split('.')[3]
 backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
@@ -906,7 +906,7 @@ class PageBackupTest(ProbackupTest, unittest.TestCase):
 "md5(repeat(i::text,10))::tsvector as tsvector "
 "from generate_series(0,100000) i;")

-# copy lastest wal segment
+# copy latest wal segment
 wals_dir = os.path.join(backup_dir, 'wal', 'alien_node')
 wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
 wals_dir, f)) and not f.endswith('.backup')]
@@ -27,7 +27,7 @@ class ArchiveCheck(ProbackupTest, unittest.TestCase):
 self.add_instance(backup_dir, 'node', node)
 self.set_archiving(backup_dir, 'node', node)

-# make erroneus archive_command
+# make erroneous archive_command
 node.append_conf("postgresql.auto.conf", "archive_command = 'exit 0'")
 node.slow_start()

@@ -122,8 +122,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
 self.del_test_dir(module_name, fname)

 # @unittest.skip("skip")
-def test_ptrack_uncommited_xact(self):
-"""make ptrack backup while there is uncommited open transaction"""
+def test_ptrack_uncommitted_xact(self):
+"""make ptrack backup while there is uncommitted open transaction"""
 fname = self.id().split('.')[3]
 backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
 node = self.make_simple_node(
@@ -821,7 +821,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
 # we should die here because exception is what we expect to happen
 self.assertEqual(
 1, 0,
-"Expecting Error because restore destionation is not empty.\n "
+"Expecting Error because restore destination is not empty.\n "
 "Output: {0} \n CMD: {1}".format(
 repr(self.output), self.cmd))
 except ProbackupException as e:
@@ -304,7 +304,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
 def test_validate_corrupted_intermediate_backups(self):
 """
 make archive node, take FULL, PAGE1, PAGE2 backups,
-corrupt file in FULL and PAGE1 backupd, run validate on PAGE1,
+corrupt file in FULL and PAGE1 backups, run validate on PAGE1,
 expect FULL and PAGE1 to gain status CORRUPT and
 PAGE2 gain status ORPHAN
 """
@@ -27,7 +27,7 @@ yum install -y postgresql95-devel make gcc readline-devel openssl-devel pam-deve
 make top_srcdir=postgresql-$PGVERSION
 make install top_srcdir=postgresql-$PGVERSION

-# initalize cluster and database
+# initialize cluster and database
 yum install -y postgresql95-server
 su postgres -c "/usr/pgsql-9.5/bin/initdb -D $PGDATA -k"
 cat <<EOF > $PGDATA/pg_hba.conf