Mirror of https://github.com/postgrespro/pg_probackup.git
Synced 2025-03-17 21:18:00 +02:00

commit 8b45e972bb
Merge branch 'master' into stable

Makefile | 4
@@ -1,7 +1,7 @@
PROGRAM = pg_probackup
OBJS = src/backup.o src/catalog.o src/configure.o src/data.o \
    src/delete.o src/dir.o src/fetch.o src/help.o src/init.o \
    src/pg_probackup.o src/restore.o src/show.o src/status.o \
    src/pg_probackup.o src/restore.o src/show.o \
    src/util.o src/validate.o src/datapagemap.o src/parsexlog.o \
    src/xlogreader.o src/streamutil.o src/receivelog.o \
    src/archive.o src/utils/parray.o src/utils/pgut.o src/utils/logger.o \
@@ -38,7 +38,7 @@ EXTRA_CLEAN += src/walmethods.c src/walmethods.h
INCLUDES += src/walmethods.h
endif

PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc
PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_srcdir)/$(subdir)/src
override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS)
PG_LIBS = $(libpq_pgport) ${PTHREAD_CFLAGS}

@@ -145,7 +145,6 @@ sub build_pgprobackup
    'pg_probackup.c',
    'restore.c',
    'show.c',
    'status.c',
    'util.c',
    'validate.c'
);

@@ -7,10 +7,10 @@
 *
 *-------------------------------------------------------------------------
 */

#include "pg_probackup.h"

#include <unistd.h>
#include <sys/stat.h>

/*
 * pg_probackup specific archive command for archive backups

src/backup.c | 73
@@ -10,25 +10,28 @@

#include "pg_probackup.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
#include <dirent.h>
#include <time.h>

#if PG_VERSION_NUM < 110000
#include "catalog/catalog.h"
#endif
#include "catalog/pg_tablespace.h"
#include "datapagemap.h"
#include "libpq/pqsignal.h"
#include "pgtar.h"
#include "receivelog.h"
#include "storage/bufpage.h"
#include "streamutil.h"

#include <sys/stat.h>
#include <unistd.h>

#include "utils/thread.h"

#define PG_STOP_BACKUP_TIMEOUT 300

/*
 * Macro needed to parse ptrack.
 * NOTE Keep those values syncronised with definitions in ptrack.h
 */
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
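For orientation, a hedged note on the arithmetic these macros imply: with one ptrack bit per heap block, BITS_PER_BYTE / 1 = 8 heap blocks are covered by each byte of the ptrack map. The helper macros below are hypothetical, added here only to illustrate the intended mapping; they are not part of this diff:

    /* Hypothetical helpers (illustration only, not in pg_probackup):
     * locate the map byte and bit for a given heap block number. */
    #define HEAPBLK_TO_MAPBYTE(blk) ((blk) / HEAPBLOCKS_PER_BYTE)   /* 8 blocks per byte */
    #define HEAPBLK_TO_MAPBIT(blk)  ((blk) % HEAPBLOCKS_PER_BYTE)   /* bit within that byte */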

static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr;
static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr;
@@ -531,7 +534,7 @@ do_backup_instance(void)
        prev_backup_start_lsn = prev_backup->start_lsn;
        current.parent_backup = prev_backup->start_time;

        pgBackupWriteBackupControlFile(&current);
        write_backup(&current);
    }

/*
@@ -906,11 +909,13 @@ do_backup(time_t start_time)
    /* Start backup. Update backup status. */
    current.status = BACKUP_STATUS_RUNNING;
    current.start_time = start_time;
    StrNCpy(current.program_version, PROGRAM_VERSION,
            sizeof(current.program_version));

    /* Create backup directory and BACKUP_CONTROL_FILE */
    if (pgBackupCreateDir(&current))
        elog(ERROR, "cannot create backup directory");
    pgBackupWriteBackupControlFile(&current);
    write_backup(&current);

    elog(LOG, "Backup destination is initialized");

@@ -932,7 +937,7 @@ do_backup(time_t start_time)
    /* Backup is done. Update backup status */
    current.end_time = time(NULL);
    current.status = BACKUP_STATUS_DONE;
    pgBackupWriteBackupControlFile(&current);
    write_backup(&current);

    //elog(LOG, "Backup completed. Total bytes : " INT64_FORMAT "",
    //      current.data_bytes);
@@ -2010,7 +2015,7 @@ backup_cleanup(bool fatal, void *userdata)
             base36enc(current.start_time));
        current.end_time = time(NULL);
        current.status = BACKUP_STATUS_ERROR;
        pgBackupWriteBackupControlFile(&current);
        write_backup(&current);
    }

/*
@@ -2096,12 +2101,13 @@ backup_files(void *arg)

        if (S_ISREG(buf.st_mode))
        {
            pgFile **prev_file;

            /* Check that file exist in previous backup */
            if (current.backup_mode != BACKUP_MODE_FULL)
            {
                char *relative;
                pgFile key;
                pgFile **prev_file;

                relative = GetRelativePath(file->path, arguments->from_root);
                key.path = relative;
@@ -2131,20 +2137,27 @@ backup_files(void *arg)
                    continue;
                }
            }
            /* TODO:
             * Check if file exists in previous backup
             * If exists:
             *   if mtime > start_backup_time of parent backup,
             *   copy file to backup
             *   if mtime < start_backup_time
             *   calculate crc, compare crc to old file
             *   if crc is the same -> skip file
             */
            else if (!copy_file(arguments->from_root, arguments->to_root, file))
            else
            {
                file->write_size = BYTES_INVALID;
                elog(VERBOSE, "File \"%s\" was not copied to backup", file->path);
                continue;
                bool skip = false;

                /* If non-data file has not changed since last backup... */
                if (file->exists_in_prev &&
                    buf.st_mtime < current.parent_backup)
                {
                    calc_file_checksum(file);
                    /* ...and checksum is the same... */
                    if (EQ_CRC32C(file->crc, (*prev_file)->crc))
                        skip = true; /* ...skip copying file. */
                }
                if (skip ||
                    !copy_file(arguments->from_root, arguments->to_root, file))
                {
                    file->write_size = BYTES_INVALID;
                    elog(VERBOSE, "File \"%s\" was not copied to backup",
                         file->path);
                    continue;
                }
            }

        elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes",

@@ -11,13 +11,8 @@
#include "pg_probackup.h"

#include <dirent.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"};
@@ -221,6 +216,25 @@ read_backup(time_t timestamp)
    return readBackupControlFile(conf_path);
}

/*
 * Save the backup status into BACKUP_CONTROL_FILE.
 *
 * We need to reread the backup using its ID and save it changing only its
 * status.
 */
void
write_backup_status(pgBackup *backup)
{
    pgBackup *tmp;

    tmp = read_backup(backup->start_time);

    tmp->status = backup->status;
    write_backup(tmp);

    pgBackupFree(tmp);
}
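A minimal sketch of the call pattern this new function enables (the status value is illustrative; the call sites converted throughout this commit follow exactly this shape):

    /* Mark a backup corrupt without rewriting other control-file fields. */
    backup->status = BACKUP_STATUS_CORRUPT;
    write_backup_status(backup);    /* rereads the backup by ID, saves status only */

Because the on-disk backup is reread first, only the status field changes, presumably so that a stale in-memory copy cannot clobber the rest of BACKUP_CONTROL_FILE.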

/*
 * Get backup_mode in string representation.
 */
@@ -431,7 +445,8 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
    fprintf(out, "block-size = %u\n", backup->block_size);
    fprintf(out, "xlog-block-size = %u\n", backup->wal_block_size);
    fprintf(out, "checksum-version = %u\n", backup->checksum_version);
    fprintf(out, "program-version = %s\n", PROGRAM_VERSION);
    if (backup->program_version[0] != '\0')
        fprintf(out, "program-version = %s\n", backup->program_version);
    if (backup->server_version[0] != '\0')
        fprintf(out, "server-version = %s\n", backup->server_version);

@@ -481,17 +496,19 @@ pgBackupWriteControl(FILE *out, pgBackup *backup)
        fprintf(out, "primary_conninfo = '%s'\n", backup->primary_conninfo);
}

/* create BACKUP_CONTROL_FILE */
/*
 * Save the backup content into BACKUP_CONTROL_FILE.
 */
void
pgBackupWriteBackupControlFile(pgBackup *backup)
write_backup(pgBackup *backup)
{
    FILE *fp = NULL;
    char ini_path[MAXPGPATH];
    char conf_path[MAXPGPATH];

    pgBackupGetPath(backup, ini_path, lengthof(ini_path), BACKUP_CONTROL_FILE);
    fp = fopen(ini_path, "wt");
    pgBackupGetPath(backup, conf_path, lengthof(conf_path), BACKUP_CONTROL_FILE);
    fp = fopen(conf_path, "wt");
    if (fp == NULL)
        elog(ERROR, "cannot open configuration file \"%s\": %s", ini_path,
        elog(ERROR, "Cannot open configuration file \"%s\": %s", conf_path,
             strerror(errno));

    pgBackupWriteControl(fp, backup);

@@ -8,9 +8,6 @@
 */

#include "pg_probackup.h"
#include "utils/logger.h"

#include "pqexpbuffer.h"

#include "utils/json.h"


src/data.c | 81

@@ -10,21 +10,23 @@

#include "pg_probackup.h"

#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "libpq/pqsignal.h"
#include "storage/block.h"
#include "storage/bufpage.h"
#include "storage/checksum.h"
#include "storage/checksum_impl.h"
#include <common/pg_lzcompress.h>

#include <sys/stat.h>

#ifdef HAVE_LIBZ
#include <zlib.h>
#endif

/* Union to ease operations on relation pages */
typedef union DataPage
{
    PageHeaderData page_data;
    char data[BLCKSZ];
} DataPage;
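A short aside on why the buffer is a union rather than a plain char array (the snippet is an illustrative sketch, not code from this diff): the union guarantees the buffer is aligned like a PageHeaderData, so the same bytes can be read from disk as raw data and then inspected through the header view without casting a possibly misaligned pointer:

    DataPage buf;

    /* Read raw page bytes from the backup file... */
    if (fread(buf.data, 1, BLCKSZ, in) == BLCKSZ)
    {
        /* ...and view the very same bytes as a page header. */
        PageHeader phdr = &buf.page_data;
        (void) phdr;    /* e.g. inspect pd_lsn or pd_checksum here */
    }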

#ifdef HAVE_LIBZ
/* Implementation of zlib compression method */
static int32
@@ -796,7 +798,7 @@ restore_data_file(const char *to_path, pgFile *file, bool allow_truncate,
        if (ftruncate(fileno(out), write_pos) != 0)
            elog(ERROR, "cannot truncate \"%s\": %s",
                 file->path, strerror(errno));
        elog(INFO, "Delta truncate file %s to block %u",
        elog(VERBOSE, "Delta truncate file %s to block %u",
             file->path, truncate_from);
    }

@@ -1408,48 +1410,52 @@ calc_file_checksum(pgFile *file)
    return true;
}

/* Validate given page
 * return value:
/*
 * Validate given page.
 *
 * Returns value:
 * 0 - if the page is not found
 * 1 - if the page is found and valid
 * -1 - if the page is found but invalid
 */
#define PAGE_IS_NOT_FOUND 0
#define PAGE_IS_FOUND_AND_VALID 1
#define PAGE_IS_FOUND_AND__NOT_VALID -1
#define PAGE_IS_FOUND_AND_NOT_VALID -1
static int
validate_one_page(Page page, pgFile *file,
                  BlockNumber blknum, XLogRecPtr stop_lsn,
                  uint32 checksum_version)
{
    PageHeader phdr;
    XLogRecPtr lsn;
    bool page_header_is_sane = false;
    bool checksum_is_ok = false;
    XLogRecPtr lsn;
    bool page_header_is_sane = false;
    bool checksum_is_ok = false;

    /* new level of paranoia */
    if (page == NULL)
    {
        elog(LOG, "File %s, block %u, page is NULL",
             file->path, blknum);
        return PAGE_IS_NOT_FOUND;
        elog(LOG, "File \"%s\", block %u, page is NULL", file->path, blknum);
        return PAGE_IS_NOT_FOUND;
    }

    phdr = (PageHeader) page;

    if (PageIsNew(page))
    {
        int i;
        int i;

        /* Check if the page is zeroed. */
        for(i = 0; i < BLCKSZ && page[i] == 0; i++);

        if (i == BLCKSZ)
        {
            elog(LOG, "File: %s blknum %u, page is New. empty zeroed page",
            elog(LOG, "File: %s blknum %u, page is New, empty zeroed page",
                 file->path, blknum);
            return PAGE_IS_FOUND_AND_VALID;
        }
        else
        {
            elog(WARNING, "File: %s, block %u, page is New, but not zeroed",
            elog(WARNING, "File: %s blknum %u, page is New, but not zeroed",
                 file->path, blknum);
        }

@@ -1458,8 +1464,6 @@ validate_one_page(Page page, pgFile *file,
    }
    else
    {
        phdr = (PageHeader) page;

        if (PageGetPageSize(phdr) == BLCKSZ &&
            PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION &&
            (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
@@ -1474,7 +1478,7 @@ validate_one_page(Page page, pgFile *file,
    if (page_header_is_sane)
    {
        /* Verify checksum */
        if(checksum_version)
        if (checksum_version)
        {
            /*
             * If checksum is wrong, sleep a bit and then try again
@@ -1491,8 +1495,7 @@ validate_one_page(Page page, pgFile *file,
                 file->path, blknum);
        }
    }

    if (!checksum_version)
    else
    {
        /* Get lsn from page header. Ensure that page is from our time */
        lsn = PageXLogRecPtrGet(phdr->pd_lsn);
@@ -1521,7 +1524,7 @@ validate_one_page(Page page, pgFile *file,
        }
    }

    return PAGE_IS_FOUND_AND__NOT_VALID;
    return PAGE_IS_FOUND_AND_NOT_VALID;
}
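A hedged sketch of how a caller consumes these return codes; check_file_pages() below applies exactly the NOT_VALID branch, and the full switch is shown only for illustration:

    switch (validate_one_page(page, file, blknum, stop_lsn, checksum_version))
    {
        case PAGE_IS_FOUND_AND_VALID:       /* page is fine, keep going */
            break;
        case PAGE_IS_NOT_FOUND:             /* nothing to validate */
            break;
        case PAGE_IS_FOUND_AND_NOT_VALID:   /* mark the whole file invalid */
            is_valid = false;
            break;
    }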

/* Valiate pages of datafile in backup one by one */
@@ -1550,16 +1553,16 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
    /* read and validate pages one by one */
    while (true)
    {
        Page compressed_page = NULL; /* used as read buffer */
        Page page = NULL;
        DataPage compressed_page; /* used as read buffer */
        DataPage page;
        BackupPageHeader header;
        BlockNumber blknum;
        BlockNumber blknum = 0;

        /* read BackupPageHeader */
        read_len = fread(&header, 1, sizeof(header), in);
        if (read_len != sizeof(header))
        {
            int errno_tmp = errno;
            int errno_tmp = errno;
            if (read_len == 0 && feof(in))
                break; /* EOF found */
            else if (read_len != 0 && feof(in))
@@ -1586,7 +1589,7 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)

        Assert(header.compressed_size <= BLCKSZ);

        read_len = fread(compressed_page, 1,
        read_len = fread(compressed_page.data, 1,
                         MAXALIGN(header.compressed_size), in);
        if (read_len != MAXALIGN(header.compressed_size))
            elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d",
@@ -1596,23 +1599,23 @@ check_file_pages(pgFile *file, XLogRecPtr stop_lsn, uint32 checksum_version)
        {
            int32 uncompressed_size = 0;

            uncompressed_size = do_decompress(page, BLCKSZ,
                                              compressed_page,
                                              MAXALIGN(header.compressed_size),
            uncompressed_size = do_decompress(page.data, BLCKSZ,
                                              compressed_page.data,
                                              header.compressed_size,
                                              file->compress_alg);

            if (uncompressed_size != BLCKSZ)
                elog(ERROR, "page of file \"%s\" uncompressed to %d bytes. != BLCKSZ",
                     file->path, uncompressed_size);

            if (validate_one_page(page, file, blknum,
                stop_lsn, checksum_version) == PAGE_IS_FOUND_AND__NOT_VALID)
            if (validate_one_page(page.data, file, blknum,
                stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
                is_valid = false;
        }
        else
        {
            if (validate_one_page(compressed_page, file, blknum,
                stop_lsn, checksum_version) == PAGE_IS_FOUND_AND__NOT_VALID)
            if (validate_one_page(compressed_page.data, file, blknum,
                stop_lsn, checksum_version) == PAGE_IS_FOUND_AND_NOT_VALID)
                is_valid = false;
        }
    }

src/delete.c | 16

@@ -14,11 +14,11 @@
#include <time.h>
#include <unistd.h>

static int pgBackupDeleteFiles(pgBackup *backup);
static int delete_backup_files(pgBackup *backup);
static void delete_walfiles(XLogRecPtr oldest_lsn, TimeLineID oldest_tli,
                            uint32 xlog_seg_size);

int
void
do_delete(time_t backup_id)
{
    int i;
@@ -85,7 +85,7 @@ do_delete(time_t backup_id)
            if (interrupted)
                elog(ERROR, "interrupted during delete backup");

            pgBackupDeleteFiles(backup);
            delete_backup_files(backup);
        }

        parray_free(delete_list);
@@ -115,8 +115,6 @@ do_delete(time_t backup_id)
    /* cleanup */
    parray_walk(backup_list, pgBackupFree);
    parray_free(backup_list);

    return 0;
}

/*
@@ -205,7 +203,7 @@ do_retention_purge(void)
        }

        /* Delete backup and update status to DELETED */
        pgBackupDeleteFiles(backup);
        delete_backup_files(backup);
        backup_deleted = true;
    }
}
@@ -248,7 +246,7 @@ do_retention_purge(void)
 * BACKUP_STATUS_DELETED.
 */
static int
pgBackupDeleteFiles(pgBackup *backup)
delete_backup_files(pgBackup *backup)
{
    size_t i;
    char path[MAXPGPATH];
@@ -271,7 +269,7 @@ pgBackupDeleteFiles(pgBackup *backup)
     * the error occurs before deleting all backup files.
     */
    backup->status = BACKUP_STATUS_DELETING;
    pgBackupWriteBackupControlFile(backup);
    write_backup_status(backup);

    /* list files to be deleted */
    files = parray_new();
@@ -433,7 +431,7 @@ do_delete_instance(void)
    for (i = 0; i < parray_num(backup_list); i++)
    {
        pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
        pgBackupDeleteFiles(backup);
        delete_backup_files(backup);
    }

    /* Cleanup */

src/dir.c | 24

@@ -10,15 +10,14 @@

#include "pg_probackup.h"

#if PG_VERSION_NUM < 110000
#include "catalog/catalog.h"
#endif
#include "catalog/pg_tablespace.h"

#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <time.h>

#include "catalog/catalog.h"
#include "catalog/pg_tablespace.h"
#include "datapagemap.h"

/*
 * The contents of these directories are removed or recreated during server
@@ -368,7 +367,7 @@ BlackListCompare(const void *str1, const void *str2)
 * pgFile objects to "files". We add "root" to "files" if add_root is true.
 *
 * When omit_symlink is true, symbolic link is ignored and only file or
 * directory llnked to will be listed.
 * directory linked to will be listed.
 */
void
dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink,
@@ -996,14 +995,6 @@ create_data_directories(const char *data_dir, const char *backup_dir,
                 linked_path, dir_created, link_name);
        }

        /*
         * This check was done in check_tablespace_mapping(). But do
         * it again.
         */
        if (!dir_is_empty(linked_path))
            elog(ERROR, "restore tablespace destination is not empty: \"%s\"",
                 linked_path);

        if (link_sep)
            elog(LOG, "create directory \"%s\" and symbolic link \"%.*s\"",
                 linked_path,
@@ -1372,8 +1363,7 @@ dir_read_file_list(const char *root, const char *file_txt)

    fp = fopen(file_txt, "rt");
    if (fp == NULL)
        elog(errno == ENOENT ? ERROR : ERROR,
             "cannot open \"%s\": %s", file_txt, strerror(errno));
        elog(ERROR, "cannot open \"%s\": %s", file_txt, strerror(errno));

    files = parray_new();


src/fetch.c | 14

@@ -8,19 +8,11 @@
 *-------------------------------------------------------------------------
 */

#include "postgres_fe.h"

#include "catalog/catalog.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

#include "pg_probackup.h"

#include <sys/stat.h>
#include <unistd.h>

/*
 * Read a file into memory. The file to be read is <datadir>/<path>.
 * The file contents are returned in a malloc'd buffer, and *filesize

@@ -6,6 +6,7 @@
 *
 *-------------------------------------------------------------------------
 */

#include "pg_probackup.h"

static void help_init(void);

@@ -11,7 +11,6 @@
#include "pg_probackup.h"

#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>

/*

src/merge.c | 22

@@ -75,7 +75,9 @@ do_merge(time_t backup_id)
            continue;
        else if (backup->start_time == backup_id && !dest_backup)
        {
            if (backup->status != BACKUP_STATUS_OK)
            if (backup->status != BACKUP_STATUS_OK &&
                /* It is possible that previous merging was interrupted */
                backup->status != BACKUP_STATUS_MERGING)
                elog(ERROR, "Backup %s has status: %s",
                     base36enc(backup->start_time), status2str(backup->status));

@@ -93,19 +95,15 @@ do_merge(time_t backup_id)
            if (backup->start_time != prev_parent)
                continue;

            if (backup->status != BACKUP_STATUS_OK)
                elog(ERROR, "Skipping backup %s, because it has non-valid status: %s",
            if (backup->status != BACKUP_STATUS_OK &&
                /* It is possible that previous merging was interrupted */
                backup->status != BACKUP_STATUS_MERGING)
                elog(ERROR, "Backup %s has status: %s",
                     base36enc(backup->start_time), status2str(backup->status));

            /* If we already found dest_backup, look for full backup */
            if (dest_backup && backup->backup_mode == BACKUP_MODE_FULL)
            {
                if (backup->status != BACKUP_STATUS_OK)
                    elog(ERROR, "Parent full backup %s for the given backup %s has status: %s",
                         base36enc_dup(backup->start_time),
                         base36enc_dup(dest_backup->start_time),
                         status2str(backup->status));

                full_backup = backup;
                full_backup_idx = i;

@@ -169,10 +167,10 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
    elog(LOG, "Merging backup %s with backup %s", from_backup_id, to_backup_id);

    to_backup->status = BACKUP_STATUS_MERGING;
    pgBackupWriteBackupControlFile(to_backup);
    write_backup_status(to_backup);

    from_backup->status = BACKUP_STATUS_MERGING;
    pgBackupWriteBackupControlFile(from_backup);
    write_backup_status(from_backup);

    /*
     * Make backup paths.
@@ -328,7 +326,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup)
    to_backup->wal_bytes = BYTES_INVALID;

    pgBackupWriteFileList(to_backup, files, from_database_path);
    pgBackupWriteBackupControlFile(to_backup);
    write_backup_status(to_backup);

    /* Cleanup */
    pfree(threads_args);

@@ -12,15 +12,15 @@

#include "pg_probackup.h"

#include <time.h>
#include <unistd.h>
#include "access/transam.h"
#include "catalog/pg_control.h"
#include "commands/dbcommands_xlog.h"
#include "catalog/storage_xlog.h"

#ifdef HAVE_LIBZ
#include <zlib.h>
#endif

#include "commands/dbcommands_xlog.h"
#include "catalog/storage_xlog.h"
#include "access/transam.h"
#include "utils/thread.h"

/*
@@ -136,6 +136,22 @@ static void PrintXLogCorruptionMsg(XLogPageReadPrivate *private_data,
static XLogSegNo nextSegNoToRead = 0;
static pthread_mutex_t wal_segment_mutex = PTHREAD_MUTEX_INITIALIZER;

/* copied from timestamp.c */
static pg_time_t
timestamptz_to_time_t(TimestampTz t)
{
    pg_time_t result;

#ifdef HAVE_INT64_TIMESTAMP
    result = (pg_time_t) (t / USECS_PER_SEC +
                          ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#else
    result = (pg_time_t) (t +
                          ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#endif
    return result;
}
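A quick numeric check of the conversion above, using the standard PostgreSQL epoch constants (this little test program is illustrative and not part of the diff): POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE = 2451545 - 2440588 = 10957 days, i.e. 946684800 seconds between 1970-01-01 and 2000-01-01.

    #include <stdio.h>
    #include <stdint.h>

    #define UNIX_EPOCH_JDATE     2440588    /* Julian day of 1970-01-01 */
    #define POSTGRES_EPOCH_JDATE 2451545    /* Julian day of 2000-01-01 */
    #define SECS_PER_DAY         86400
    #define USECS_PER_SEC        1000000

    int main(void)
    {
        int64_t t = 0;  /* TimestampTz 0 == 2000-01-01 00:00:00 UTC (integer timestamps) */
        long long unix_secs = t / USECS_PER_SEC +
            (long long) (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY;
        printf("%lld\n", unix_secs);    /* prints 946684800 */
        return 0;
    }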

/*
 * Do manual switch to the next WAL segment.
 *
@@ -458,7 +474,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup,
     * the backup is definitely corrupted. Update its status.
     */
    backup->status = BACKUP_STATUS_CORRUPT;
    pgBackupWriteBackupControlFile(backup);
    write_backup_status(backup);

    elog(WARNING, "There are not enough WAL records to consistenly restore "
         "backup %s from START LSN: %X/%X to STOP LSN: %X/%X",

@@ -9,20 +9,36 @@
 */

#include "pg_probackup.h"
#include "streamutil.h"
#include "utils/thread.h"

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
#include <unistd.h>
#include "pg_getopt.h"
#include "streamutil.h"

#include <sys/stat.h>

#include "utils/thread.h"

const char *PROGRAM_VERSION = "2.0.21";
const char *PROGRAM_URL = "https://github.com/postgrespro/pg_probackup";
const char *PROGRAM_EMAIL = "https://github.com/postgrespro/pg_probackup/issues";

typedef enum ProbackupSubcmd
{
    NO_CMD = 0,
    INIT_CMD,
    ADD_INSTANCE_CMD,
    DELETE_INSTANCE_CMD,
    ARCHIVE_PUSH_CMD,
    ARCHIVE_GET_CMD,
    BACKUP_CMD,
    RESTORE_CMD,
    VALIDATE_CMD,
    DELETE_CMD,
    MERGE_CMD,
    SHOW_CMD,
    SET_CONFIG_CMD,
    SHOW_CONFIG_CMD
} ProbackupSubcmd;

/* directory options */
char *backup_path = NULL;
char *pgdata = NULL;
@@ -113,7 +129,7 @@ ShowFormat show_format = SHOW_PLAIN;

/* current settings */
pgBackup current;
ProbackupSubcmd backup_subcmd = NO_CMD;
static ProbackupSubcmd backup_subcmd = NO_CMD;

static bool help_opt = false;

@@ -141,14 +157,14 @@ static pgut_option options[] =
    { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMDLINE },
    { 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMDLINE },
    { 's', 'S', "slot", &replication_slot, SOURCE_CMDLINE },
    { 'u', 11, "archive-timeout", &archive_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
    { 'u', 11, "archive-timeout", &archive_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
    { 'b', 12, "delete-wal", &delete_wal, SOURCE_CMDLINE },
    { 'b', 13, "delete-expired", &delete_expired, SOURCE_CMDLINE },
    { 's', 14, "master-db", &master_db, SOURCE_CMDLINE, },
    { 's', 15, "master-host", &master_host, SOURCE_CMDLINE, },
    { 's', 16, "master-port", &master_port, SOURCE_CMDLINE, },
    { 's', 17, "master-user", &master_user, SOURCE_CMDLINE, },
    { 'u', 18, "replica-timeout", &replica_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_MS },
    { 'u', 18, "replica-timeout", &replica_timeout, SOURCE_CMDLINE, SOURCE_DEFAULT, OPTION_UNIT_S },
    /* TODO not completed feature. Make it unavailiable from user level
    { 'b', 18, "remote", &is_remote_backup, SOURCE_CMDLINE, }, */
    /* restore options */
@@ -534,7 +550,8 @@ main(int argc, char *argv[])
            if (delete_expired)
                return do_retention_purge();
            else
                return do_delete(current.backup_id);
                do_delete(current.backup_id);
            break;
        case MERGE_CMD:
            do_merge(current.backup_id);
            break;

@@ -11,40 +11,25 @@
#define PG_PROBACKUP_H

#include "postgres_fe.h"
#include "libpq-fe.h"

#include <limits.h>
#include <libpq-fe.h>

#include "access/timeline.h"
#include "access/xlogdefs.h"
#include "access/xlog_internal.h"
#include "catalog/pg_control.h"
#include "storage/block.h"
#include "storage/bufpage.h"
#include "storage/checksum.h"
#include "utils/pg_crc.h"
#include "common/relpath.h"
#include "port.h"

#ifdef FRONTEND
#undef FRONTEND
#include "port/atomics.h"
#include "port/atomics.h"
#define FRONTEND
#else
#include "port/atomics.h"
#endif

#include "utils/logger.h"
#include "utils/parray.h"
#include "utils/pgut.h"

#include "datapagemap.h"

#define PG_STOP_BACKUP_TIMEOUT 300
/*
 * Macro needed to parse ptrack.
 * NOTE Keep those values syncronised with definitions in ptrack.h
 */
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)

/* Directory/File names */
#define DATABASE_DIR "database"
#define BACKUPS_DIR "backups"
@@ -139,24 +124,6 @@ typedef enum BackupMode
    BACKUP_MODE_FULL /* full backup */
} BackupMode;

typedef enum ProbackupSubcmd
{
    NO_CMD = 0,
    INIT_CMD,
    ADD_INSTANCE_CMD,
    DELETE_INSTANCE_CMD,
    ARCHIVE_PUSH_CMD,
    ARCHIVE_GET_CMD,
    BACKUP_CMD,
    RESTORE_CMD,
    VALIDATE_CMD,
    DELETE_CMD,
    MERGE_CMD,
    SHOW_CMD,
    SET_CONFIG_CMD,
    SHOW_CONFIG_CMD
} ProbackupSubcmd;

typedef enum ShowFormat
{
    SHOW_PLAIN,
@@ -281,13 +248,6 @@ typedef struct pgRecoveryTarget
    bool restore_no_validate;
} pgRecoveryTarget;

/* Union to ease operations on relation pages */
typedef union DataPage
{
    PageHeaderData page_data;
    char data[BLCKSZ];
} DataPage;

typedef struct
{
    const char *from_root;
@@ -398,11 +358,6 @@ extern CompressAlg compress_alg;
extern int compress_level;
extern bool compress_shortcut;

#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
#define COMPRESS_LEVEL_DEFAULT 1

extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);
/* other options */
extern char *instance_name;
extern uint64 system_identifier;
@@ -413,7 +368,6 @@ extern ShowFormat show_format;

/* current settings */
extern pgBackup current;
extern ProbackupSubcmd backup_subcmd;

/* in dir.c */
/* exclude directory list for $PGDATA file listing */
@@ -470,7 +424,7 @@ extern uint32 get_config_xlog_seg_size(void);
extern int do_show(time_t requested_backup_id);

/* in delete.c */
extern int do_delete(time_t backup_id);
extern void do_delete(time_t backup_id);
extern int do_retention_purge(void);
extern int do_delete_instance(void);

@@ -491,6 +445,9 @@ extern int do_validate_all(void);

/* in catalog.c */
extern pgBackup *read_backup(time_t timestamp);
extern void write_backup(pgBackup *backup);
extern void write_backup_status(pgBackup *backup);

extern const char *pgBackupGetBackupMode(pgBackup *backup);

extern parray *catalog_get_backup_list(time_t requested_backup_id);
@@ -498,7 +455,6 @@ extern pgBackup *catalog_get_last_data_backup(parray *backup_list,
                                              TimeLineID tli);
extern void catalog_lock(void);
extern void pgBackupWriteControl(FILE *out, pgBackup *backup);
extern void pgBackupWriteBackupControlFile(pgBackup *backup);
extern void pgBackupWriteFileList(pgBackup *backup, parray *files,
                                  const char *root);

@@ -517,6 +473,12 @@ extern int scan_parent_chain(pgBackup *current_backup, pgBackup **result_backup)
extern bool is_parent(time_t parent_backup_time, pgBackup *child_backup, bool inclusive);
extern int get_backup_index_number(parray *backup_list, pgBackup *backup);

#define COMPRESS_ALG_DEFAULT NOT_DEFINED_COMPRESS
#define COMPRESS_LEVEL_DEFAULT 1

extern CompressAlg parse_compress_alg(const char *arg);
extern const char* deparse_compress_alg(int alg);

/* in dir.c */
extern void dir_list_file(parray *files, const char *root, bool exclude,
                          bool omit_symlink, bool add_root);
@@ -602,12 +564,8 @@ extern void remove_not_digit(char *buf, size_t len, const char *str);
extern const char *base36enc(long unsigned int value);
extern char *base36enc_dup(long unsigned int value);
extern long unsigned int base36dec(const char *text);
extern pg_time_t timestamptz_to_time_t(TimestampTz t);
extern int parse_server_version(char *server_version_str);

/* in status.c */
extern bool is_pg_running(void);

#ifdef WIN32
#ifdef _DEBUG
#define lseek _lseek

@@ -10,13 +10,11 @@

#include "pg_probackup.h"

#include <fcntl.h>
#include "access/timeline.h"

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "catalog/pg_control.h"
#include "utils/logger.h"
#include "utils/thread.h"

typedef struct
@@ -208,7 +206,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
            if (backup->status == BACKUP_STATUS_OK)
            {
                backup->status = BACKUP_STATUS_ORPHAN;
                pgBackupWriteBackupControlFile(backup);
                write_backup_status(backup);

                elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
                     base36enc(backup->start_time), missing_backup_id);
@@ -241,10 +239,13 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
            if (backup->status == BACKUP_STATUS_OK)
            {
                backup->status = BACKUP_STATUS_ORPHAN;
                pgBackupWriteBackupControlFile(backup);
                elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
                     base36enc(backup->start_time), parent_backup_id,
                     status2str(tmp_backup->status));
                write_backup_status(backup);

                elog(WARNING,
                     "Backup %s is orphaned because his parent %s has status: %s",
                     base36enc(backup->start_time),
                     parent_backup_id,
                     status2str(tmp_backup->status));
            }
            else
            {
@@ -336,7 +337,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt,
            if (backup->status == BACKUP_STATUS_OK)
            {
                backup->status = BACKUP_STATUS_ORPHAN;
                pgBackupWriteBackupControlFile(backup);
                write_backup_status(backup);

                elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
                     base36enc(backup->start_time),

@@ -11,12 +11,9 @@
#include "pg_probackup.h"

#include <time.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/stat.h>

#include "pqexpbuffer.h"

#include "utils/json.h"

typedef struct ShowBackendRow

src/status.c | 118 (file deleted)

@@ -1,118 +0,0 @@
/*-------------------------------------------------------------------------
 *
 * status.c
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 *
 * Monitor status of a PostgreSQL server.
 *
 *-------------------------------------------------------------------------
 */


#include "postgres_fe.h"

#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

#include "pg_probackup.h"

/* PID can be negative for standalone backend */
typedef long pgpid_t;

static pgpid_t get_pgpid(void);
static bool postmaster_is_alive(pid_t pid);

/*
 * get_pgpid
 *
 * Get PID of postmaster, by scanning postmaster.pid.
 */
static pgpid_t
get_pgpid(void)
{
    FILE *pidf;
    long pid;
    char pid_file[MAXPGPATH];

    snprintf(pid_file, lengthof(pid_file), "%s/postmaster.pid", pgdata);

    pidf = fopen(pid_file, PG_BINARY_R);
    if (pidf == NULL)
    {
        /* No pid file, not an error on startup */
        if (errno == ENOENT)
            return 0;
        else
        {
            elog(ERROR, "could not open PID file \"%s\": %s",
                 pid_file, strerror(errno));
        }
    }
    if (fscanf(pidf, "%ld", &pid) != 1)
    {
        /* Is the file empty? */
        if (ftell(pidf) == 0 && feof(pidf))
            elog(ERROR, "the PID file \"%s\" is empty",
                 pid_file);
        else
            elog(ERROR, "invalid data in PID file \"%s\"\n",
                 pid_file);
    }
    fclose(pidf);
    return (pgpid_t) pid;
}

/*
 * postmaster_is_alive
 *
 * Check whether postmaster is alive or not.
 */
static bool
postmaster_is_alive(pid_t pid)
{
    /*
     * Test to see if the process is still there. Note that we do not
     * consider an EPERM failure to mean that the process is still there;
     * EPERM must mean that the given PID belongs to some other userid, and
     * considering the permissions on $PGDATA, that means it's not the
     * postmaster we are after.
     *
     * Don't believe that our own PID or parent shell's PID is the postmaster,
     * either. (Windows hasn't got getppid(), though.)
     */
    if (pid == getpid())
        return false;
#ifndef WIN32
    if (pid == getppid())
        return false;
#endif
    if (kill(pid, 0) == 0)
        return true;
    return false;
}

/*
 * is_pg_running
 *
 *
 */
bool
is_pg_running(void)
{
    pgpid_t pid;

    pid = get_pgpid();

    /* 0 means no pid file */
    if (pid == 0)
        return false;

    /* Case of a standalone backend */
    if (pid < 0)
        pid = -pid;

    /* Check if postmaster is alive */
    return postmaster_is_alive((pid_t) pid);
}

src/util.c | 23

@@ -10,12 +10,9 @@

#include "pg_probackup.h"

#include <time.h>
#include "catalog/pg_control.h"

#include "storage/bufpage.h"
#if PG_VERSION_NUM >= 110000
#include "streamutil.h"
#endif
#include <time.h>

const char *
base36enc(long unsigned int value)
@@ -283,22 +280,6 @@ time2iso(char *buf, size_t len, time_t time)
    }
}

/* copied from timestamp.c */
pg_time_t
timestamptz_to_time_t(TimestampTz t)
{
    pg_time_t result;

#ifdef HAVE_INT64_TIMESTAMP
    result = (pg_time_t) (t / USECS_PER_SEC +
                          ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#else
    result = (pg_time_t) (t +
                          ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#endif
    return result;
}

/* Parse string representation of the server version */
int
parse_server_version(char *server_version_str)

@@ -7,11 +7,9 @@
 *-------------------------------------------------------------------------
 */

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "postgres_fe.h"

#include <sys/stat.h>
#include <time.h>

#include "logger.h"
#include "pgut.h"
@@ -551,8 +549,8 @@ open_logfile(FILE **file, const char *filename_format)
        /* Parsed creation time */

        rotation_requested = (cur_time - creation_time) >
            /* convert to seconds */
            log_rotation_age * 60;
            /* convert to seconds from milliseconds */
            log_rotation_age / 1000;
    }
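A small worked example for the unit change above, assuming log_rotation_age is now stored in milliseconds as the new comment states (the numbers are hypothetical, chosen only to show the conversion):

    /* A one-day rotation age, stored in milliseconds: */
    long long log_rotation_age = 24LL * 60 * 60 * 1000;    /* 86400000 ms */

    /* The new conversion used in the age comparison above: */
    long long threshold_secs = log_rotation_age / 1000;    /* 86400 s == 1 day */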
    else
        elog_stderr(ERROR, "cannot read creation timestamp from "

@@ -10,8 +10,6 @@
#ifndef LOGGER_H
#define LOGGER_H

#include "postgres_fe.h"

#define LOG_NONE (-10)

/* Log level */

@@ -7,6 +7,8 @@
 *-------------------------------------------------------------------------
 */

#include "postgres_fe.h"

#include "parray.h"
#include "pgut.h"

@@ -10,10 +10,6 @@
#ifndef PARRAY_H
#define PARRAY_H

#include "postgres_fe.h"

#include <stdlib.h>

/*
 * "parray" hold pointers to objects in a linear memory area.
 * Client use "parray *" to access parray object.

src/utils/pgut.c | 122

@@ -9,15 +9,16 @@
 */

#include "postgres_fe.h"
#include "libpq/pqsignal.h"

#include "getopt_long.h"
#include <limits.h>
#include <time.h>
#include <unistd.h>
#include "libpq-fe.h"
#include "libpq/pqsignal.h"
#include "pqexpbuffer.h"

#include <time.h>

#include "logger.h"
#include "pgut.h"
#include "logger.h"

#define MAX_TZDISP_HOUR 15 /* maximum allowed hour part */
#define SECS_PER_MINUTE 60
@@ -240,7 +241,7 @@ assign_option(pgut_option *opt, const char *optarg, pgut_optsrc src)
            *(char **) opt->var = pgut_strdup(optarg);
            if (strcmp(optarg,"") != 0)
                return;
            message = "a valid string. But provided: ";
            message = "a valid string";
            break;
        case 't':
            if (parse_time(optarg, opt->var,
@@ -1652,31 +1653,6 @@ pgut_disconnect(PGconn *conn)
        PQfinish(conn);
}

/* set/get host and port for connecting standby server */
const char *
pgut_get_host()
{
    return host;
}

const char *
pgut_get_port()
{
    return port;
}

void
pgut_set_host(const char *new_host)
{
    host = new_host;
}

void
pgut_set_port(const char *new_port)
{
    port = new_port;
}


PGresult *
pgut_execute_parallel(PGconn* conn,
@@ -2151,60 +2127,6 @@ get_username(void)
    return ret;
}

int
appendStringInfoFile(StringInfo str, FILE *fp)
{
    AssertArg(str != NULL);
    AssertArg(fp != NULL);

    for (;;)
    {
        int rc;

        if (str->maxlen - str->len < 2 && enlargeStringInfo(str, 1024) == 0)
            return errno = ENOMEM;

        rc = fread(str->data + str->len, 1, str->maxlen - str->len - 1, fp);
        if (rc == 0)
            break;
        else if (rc > 0)
        {
            str->len += rc;
            str->data[str->len] = '\0';
        }
        else if (ferror(fp) && errno != EINTR)
            return errno;
    }
    return 0;
}

int
appendStringInfoFd(StringInfo str, int fd)
{
    AssertArg(str != NULL);
    AssertArg(fd != -1);

    for (;;)
    {
        int rc;

        if (str->maxlen - str->len < 2 && enlargeStringInfo(str, 1024) == 0)
            return errno = ENOMEM;

        rc = read(fd, str->data + str->len, str->maxlen - str->len - 1);
        if (rc == 0)
            break;
        else if (rc > 0)
        {
            str->len += rc;
            str->data[str->len] = '\0';
        }
        else if (errno != EINTR)
            return errno;
    }
    return 0;
}

void *
pgut_malloc(size_t size)
{
@@ -2241,36 +2163,6 @@ pgut_strdup(const char *str)
    return ret;
}

char *
strdup_with_len(const char *str, size_t len)
{
    char *r;

    if (str == NULL)
        return NULL;

    r = pgut_malloc(len + 1);
    memcpy(r, str, len);
    r[len] = '\0';
    return r;
}

/* strdup but trim whitespaces at head and tail */
char *
strdup_trim(const char *str)
{
    size_t len;

    if (str == NULL)
        return NULL;

    while (IsSpace(str[0])) { str++; }
    len = strlen(str);
    while (len > 0 && IsSpace(str[len - 1])) { len--; }

    return strdup_with_len(str, len);
}

FILE *
pgut_fopen(const char *path, const char *mode, bool missing_ok)
{

@@ -11,26 +11,9 @@
#ifndef PGUT_H
#define PGUT_H

#include "libpq-fe.h"
#include "pqexpbuffer.h"

#include <assert.h>
#include <sys/time.h>

#include "postgres_fe.h"
#include "access/xlogdefs.h"
#include "logger.h"

#if !defined(C_H) && !defined(__cplusplus)
#ifndef bool
typedef char bool;
#endif
#ifndef true
#define true ((bool) 1)
#endif
#ifndef false
#define false ((bool) 0)
#endif
#endif
#include "libpq-fe.h"

#define INFINITE_STR "INFINITE"

@@ -139,19 +122,12 @@ extern bool pgut_send(PGconn* conn, const char *query, int nParams, const char *
extern void pgut_cancel(PGconn* conn);
extern int pgut_wait(int num, PGconn *connections[], struct timeval *timeout);

extern const char *pgut_get_host(void);
extern const char *pgut_get_port(void);
extern void pgut_set_host(const char *new_host);
extern void pgut_set_port(const char *new_port);

/*
 * memory allocators
 */
extern void *pgut_malloc(size_t size);
extern void *pgut_realloc(void *p, size_t size);
extern char *pgut_strdup(const char *str);
extern char *strdup_with_len(const char *str, size_t len);
extern char *strdup_trim(const char *str);

#define pgut_new(type) ((type *) pgut_malloc(sizeof(type)))
#define pgut_newarray(type, n) ((type *) pgut_malloc(sizeof(type) * (n)))
@@ -178,28 +154,6 @@ extern FILE *pgut_fopen(const char *path, const char *mode, bool missing_ok);
#define AssertMacro(x) ((void) 0)
#endif

/*
 * StringInfo and string operations
 */
#define STRINGINFO_H

#define StringInfoData PQExpBufferData
#define StringInfo PQExpBuffer
#define makeStringInfo createPQExpBuffer
#define initStringInfo initPQExpBuffer
#define freeStringInfo destroyPQExpBuffer
#define termStringInfo termPQExpBuffer
#define resetStringInfo resetPQExpBuffer
#define enlargeStringInfo enlargePQExpBuffer
#define printfStringInfo printfPQExpBuffer /* reset + append */
#define appendStringInfo appendPQExpBuffer
#define appendStringInfoString appendPQExpBufferStr
#define appendStringInfoChar appendPQExpBufferChar
#define appendBinaryStringInfo appendBinaryPQExpBuffer

extern int appendStringInfoFile(StringInfo str, FILE *fp);
extern int appendStringInfoFd(StringInfo str, int fd);

extern bool parse_bool(const char *value, bool *result);
extern bool parse_bool_with_len(const char *value, size_t len, bool *result);
extern bool parse_int32(const char *value, int32 *result, int flags);
@@ -219,8 +173,6 @@ extern void convert_from_base_unit_u(uint64 base_value, int base_unit,
#define IsSpace(c) (isspace((unsigned char)(c)))
#define IsAlpha(c) (isalpha((unsigned char)(c)))
#define IsAlnum(c) (isalnum((unsigned char)(c)))
#define IsIdentHead(c) (IsAlpha(c) || (c) == '_')
#define IsIdentBody(c) (IsAlnum(c) || (c) == '_')
#define ToLower(c) (tolower((unsigned char)(c)))
#define ToUpper(c) (toupper((unsigned char)(c)))

@@ -133,7 +133,7 @@ pgBackupValidate(pgBackup *backup)

    /* Update backup status */
    backup->status = corrupted ? BACKUP_STATUS_CORRUPT : BACKUP_STATUS_OK;
    pgBackupWriteBackupControlFile(backup);
    write_backup_status(backup);

    if (corrupted)
        elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time));
@@ -338,7 +338,7 @@ do_validate_instance(void)
        if (current_backup->status == BACKUP_STATUS_OK)
        {
            current_backup->status = BACKUP_STATUS_ORPHAN;
            pgBackupWriteBackupControlFile(current_backup);
            write_backup_status(current_backup);
            elog(WARNING, "Backup %s is orphaned because his parent %s is missing",
                 base36enc(current_backup->start_time),
                 parent_backup_id);
@@ -363,7 +363,7 @@ do_validate_instance(void)
        if (current_backup->status == BACKUP_STATUS_OK)
        {
            current_backup->status = BACKUP_STATUS_ORPHAN;
            pgBackupWriteBackupControlFile(current_backup);
            write_backup_status(current_backup);
            elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
                 base36enc(current_backup->start_time), parent_backup_id,
                 status2str(tmp_backup->status));
@@ -420,7 +420,7 @@ do_validate_instance(void)
        if (backup->status == BACKUP_STATUS_OK)
        {
            backup->status = BACKUP_STATUS_ORPHAN;
            pgBackupWriteBackupControlFile(backup);
            write_backup_status(backup);

            elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s",
                 base36enc(backup->start_time),

@@ -13,6 +13,9 @@ Check physical correctness of restored instances:
Check archive compression:
 export ARCHIVE_COMPRESSION=ON

Enable compatibility tests:
 export PGPROBACKUPBIN_OLD=/path/to/previous_version_pg_probackup_binary

Specify path to pg_probackup binary file. By default tests use <Path to Git repository>/pg_probackup/
 export PGPROBACKUPBIN=<path to pg_probackup>

@@ -1,10 +1,10 @@
import unittest

from . import init_test, option_test, show_test, \
    backup_test, delete_test, restore_test, validate_test, \
    retention_test, ptrack_clean, ptrack_cluster, \
    ptrack_move_to_tablespace, ptrack_recovery, ptrack_vacuum, \
    ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
from . import init_test, merge, option_test, show_test, compatibility, \
    backup_test, delete_test, delta, restore_test, validate_test, \
    retention_test, ptrack_clean, ptrack_empty, ptrack_cluster, \
    ptrack_move_to_tablespace, ptrack_recovery, ptrack_truncate, \
    ptrack_vacuum, ptrack_vacuum_bits_frozen, ptrack_vacuum_bits_visibility, \
    ptrack_vacuum_full, ptrack_vacuum_truncate, pgpro560, pgpro589, \
    false_positive, replica, compression, page, ptrack, archive, \
    exclude, cfs_backup, cfs_restore, cfs_validate_backup, auth_test
@@ -15,22 +15,28 @@ def load_tests(loader, tests, pattern):
    # suite.addTests(loader.loadTestsFromModule(auth_test))
    suite.addTests(loader.loadTestsFromModule(archive))
    suite.addTests(loader.loadTestsFromModule(backup_test))
    suite.addTests(loader.loadTestsFromModule(compatibility))
    suite.addTests(loader.loadTestsFromModule(cfs_backup))
    # suite.addTests(loader.loadTestsFromModule(cfs_restore))
    suite.addTests(loader.loadTestsFromModule(cfs_restore))
    # suite.addTests(loader.loadTestsFromModule(cfs_validate_backup))
    # suite.addTests(loader.loadTestsFromModule(logging))
    suite.addTests(loader.loadTestsFromModule(compression))
    suite.addTests(loader.loadTestsFromModule(compatibility))
    suite.addTests(loader.loadTestsFromModule(delete_test))
    suite.addTests(loader.loadTestsFromModule(delta))
    suite.addTests(loader.loadTestsFromModule(exclude))
    suite.addTests(loader.loadTestsFromModule(false_positive))
    suite.addTests(loader.loadTestsFromModule(init_test))
    suite.addTests(loader.loadTestsFromModule(merge))
    suite.addTests(loader.loadTestsFromModule(option_test))
    suite.addTests(loader.loadTestsFromModule(page))
    suite.addTests(loader.loadTestsFromModule(ptrack))
    suite.addTests(loader.loadTestsFromModule(ptrack_clean))
    suite.addTests(loader.loadTestsFromModule(ptrack_empty))
    suite.addTests(loader.loadTestsFromModule(ptrack_cluster))
    suite.addTests(loader.loadTestsFromModule(ptrack_move_to_tablespace))
    suite.addTests(loader.loadTestsFromModule(ptrack_recovery))
    suite.addTests(loader.loadTestsFromModule(ptrack_truncate))
    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum))
    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_frozen))
    suite.addTests(loader.loadTestsFromModule(ptrack_vacuum_bits_visibility))

tests/compatibility.py | 313 (new file)
@@ -0,0 +1,313 @@
import unittest
import subprocess
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from sys import exit

module_name = 'compatibility'


class CompatibilityTest(ProbackupTest, unittest.TestCase):

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_page(self):
        """Description in jira issue PGPRO-434"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_senders': '2',
                'autovacuum': 'off'}
        )
        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)

        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.show_pb(backup_dir)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=10)

        # FULL backup with old binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.show_pb(backup_dir)

        self.validate_pb(backup_dir)

        # RESTORE old FULL with new binary
        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Page BACKUP with old binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Page BACKUP with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            options=['--log-level-file=verbose'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_delta(self):
        """Description in jira issue PGPRO-434"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_senders': '2',
                'autovacuum': 'off'}
        )
        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)

        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.show_pb(backup_dir)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=10)

        # FULL backup with old binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.show_pb(backup_dir)

        self.validate_pb(backup_dir)

        # RESTORE old FULL with new binary
        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Delta BACKUP with old binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Delta BACKUP with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--log-level-file=verbose'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_ptrack(self):
        """Description in jira issue PGPRO-434"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_senders': '2',
                'autovacuum': 'off'}
        )
        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)

        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.show_pb(backup_dir)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
|
||||
node.slow_start()
|
||||
|
||||
node.pgbench_init(scale=10)
|
||||
|
||||
# FULL backup with old binary
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
self.show_pb(backup_dir)
|
||||
|
||||
self.validate_pb(backup_dir)
|
||||
|
||||
# RESTORE old FULL with new binary
|
||||
node_restored = self.make_simple_node(
|
||||
base_dir="{0}/{1}/node_restored".format(module_name, fname))
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Delta BACKUP with old binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta',
|
||||
old_binary=True)
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
|
||||
|
||||
# Delta BACKUP with new binary
|
||||
pgbench = node.pgbench(
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
options=["-c", "4", "-T", "20"]
|
||||
)
|
||||
pgbench.wait()
|
||||
pgbench.stdout.close()
|
||||
|
||||
self.backup_node(
|
||||
backup_dir, 'node', node, backup_type='delta',
|
||||
options=['--log-level-file=verbose'])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata = self.pgdata_content(node.data_dir)
|
||||
|
||||
node_restored.cleanup()
|
||||
|
||||
self.restore_node(
|
||||
backup_dir, 'node', node_restored,
|
||||
options=["-j", "4", "--recovery-target-action=promote"])
|
||||
|
||||
if self.paranoia:
|
||||
pgdata_restored = self.pgdata_content(node_restored.data_dir)
|
||||
self.compare_pgdata(pgdata, pgdata_restored)
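All three backward-compatibility tests above follow the same old-binary/new-binary round trip, so the shape is worth stating once. A minimal sketch, assuming PGPROBACKUPBIN_OLD points at the previous pg_probackup release; the wrappers are the ProbackupTest helpers extended later in this diff, while cross_version_roundtrip itself is a hypothetical name:

def cross_version_roundtrip(test, backup_dir, node, node_restored):
    # Catalog and FULL backup are created by the old binary...
    test.init_pb(backup_dir, old_binary=True)
    test.add_instance(backup_dir, 'node', node, old_binary=True)
    test.backup_node(backup_dir, 'node', node, old_binary=True)
    # ...and must restore and validate cleanly with the new one.
    test.restore_node(
        backup_dir, 'node', node_restored,
        options=["-j", "4", "--recovery-target-action=promote"])
    test.validate_pb(backup_dir)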
@ -55,6 +55,51 @@ class DeleteTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
# @unittest.expectedFailure
def test_delete_archive_mix_compress_and_non_compressed_segments(self):
"""delete full backups"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
initdb_params=['--data-checksums'],
pg_options={'wal_level': 'replica'}
)
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()

# full backup
self.backup_node(backup_dir, 'node', node)

pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()

self.backup_node(backup_dir, 'node', node)

pgbench = node.pgbench(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pgbench.wait()
pgbench.stdout.close()

self.backup_node(backup_dir, 'node', node)

show_backups = self.show_pb(backup_dir, 'node')
id_1 = show_backups[0]['ID']
id_2 = show_backups[1]['ID']
id_3 = show_backups[2]['ID']
self.delete_pb(backup_dir, 'node', id_2)
show_backups = self.show_pb(backup_dir, 'node')
self.assertEqual(show_backups[0]['ID'], id_1)
self.assertEqual(show_backups[1]['ID'], id_3)

# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_delete_increment_page(self):
"""delete increment and all after him"""

@ -1 +1 @@
pg_probackup 2.0.21
pg_probackup 2.0.21
@ -48,6 +48,16 @@ idx_ptrack = {
'column': 'tsvector',
'relation': 't_heap'
},
't_hash': {
'type': 'hash',
'column': 'id',
'relation': 't_heap'
},
't_bloom': {
'type': 'bloom',
'column': 'id',
'relation': 't_heap'
}
}
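The two new entries extend the per-index ptrack checks to hash and bloom indexes. Each entry describes one index to build over t_heap; the test modules further down consume the dict with a loop of this shape (the pattern is copied from those hunks, nothing here is new API):

# One index per dict entry; heap and sequence entries are skipped
# because they are not indexes. 'i' is the key, e.g. 't_bloom'.
for i in idx_ptrack:
    if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
        node.safe_psql(
            "postgres",
            "create index {0} on {1} using {2}({3})".format(
                i, idx_ptrack[i]['relation'],
                idx_ptrack[i]['type'], idx_ptrack[i]['column']))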

archive_script = """
@ -221,11 +231,49 @@ class ProbackupTest(object):
self.probackup_path = self.test_env["PGPROBACKUPBIN"]
else:
if self.verbose:
print('PGPROBINDIR is not an executable file')
print('PGPROBACKUPBIN is not an executable file')

if not self.probackup_path:
self.probackup_path = os.path.abspath(os.path.join(
probackup_path_tmp = os.path.join(
testgres.get_pg_config()["BINDIR"], 'pg_probackup')

if os.path.isfile(probackup_path_tmp):
if not os.access(probackup_path_tmp, os.X_OK):
print('{0} is not an executable file'.format(
probackup_path_tmp))
else:
self.probackup_path = probackup_path_tmp

if not self.probackup_path:
probackup_path_tmp = os.path.abspath(os.path.join(
self.dir_path, "../pg_probackup"))

if os.path.isfile(probackup_path_tmp):
if not os.access(probackup_path_tmp, os.X_OK):
print('{0} is not an executable file'.format(
probackup_path_tmp))
else:
self.probackup_path = probackup_path_tmp

if not self.probackup_path:
print('pg_probackup binary is not found')
exit(1)

os.environ['PATH'] = os.path.dirname(
self.probackup_path) + ":" + os.environ['PATH']

self.probackup_old_path = None

if "PGPROBACKUPBIN_OLD" in self.test_env:
if (
os.path.isfile(self.test_env["PGPROBACKUPBIN_OLD"]) and
os.access(self.test_env["PGPROBACKUPBIN_OLD"], os.X_OK)
):
self.probackup_old_path = self.test_env["PGPROBACKUPBIN_OLD"]
else:
if self.verbose:
print('PGPROBACKUPBIN_OLD is not an executable file')

def make_simple_node(
self,
base_dir=None,
@ -416,13 +464,13 @@ class ProbackupTest(object):
continue
if PageNum not in idx_dict['new_pages']:
# Page is not present now, meaning that relation got smaller
# Ptrack should be equal to 0,
# Ptrack should be equal to 1,
# We are not freaking out about false positive stuff
if idx_dict['ptrack'][PageNum] != 0:
if idx_dict['ptrack'][PageNum] != 1:
if self.verbose:
print(
'File: {0}\n Page Number {1} of type {2} was deleted,'
' but ptrack value is {3}'.format(
' but ptrack value is {3}. THIS IS BAD'.format(
idx_dict['path'],
PageNum, idx_dict['type'],
idx_dict['ptrack'][PageNum])
@ -473,10 +521,10 @@ class ProbackupTest(object):
idx_dict['ptrack'][PageNum]
)
)

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)
return success
# self.assertTrue(
# success, 'Ptrack has failed to register changes in data files'
# )
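check_ptrack_sanity now reports its verdict instead of asserting, so a caller can inspect every index before failing the test. The caller-side pattern this enables, as it appears in the ptrack_cluster hunks below:

# Aggregate over all indexes and assert once at the end, so one bad
# index no longer hides failures in the indexes checked after it.
success = True
for i in idx_ptrack:
    if not self.check_ptrack_sanity(idx_ptrack[i]):
        success = False
self.assertTrue(
    success, 'Ptrack has failed to register changes in data files')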

def check_ptrack_recovery(self, idx_dict):
size = idx_dict['size']
@ -508,13 +556,22 @@ class ProbackupTest(object):
)
)

def run_pb(self, command, async=False, gdb=False):
def run_pb(self, command, async=False, gdb=False, old_binary=False):
if not self.probackup_old_path and old_binary:
print("PGPROBACKUPBIN_OLD is not set")
exit(1)

if old_binary:
binary_path = self.probackup_old_path
else:
binary_path = self.probackup_path

try:
self.cmd = [' '.join(map(str, [self.probackup_path] + command))]
self.cmd = [' '.join(map(str, [binary_path] + command))]
if self.verbose:
print(self.cmd)
if gdb:
return GDBobj([self.probackup_path] + command, self.verbose)
return GDBobj([binary_path] + command, self.verbose)
if async:
return subprocess.Popen(
self.cmd,
@ -524,7 +581,7 @@ class ProbackupTest(object):
)
else:
self.output = subprocess.check_output(
[self.probackup_path] + command,
[binary_path] + command,
stderr=subprocess.STDOUT,
env=self.test_env
).decode("utf-8")
@ -560,37 +617,45 @@ class ProbackupTest(object):
except subprocess.CalledProcessError as e:
raise ProbackupException(e.output.decode("utf-8"), command)
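With old_binary threaded through run_pb, every wrapper below can route a single command to either build. A usage sketch, assuming both PGPROBACKUPBIN and PGPROBACKUPBIN_OLD are set in the test environment:

# Initialize the catalog with the previous release, then read it back
# with the current one; run_pb() picks the binary per call.
self.init_pb(backup_dir, old_binary=True)
self.add_instance(backup_dir, 'node', node, old_binary=True)
self.show_pb(backup_dir)  # defaults to the new binary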

def init_pb(self, backup_dir):
def init_pb(self, backup_dir, old_binary=False):

shutil.rmtree(backup_dir, ignore_errors=True)

return self.run_pb([
"init",
"-B", backup_dir
])
],
old_binary=old_binary
)

def add_instance(self, backup_dir, instance, node):
def add_instance(self, backup_dir, instance, node, old_binary=False):

return self.run_pb([
"add-instance",
"--instance={0}".format(instance),
"-B", backup_dir,
"-D", node.data_dir
])
],
old_binary=old_binary
)

def del_instance(self, backup_dir, instance):
def del_instance(self, backup_dir, instance, old_binary=False):

return self.run_pb([
"del-instance",
"--instance={0}".format(instance),
"-B", backup_dir
])
],
old_binary=old_binary
)

def clean_pb(self, backup_dir):
shutil.rmtree(backup_dir, ignore_errors=True)

def backup_node(
self, backup_dir, instance, node, data_dir=False,
backup_type="full", options=[], async=False, gdb=False
backup_type="full", options=[], async=False, gdb=False,
old_binary=False
):
if not node and not data_dir:
print('You must provide either node or data_dir for backup')
@ -613,11 +678,11 @@ class ProbackupTest(object):
if backup_type:
cmd_list += ["-b", backup_type]

return self.run_pb(cmd_list + options, async, gdb)
return self.run_pb(cmd_list + options, async, gdb, old_binary)

def merge_backup(
self, backup_dir, instance, backup_id, async=False,
gdb=False, options=[]):
gdb=False, old_binary=False, options=[]):
cmd_list = [
"merge",
"-B", backup_dir,
@ -625,11 +690,11 @@ class ProbackupTest(object):
"-i", backup_id
]

return self.run_pb(cmd_list + options, async, gdb)
return self.run_pb(cmd_list + options, async, gdb, old_binary)

def restore_node(
self, backup_dir, instance, node=False,
data_dir=None, backup_id=None, options=[]
data_dir=None, backup_id=None, old_binary=False, options=[]
):
if data_dir is None:
data_dir = node.data_dir
@ -643,11 +708,11 @@ class ProbackupTest(object):
if backup_id:
cmd_list += ["-i", backup_id]

return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, old_binary=old_binary)

def show_pb(
self, backup_dir, instance=None, backup_id=None,
options=[], as_text=False, as_json=True
options=[], as_text=False, as_json=True, old_binary=False
):

backup_list = []
@ -667,11 +732,11 @@ class ProbackupTest(object):

if as_text:
# You should print it when calling as_text=true
return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, old_binary=old_binary)

# get show result as list of lines
if as_json:
data = json.loads(self.run_pb(cmd_list + options))
data = json.loads(self.run_pb(cmd_list + options, old_binary=old_binary))
# print(data)
for instance_data in data:
# find specific instance if requested
@ -687,7 +752,8 @@ class ProbackupTest(object):
backup_list.append(backup)
return backup_list
else:
show_splitted = self.run_pb(cmd_list + options).splitlines()
show_splitted = self.run_pb(
cmd_list + options, old_binary=old_binary).splitlines()
if instance is not None and backup_id is None:
# cut header(ID, Mode, etc) from show as single string
header = show_splitted[1:2][0]
@ -742,7 +808,7 @@ class ProbackupTest(object):

def validate_pb(
self, backup_dir, instance=None,
backup_id=None, options=[]
backup_id=None, options=[], old_binary=False
):

cmd_list = [
@ -754,9 +820,11 @@ class ProbackupTest(object):
if backup_id:
cmd_list += ["-i", backup_id]

return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, old_binary=old_binary)

def delete_pb(self, backup_dir, instance, backup_id=None, options=[]):
def delete_pb(
self, backup_dir, instance,
backup_id=None, options=[], old_binary=False):
cmd_list = [
"delete",
"-B", backup_dir
@ -766,24 +834,26 @@ class ProbackupTest(object):
if backup_id:
cmd_list += ["-i", backup_id]

return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, old_binary=old_binary)

def delete_expired(self, backup_dir, instance, options=[]):
def delete_expired(
self, backup_dir, instance, options=[], old_binary=False):
cmd_list = [
"delete", "--expired", "--wal",
"-B", backup_dir,
"--instance={0}".format(instance)
]
return self.run_pb(cmd_list + options)
return self.run_pb(cmd_list + options, old_binary=old_binary)

def show_config(self, backup_dir, instance):
def show_config(self, backup_dir, instance, old_binary=False):
out_dict = {}
cmd_list = [
"show-config",
"-B", backup_dir,
"--instance={0}".format(instance)
]
res = self.run_pb(cmd_list).splitlines()

res = self.run_pb(cmd_list, old_binary=old_binary).splitlines()
for line in res:
if not line.startswith('#'):
name, var = line.partition(" = ")[::2]
@ -804,7 +874,8 @@ class ProbackupTest(object):
return out_dict

def set_archiving(
self, backup_dir, instance, node, replica=False, overwrite=False):
self, backup_dir, instance, node, replica=False, overwrite=False, compress=False,
old_binary=False):

if replica:
archive_mode = 'always'
@ -824,7 +895,7 @@ class ProbackupTest(object):
self.probackup_path, backup_dir, instance)

if os.name == 'posix':
if self.archive_compress:
if self.archive_compress or compress:
archive_command = archive_command + "--compress "

if overwrite:
@ -940,7 +1011,27 @@ class ProbackupTest(object):
node.execute("select pg_switch_wal()")
else:
node.execute("select pg_switch_xlog()")
sleep(1)

def wait_until_replica_catch_with_master(self, master, replica):

if self.version_to_num(
master.safe_psql(
"postgres",
"show server_version")) >= self.version_to_num('10.0'):
master_function = 'pg_catalog.pg_current_wal_lsn()'
replica_function = 'pg_catalog.pg_last_wal_replay_lsn()'
else:
master_function = 'pg_catalog.pg_current_xlog_location()'
replica_function = 'pg_catalog.pg_last_xlog_replay_location()'

lsn = master.safe_psql(
'postgres',
'SELECT {0}'.format(master_function)).rstrip()
|
||||
# Wait until replica catches up with master
replica.poll_query_until(
'postgres',
"SELECT '{0}'::pg_lsn <= {1}".format(lsn, replica_function))

def get_version(self, node):
return self.version_to_num(
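wait_until_replica_catch_with_master is the synchronization primitive the replica tests below lean on: it reads the master's current write LSN (pg_current_wal_lsn(), or pg_current_xlog_location() before PostgreSQL 10) and polls the replica until its replay LSN has passed it. The typical call site, taken from the hunks below, follows it with a checkpoint so the replayed changes are flushed before ptrack is inspected:

# Block until the standby has replayed everything the primary wrote,
# then checkpoint the standby before reading ptrack or taking a backup.
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')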
@ -513,7 +513,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
pgdata = self.pgdata_content(node.data_dir)

backup_id = self.show_pb(backup_dir, "node")[1]["id"]
self.merge_backup(backup_dir, "node", backup_id)
self.merge_backup(backup_dir, "node", backup_id, options=["-j", "4"])

# RESTORE
node_restored = self.make_simple_node(

173
tests/ptrack.py
@ -32,7 +32,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

# PTRACK BACKUP
try:
@ -82,7 +82,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
self.backup_node(backup_dir, 'node', node, options=['--stream'])
@ -146,7 +146,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()

self.backup_node(backup_dir, 'node', node)
con = node.connect("postgres")
@ -178,7 +178,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))

node_restored.start()
node_restored.slow_start()

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -208,7 +208,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')

self.backup_node(backup_dir, 'node', node)
@ -275,7 +275,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))

node_restored.start()
node_restored.slow_start()

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -308,7 +308,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()
self.create_tblspace_in_node(node, 'somedata')

node.safe_psql(
@ -367,7 +367,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -397,7 +397,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node_restored.cleanup()
node.start()
node.slow_start()

self.backup_node(backup_dir, 'node', node)

@ -437,7 +437,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()

# Logical comparison
self.assertEqual(
@ -468,7 +468,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

node.safe_psql(
"postgres",
@ -509,7 +509,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
node.data_dir, ignore_ptrack=False)
self.compare_pgdata(pgdata, pgdata_restored)

node.start()
node.slow_start()
# Logical comparison
self.assertEqual(
result,
@ -541,7 +541,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
node.safe_psql("postgres", "create sequence t_seq")
@ -637,7 +637,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
node.safe_psql(
@ -743,7 +743,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
node.safe_psql(
@ -827,7 +827,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
node.safe_psql(
@ -898,7 +898,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
node.safe_psql(
@ -978,7 +978,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
node.safe_psql(
@ -1027,7 +1027,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()

# DROP DATABASE DB1
node.safe_psql(
@ -1057,7 +1057,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# START RESTORED NODE
node_restored.append_conf(
"postgresql.auto.conf", "port = {0}".format(node_restored.port))
node_restored.start()
node_restored.slow_start()

try:
node_restored.safe_psql('db1', 'select 1')
@ -1078,6 +1078,110 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
def test_create_db_on_replica(self):
"""
Make node, take full backup, create replica from it,
take full backup from replica,
create database db1, take ptrack backup from replica,
restore database and check its presence
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'wal_level': 'replica',
'max_wal_senders': '2',
'checkpoint_timeout': '30s',
'ptrack_enable': 'on',
'autovacuum': 'off'
}
)

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()

# FULL BACKUP
node.safe_psql(
"postgres",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()

self.backup_node(
backup_dir, 'node', node, options=['-j10'])

self.restore_node(backup_dir, 'node', replica)

# Add replica
self.add_instance(backup_dir, 'replica', replica)
self.set_replica(node, replica, 'replica', synchronous=True)
self.set_archiving(backup_dir, 'replica', replica, replica=True)
replica.slow_start(replica=True)

self.backup_node(
backup_dir, 'replica', replica,
options=[
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(node.port)
]
)

# CREATE DATABASE DB1
node.safe_psql("postgres", "create database db1")
node.safe_psql(
"db1",
"create table t_heap as select i as id, md5(i::text) as text, "
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

# Wait until replica catches up with master
self.wait_until_replica_catch_with_master(node, replica)
replica.safe_psql('postgres', 'checkpoint')

# PTRACK BACKUP
backup_id = self.backup_node(
backup_dir, 'replica',
replica, backup_type='ptrack',
options=[
'-j10',
'--master-host=localhost',
'--master-db=postgres',
'--master-port={0}'.format(node.port)
]
)

if self.paranoia:
pgdata = self.pgdata_content(replica.data_dir)

# RESTORE
node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname)
)
node_restored.cleanup()

self.restore_node(
backup_dir, 'replica', node_restored,
backup_id=backup_id, options=["-j", "4"])

# COMPARE PHYSICAL CONTENT
if self.paranoia:
pgdata_restored = self.pgdata_content(
node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)

# Clean after yourself
self.del_test_dir(module_name, fname)
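Note that every backup taken from the standby passes connection options for the master. As far as I can tell, this is what lets pg_probackup drive WAL activity on the primary so the standby can reach the backup start point; treat that reading as an assumption, the option values below are simply the test defaults:

# Invocation pattern for backups taken from a replica in this suite.
self.backup_node(
    backup_dir, 'replica', replica, backup_type='ptrack',
    options=[
        '-j10',
        '--master-host=localhost',
        '--master-db=postgres',
        '--master-port={0}'.format(node.port)])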

# @unittest.skip("skip")
def test_alter_table_set_tablespace_ptrack(self):
"""Make node, create tablespace with table, take full backup,
@ -1098,7 +1202,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
self.create_tblspace_in_node(node, 'somedata')
@ -1195,7 +1299,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

# FULL BACKUP
self.backup_node(backup_dir, 'node', node, options=["--stream"])
@ -1237,7 +1341,8 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.compare_pgdata(pgdata, pgdata_restored)

# START RESTORED NODE
node_restored.start()
node_restored.port = node.port
node_restored.slow_start()

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -1265,7 +1370,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

self.create_tblspace_in_node(node, 'somedata')

@ -1307,7 +1412,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
node.cleanup()
shutil.rmtree(tblspace, ignore_errors=True)
self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
node.start()
node.slow_start()

tblspc_exist = node.safe_psql(
"postgres",
@ -1347,7 +1452,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

self.create_tblspace_in_node(node, 'somedata')
tblspc_path = self.get_tblspace_path(node, 'somedata')
@ -1463,7 +1568,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()
node.slow_start()

self.create_tblspace_in_node(node, 'somedata')

@ -1481,6 +1586,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):

pgbench = node.pgbench(options=['-T', '150', '-c', '2', '--no-vacuum'])
pgbench.wait()

node.safe_psql("postgres", "checkpoint")

idx_ptrack['new_size'] = self.get_fork_size(
@ -1495,10 +1601,15 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
node,
idx_ptrack['path']
)
self.check_ptrack_sanity(idx_ptrack)

if not self.check_ptrack_sanity(idx_ptrack):
self.assertTrue(
False, 'Ptrack has failed to register changes in data files'
)

# GET LOGICAL CONTENT FROM NODE
result = node.safe_psql("postgres", "select * from pgbench_accounts")
# it's stupid, because hints are ignored by ptrack
#result = node.safe_psql("postgres", "select * from pgbench_accounts")
# FIRST PTRACK BACKUP
self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
@ -1538,7 +1649,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
)

# COMPARE RESTORED FILES
self.assertEqual(result, result_new, 'data is lost')
#self.assertEqual(result, result_new, 'data is lost')

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -1562,7 +1673,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.start()
node.slow_start()

# Take FULL backup to clean every ptrack
self.backup_node(

@ -33,11 +33,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata "
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
@ -151,10 +152,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, "
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,256) i")
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql(

@ -14,10 +14,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -28,11 +33,19 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap tablespace somedata as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create extension bloom; create sequence t_seq; "
"create table t_heap tablespace somedata "
"as select i as id, nextval('t_seq') as t_seq, "
"md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
"as tsvector from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))

node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
@ -46,12 +59,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_btree')
node.safe_psql('postgres', 'checkpoint')

# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -62,10 +78,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -73,10 +95,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
fname = self.id().split('.')[3]
node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
@ -85,11 +112,18 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
node.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'], idx_ptrack[i]['column']))

node.safe_psql('postgres', 'vacuum t_heap')
node.safe_psql('postgres', 'checkpoint')
@ -103,12 +137,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
self.backup_node(
backup_dir, 'node', node, options=['-j10', '--stream'])

node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
node.safe_psql('postgres', 'cluster t_heap using t_gist')
node.safe_psql('postgres', 'checkpoint')

# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -119,10 +156,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
node, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)

# Clean after yourself
self.del_test_dir(module_name, fname)
@ -130,10 +173,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# @unittest.skip("skip")
def test_ptrack_cluster_on_btree_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
@ -141,7 +189,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

self.backup_node(backup_dir, 'master', master, options=['--stream'])

replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()

self.restore_node(backup_dir, 'master', replica)
@ -154,15 +203,30 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))

master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')

self.backup_node(
backup_dir, 'replica', replica, options=[
'-j10', '--stream', '--master-host=localhost',
'--master-db=postgres', '--master-port={0}'.format(
master.port)])

for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -172,13 +236,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_btree')
master.safe_psql('postgres', 'checkpoint')

# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')

# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -189,21 +256,32 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

# compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)

# Clean after yourself
self.del_test_dir(module_name, fname)

#@unittest.skip("skip")
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
fname = self.id().split('.')[3]
master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
master = self.make_simple_node(
base_dir="{0}/{1}/master".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2'})

backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
@ -211,7 +289,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):

self.backup_node(backup_dir, 'master', master, options=['--stream'])

replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
replica = self.make_simple_node(
base_dir="{0}/{1}/replica".format(module_name, fname))
replica.cleanup()

self.restore_node(backup_dir, 'master', replica)
@ -224,15 +303,34 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
# Create table and indexes
master.safe_psql(
"postgres",
"create sequence t_seq; create table t_heap as select i as id, nextval('t_seq') as t_seq, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
"create extension bloom; create sequence t_seq; "
"create table t_heap as select i as id, "
"nextval('t_seq') as t_seq, md5(i::text) as text, "
"md5(repeat(i::text,10))::tsvector as tsvector "
"from generate_series(0,2560) i")

for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
master.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3})".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))

master.safe_psql('postgres', 'vacuum t_heap')
master.safe_psql('postgres', 'checkpoint')

# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')

self.backup_node(
backup_dir, 'replica', replica, options=[
'-j10', '--stream', '--master-host=localhost',
'--master-db=postgres', '--master-port={0}'.format(
master.port)])

for i in idx_ptrack:
# get size of heap and indexes. size calculated in pages
idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -242,13 +340,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream',
'--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
master.safe_psql('postgres', 'cluster t_heap using t_gist')
master.safe_psql('postgres', 'checkpoint')

# Sync master and replica
self.wait_until_replica_catch_with_master(master, replica)
replica.safe_psql('postgres', 'checkpoint')

# CHECK PTRACK SANITY
success = True
for i in idx_ptrack:
# get new size of heap and indexes. size calculated in pages
idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -259,10 +360,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
# get ptrack for every idx
idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
replica, idx_ptrack[i]['path'],
[idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

# Compare pages and check ptrack sanity
self.check_ptrack_sanity(idx_ptrack[i])
if not self.check_ptrack_sanity(idx_ptrack[i]):
success = False

self.assertTrue(
success, 'Ptrack has failed to register changes in data files'
)

# Clean after yourself
self.del_test_dir(module_name, fname)

183
tests/ptrack_empty.py
Normal file
@ -0,0 +1,183 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time


module_name = 'ptrack_clean'


class SimpleTest(ProbackupTest, unittest.TestCase):

# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
"""Take backups of every available types and check that PTRACK is clean"""
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir="{0}/{1}/node".format(module_name, fname),
set_replication=True,
initdb_params=['--data-checksums'],
pg_options={
'ptrack_enable': 'on',
'wal_level': 'replica',
'max_wal_senders': '2',
'autovacuum': 'off'})
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.start()

self.create_tblspace_in_node(node, 'somedata')

# Create table
node.safe_psql(
"postgres",
"create extension bloom; create sequence t_seq; "
"create table t_heap "
"(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) "
"tablespace somedata")

# Take FULL backup to clean every ptrack
self.backup_node(
backup_dir, 'node', node,
options=['-j10', '--stream'])

# Create indexes
for i in idx_ptrack:
if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
node.safe_psql(
"postgres",
"create index {0} on {1} using {2}({3}) "
"tablespace somedata".format(
i, idx_ptrack[i]['relation'],
idx_ptrack[i]['type'],
idx_ptrack[i]['column']))

node.safe_psql('postgres', 'checkpoint')

node_restored = self.make_simple_node(
base_dir="{0}/{1}/node_restored".format(module_name, fname))
node_restored.cleanup()

tblspace1 = self.get_tblspace_path(node, 'somedata')
tblspace2 = self.get_tblspace_path(node_restored, 'somedata')

# Take PTRACK backup
backup_id = self.backup_node(
backup_dir, 'node', node, backup_type='ptrack',
options=['-j10', '--log-level-file=verbose'])

if self.paranoia:
pgdata = self.pgdata_content(node.data_dir)

self.restore_node(
backup_dir, 'node', node_restored,
backup_id=backup_id,
options=[
"-j", "4",
"-T{0}={1}".format(tblspace1, tblspace2)]
)

if self.paranoia:
pgdata_restored = self.pgdata_content(node_restored.data_dir)
self.compare_pgdata(pgdata, pgdata_restored)

# Clean after yourself
self.del_test_dir(module_name, fname)

# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean_replica(self):
"""Take backups of every available types from master and check that PTRACK on replica is clean"""
|
||||
        fname = self.id().split('.')[3]
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()

        # Create table
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)")
        self.wait_until_replica_catch_with_master(master, replica)

        # Take FULL backup
        self.backup_node(
            backup_dir,
            'replica',
            replica,
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        self.wait_until_replica_catch_with_master(master, replica)

        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname))
        node_restored.cleanup()

        # Take PTRACK backup
        backup_id = self.backup_node(
            backup_dir,
            'replica',
            replica,
            backup_type='ptrack',
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        if self.paranoia:
            pgdata = self.pgdata_content(replica.data_dir)

        self.restore_node(
            backup_dir, 'replica', node_restored,
            backup_id=backup_id,
            options=["-j", "4"]
        )

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Clean after yourself
        self.del_test_dir(module_name, fname)
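# check_ptrack_clean() used by these replica tests is expected to verify
# that right after a backup no page of a fork is still marked as changed.
# A minimal sketch of that invariant, assuming a sequence with one change
# flag per page (a hypothetical stand-in for the suite's helper):
def ptrack_is_clean(ptrack_bits, size_in_pages):
    # every tracked page must have its change bit cleared
    return all(bit == 0 for bit in ptrack_bits[:size_in_pages])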

@ -12,10 +12,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_recovery(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
@ -24,19 +29,31 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        node.safe_psql("postgres",
            "create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text,md5(repeat(i::text,10))::tsvector as "
            "tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        # Move table and indexes and make checkpoint
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] == 'heap':
                node.safe_psql('postgres', 'alter table {0} set tablespace somedata;'.format(i))
                node.safe_psql(
                    'postgres',
                    'alter table {0} set tablespace somedata;'.format(i))
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql('postgres', 'alter index {0} set tablespace somedata'.format(i))
                node.safe_psql(
                    'postgres',
                    'alter index {0} set tablespace somedata'.format(i))
        node.safe_psql('postgres', 'checkpoint')

        # Check ptrack files

@ -13,10 +13,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_recovery(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
@ -25,13 +30,22 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.create_tblspace_in_node(node, 'somedata')

        # Create table
        node.safe_psql("postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text,md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres", "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['size'] = int(self.get_fork_size(node, i))

@ -8,29 +8,43 @@ module_name = 'ptrack_truncate'

class SimpleTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_truncate(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        node.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'truncate t_heap')
        node.safe_psql('postgres', 'checkpoint')
@ -45,7 +59,9 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Make full backup to clean every ptrack
        self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
@ -54,21 +70,28 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)
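# get_ptrack_bits_per_page_for_fork() reduces the ptrack map to one change
# flag per data page. A rough sketch of that reduction on Python 3, assuming
# a bytes object holding one bit per page, lowest bit first (an illustration
# only; the real on-disk layout is defined by the ptrack patch):
def bits_per_page(ptrack_map, n_pages):
    return [(ptrack_map[page // 8] >> (page % 8)) & 1 for page in range(n_pages)]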

    # @unittest.skip("skip")
    @unittest.skip("skip")
    def test_ptrack_truncate_replica(self):
        fname = self.id().split('.')[3]
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2',
                'checkpoint_timeout': '30'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)
@ -76,18 +99,27 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()
        replica.slow_start(replica=True)

        # Create table and indexes
        self.create_tblspace_in_node(master, 'somedata')
        master.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                master.safe_psql(
                    "postgres", "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        replica.safe_psql('postgres', 'truncate t_heap')
        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
@ -100,17 +132,28 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Make full backup to clean every ptrack
        self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream'])
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        # Delete some rows, vacuum it and make checkpoint
        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'truncate t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -121,10 +164,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                replica, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
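# The success flag above aggregates results so that one run reports every
# broken fork instead of stopping at the first failure. On Python 3 the same
# intent could be written with unittest's subTest (a sketch, not the pattern
# this suite uses; the index names are hypothetical):
import unittest

class PtrackSanityExample(unittest.TestCase):
    def check_ptrack_sanity(self, item):
        return True  # placeholder for the suite's real check

    def test_all_indexes(self):
        for name in ('t_btree', 't_gin'):
            with self.subTest(index=name):
                self.assertTrue(self.check_ptrack_sanity(name))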

@ -12,25 +12,39 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_vacuum(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        node.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')
@ -45,7 +59,8 @ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Make full backup to clean every ptrack
        self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])
        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
@ -56,6 +71,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -66,10 +83,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                node, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
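# get_fork_size() returns a relation fork size expressed in pages. The same
# number can be derived in SQL, assuming the default 8192-byte block size
# (a sketch, not the suite's implementation):
def fork_size_in_pages(node, relation, block_size=8192):
    size = int(node.safe_psql(
        'postgres', "select pg_relation_size('{0}')".format(relation)))
    return size // block_size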

@ -77,18 +100,25 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.skip("skip")
    def test_ptrack_vacuum_replica(self):
        fname = self.id().split('.')[3]
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2',
                'checkpoint_timeout': '30'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)
@ -96,20 +126,43 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Make FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica, options=[
                '-j10', '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        for i in idx_ptrack:
            # get fork size and calculate it in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -119,20 +172,17 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Make FULL backup to clean every ptrack
        self.backup_node(backup_dir, 'replica', replica, options=['-j10',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        # Delete some rows, vacuum it and make checkpoint
        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -143,10 +193,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                replica, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
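# get_md5_per_page_for_fork() fingerprints every page of a fork so that the
# pages changed between two checkpoints can be identified later. A sketch of
# the idea, assuming 8192-byte pages (a hypothetical helper, for illustration):
import hashlib

def md5_per_page(path, size_in_pages, page_size=8192):
    digests = {}
    with open(path, 'rb') as fork:
        for page in range(size_in_pages):
            digests[page] = hashlib.md5(fork.read(page_size)).hexdigest()
    return digests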

@ -12,28 +12,48 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_vacuum_bits_frozen(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        res = node.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'checkpoint')

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'vacuum freeze t_heap')
        node.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
@ -43,11 +63,8 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'vacuum freeze t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -58,10 +75,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                node, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
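# check_ptrack_sanity() compares the two fingerprint sets: any page whose
# content changed (here VACUUM FREEZE rewrites tuple headers in place) must
# also be flagged in the ptrack map. A condensed sketch of the invariant,
# with hypothetical names mirroring the idx_ptrack fields used above:
def ptrack_covers_changes(old_pages, new_pages, ptrack_bits):
    for page, digest in new_pages.items():
        if old_pages.get(page) != digest and not ptrack_bits[page]:
            return False  # a changed page was not registered by ptrack
    return True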

@ -69,18 +92,24 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.skip("skip")
    def test_ptrack_vacuum_bits_frozen_replica(self):
        fname = self.id().split('.')[3]
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)
@ -88,19 +117,39 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Take PTRACK backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -110,13 +159,15 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Take PTRACK backup to clean every ptrack
        self.backup_node(backup_dir, 'replica', replica, options=['-j10',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

        master.safe_psql('postgres', 'vacuum freeze t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -127,10 +178,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                replica, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)

@ -12,28 +12,45 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_vacuum_bits_visibility(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        res = node.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'checkpoint')

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
@ -43,11 +60,11 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -58,10 +75,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                node, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
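# Plain VACUUM here mostly sets hint and visibility bits, yet even those
# writes change page contents and must show up in ptrack. The all-visible
# flags themselves can be inspected with the pg_visibility extension
# (a sketch; the extension is not used by this suite):
def show_visibility_map(node):
    node.safe_psql('postgres', 'create extension if not exists pg_visibility')
    return node.safe_psql(
        'postgres',
        "select blkno, all_visible from pg_visibility_map('t_heap')")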

@ -1,6 +1,7 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, idx_ptrack
import time


module_name = 'ptrack_vacuum_full'
@ -12,29 +13,44 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_vacuum_full(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        res = node.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres", "create index {0} on {1} "
                    "using {2}({3}) tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
@ -44,12 +60,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        node.safe_psql('postgres', 'vacuum full t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -60,10 +76,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                node, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity, the most important part
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
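# VACUUM FULL rewrites t_heap and its indexes into fresh data files, so
# nearly every page of the new relfilenode differs from the fingerprints
# taken before, and ptrack has to mark all of them. The rewrite itself can
# be observed through pg_class (a sketch, not part of the test):
def relfilenode(node, relation):
    return int(node.safe_psql(
        'postgres',
        "select relfilenode from pg_class "
        "where relname = '{0}'".format(relation)))
# relfilenode(node, 't_heap') returns a different value after VACUUM FULL.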

@ -72,17 +94,23 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_vacuum_full_replica(self):
        fname = self.id().split('.')[3]
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on', 'wal_level': 'replica',
                'max_wal_senders': '2', 'autovacuum': 'off',
                'checkpoint_timeout': '30s'}
        )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])
        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)
@ -90,20 +118,42 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,127) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as "
            "tsvector from generate_series(0,256000) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)
            ]
        )
        # TODO: check that all ptrack are nullified
        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -113,14 +163,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Take FULL backup to clean every ptrack
        self.backup_node(backup_dir, 'replica', replica, options=['-j10',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum full t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -131,10 +183,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                replica, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity, the most important part
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)

@ -12,29 +12,44 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
    # @unittest.expectedFailure
    def test_ptrack_vacuum_truncate(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        res = node.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap tablespace somedata as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql("postgres", "create index {0} on {1} using {2}({3}) tablespace somedata".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                node.safe_psql(
                    "postgres", "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
@ -44,12 +59,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'delete from t_heap where id > 128;')
        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(node, i)
@ -60,31 +75,43 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                node, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_truncate_replica(self):
        fname = self.id().split('.')[3]
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
        master = self.make_simple_node(
            base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'ptrack_enable': 'on', 'wal_level': 'replica', 'max_wal_senders': '2'})
            pg_options={
                'ptrack_enable': 'on',
                'wal_level': 'replica',
                'max_wal_senders': '2',
                'checkpoint_timeout': '30'})

        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.start()
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica = self.make_simple_node(
            base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)
@ -92,20 +119,38 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start()
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create sequence t_seq; create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql("postgres", "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'], idx_ptrack[i]['type'], idx_ptrack[i]['column']))
                master.safe_psql(
                    "postgres", "create index {0} on {1} "
                    "using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Take PTRACK backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)
            ]
        )

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
@ -115,14 +160,12 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Take PTRACK backup to clean every ptrack
        self.backup_node(backup_dir, 'replica', replica, options=['-j10',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])

        master.safe_psql('postgres', 'delete from t_heap where id > 128;')
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        success = True
        for i in idx_ptrack:
            # get new size of heap and indexes. size calculated in pages
            idx_ptrack[i]['new_size'] = self.get_fork_size(replica, i)
@ -133,10 +176,16 @@ class SimpleTest(ProbackupTest, unittest.TestCase):
                idx_ptrack[i]['path'], idx_ptrack[i]['new_size'])
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])
                replica, idx_ptrack[i]['path'],
                [idx_ptrack[i]['old_size'], idx_ptrack[i]['new_size']])

            # compare pages and check ptrack sanity
            self.check_ptrack_sanity(idx_ptrack[i])
            if not self.check_ptrack_sanity(idx_ptrack[i]):
                success = False

        self.assertTrue(
            success, 'Ptrack has failed to register changes in data files'
        )

        # Clean after yourself
        self.del_test_dir(module_name, fname)
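# wait_until_replica_catch_with_master() blocks until the replica has
# replayed everything the master wrote, so page images on both sides become
# comparable. A rough sketch of such a wait for 9.6-era servers, assuming
# safe_psql returns text (the suite's helper may be implemented differently):
import time

def wait_for_catchup(master, replica, timeout=60):
    target = master.safe_psql(
        'postgres', 'select pg_current_xlog_location()').strip()
    for _ in range(timeout):
        caught_up = replica.safe_psql(
            'postgres',
            "select pg_xlog_location_diff("
            "pg_last_xlog_replay_location(), '{0}') >= 0".format(target)).strip()
        if caught_up == 't':
            return
        time.sleep(1)
    raise Exception('replica did not catch up within {0}s'.format(timeout))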
|
||||
|
@ -1242,7 +1242,7 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
|
||||
# Clean after yourself
|
||||
self.del_test_dir(module_name, fname)
|
||||
|
||||
# @unittest.skip("skip")
|
||||
@unittest.skip("skip")
|
||||
# @unittest.expectedFailure
|
||||
def test_zags_block_corrupt(self):
|
||||
fname = self.id().split('.')[3]
|
||||
@ -1314,8 +1314,129 @@ class RestoreTest(ProbackupTest, unittest.TestCase):
            backup_dir, 'node', node_restored)

        node_restored.append_conf("postgresql.auto.conf", "archive_mode = 'off'")
        node_restored.append_conf("postgresql.auto.conf", "hot_standby = 'on'")
        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()
        exit(1)

    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_zags_block_corrupt_1(self):
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica',
                'autovacuum': 'off',
                'full_page_writes': 'on'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql('postgres', 'create table tbl(i int)')

        node.safe_psql('postgres', 'create index idx ON tbl (i)')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100000) as i')

        print(node.safe_psql(
            'postgres',
            "select pg_relation_size('idx')"))

        node.safe_psql(
            'postgres',
            'delete from tbl where i%2 = 0')

        node.safe_psql(
            'postgres',
            'explain analyze select i from tbl order by i')

        node.safe_psql(
            'postgres',
            'select i from tbl order by i')

        node.safe_psql(
            'postgres',
            'create extension pageinspect')

        print(node.safe_psql(
            'postgres',
            "select * from bt_page_stats('idx',1)"))

        node.safe_psql(
            'postgres',
            'checkpoint')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        node.safe_psql(
            'postgres',
            'insert into tbl select i from generate_series(0,100) as i')

        self.switch_wal_segment(node)

        node_restored = self.make_simple_node(
            base_dir="{0}/{1}/node_restored".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        node_restored.append_conf("postgresql.auto.conf", "archive_mode = 'off'")
        node_restored.append_conf("postgresql.auto.conf", "hot_standby = 'on'")
        node_restored.append_conf(
            "postgresql.auto.conf", "port = {0}".format(node_restored.port))

        node_restored.slow_start()

        while True:
            with open(node_restored.pg_log_file, 'r') as f:
                if 'selected new timeline ID' in f.read():
                    break

        with open(node_restored.pg_log_file, 'r') as f:
            print(f.read())

        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.compare_pgdata(pgdata, pgdata_restored)

        # pg_xlogdump_path = self.get_bin_path('pg_xlogdump')

        # pg_xlogdump = self.run_binary(
        #     [
        #         pg_xlogdump_path, '-b',
        #         os.path.join(backup_dir, 'wal', 'node', '000000010000000000000003'),
        #         ' | ', 'grep', 'Btree', ''
        #     ], async=False)

        if pg_xlogdump.returncode:
            self.assertFalse(
                True,
                'Failed to start pg_wal_dump: {0}'.format(
                    pg_receivexlog.communicate()[1]))
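
Two fragile spots in the hunk above: the `while True` poll never times out if the standby never logs 'selected new timeline ID', and the `if pg_xlogdump.returncode:` block refers to the commented-out `run_binary` call (and to `pg_receivexlog`), so it would raise NameError if this skipped test were re-enabled. A bounded variant of the same wait (a sketch, reusing `node_restored.pg_log_file` from the test; the 60 s budget is an assumption):

    import time

    # Sketch: wait up to 60 s for the standby to pick a new timeline,
    # instead of spinning forever on the log file.
    deadline = time.time() + 60
    while time.time() < deadline:
        with open(node_restored.pg_log_file, 'r') as f:
            if 'selected new timeline ID' in f.read():
                break
        time.sleep(1)
    else:
        raise AssertionError('standby never selected a new timeline')
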
@ -135,7 +135,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
            backup_id, "backup.control")
        os.remove(file)

        self.assertIn('control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
        self.assertIn('Control file "{0}" doesn\'t exist'.format(file), self.show_pb(backup_dir, 'node', as_text=True))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@ -165,7 +165,7 @@ class OptionTest(ProbackupTest, unittest.TestCase):
        fd = open(file, 'w')
        fd.close()

        self.assertIn('control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))
        self.assertIn('Control file "{0}" is empty'.format(file), self.show_pb(backup_dir, 'node', as_text=True))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@ -5,6 +5,7 @@ from datetime import datetime, timedelta
import subprocess
from sys import exit
import time
import hashlib


module_name = 'validate'
@ -12,6 +13,57 @@ module_name = 'validate'

class ValidateTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_validate_nullified_heap_page_backup(self):
        """
        make node with nullified heap block
        """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=3)

        file_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('pgbench_accounts')").rstrip()

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # Nullify some block in PostgreSQL
        file = os.path.join(node.data_dir, file_path)
        with open(file, 'r+b') as f:
            f.seek(8192)
            f.write(b"\x00"*8192)
            f.flush()
            f.close

        self.backup_node(
            backup_dir, 'node', node, options=["--log-level-file=verbose"])

        log_file_path = os.path.join(backup_dir, "log", "pg_probackup.log")

        with open(log_file_path) as f:
            self.assertTrue(
                'LOG: File: {0} blknum 1, empty page'.format(file) in f.read(),
                'Failed to detect nullified block')

        self.validate_pb(backup_dir)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_validate_wal_unreal_values(self):
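
The nullification step in the new test zeroes block 1 of pgbench_accounts: `f.seek(8192)` skips block 0 in a build with the default 8 KB page size, which is why the expected log line says "blknum 1". The same trick as a reusable helper (a sketch; BLCKSZ = 8192 assumes a default-configured build):

    BLCKSZ = 8192  # assumes a default-configured PostgreSQL build

    def nullify_block(relfile, blkno):
        # Overwrite one page with zeros; pg_probackup then reports it
        # as an empty page during backup, as the test asserts above.
        with open(relfile, 'r+b') as f:
            f.seek(blkno * BLCKSZ)
            f.write(b'\x00' * BLCKSZ)
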
@ -29,7 +81,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()
        node.slow_start()

        node.pgbench_init(scale=3)
        with node.connect("postgres") as con:
@ -198,7 +250,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
        # Corrupt some file
        file = os.path.join(
            backup_dir, 'backups/node', backup_id_2, 'database', file_path)
        with open(file, "rb+", 0) as f:
        with open(file, "r+b", 0) as f:
            f.seek(42)
            f.write(b"blah")
            f.flush()
@ -219,6 +271,8 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
                'INFO: Validating parents for backup {0}'.format(
                    backup_id_2) in e.message and
                'ERROR: Backup {0} is corrupt'.format(
                    backup_id_2) in e.message and
                'WARNING: Backup {0} data files are corrupted'.format(
                    backup_id_2) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
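
Note that "rb+" and "r+b" are equivalent open modes in Python (binary read/write without truncation), so that part of the hunk is cosmetic; the substance is damaging four bytes at offset 42 inside a backed-up data file. As a generic form of that corruption step (a sketch; the helper name and defaults are hypothetical):

    def corrupt_file(path, offset=42, garbage=b'blah'):
        # Damage a few bytes in place so validation detects the
        # mismatch and marks the backup's data files as corrupted.
        with open(path, 'r+b') as f:
            f.seek(offset)
            f.write(garbage)
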
@ -1530,7 +1584,10 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            pg_options={
                'wal_level': 'replica',
                'max_wal_senders': '2',
                'checkpoint_timeout': '30'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
@ -1561,6 +1618,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
                    self.output, self.cmd))
        except ProbackupException as e:
            pass

        self.assertTrue(
            self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR')
        self.set_archiving(backup_dir, 'node', node)
@ -1709,11 +1767,12 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
        self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK')

        os.rename(file_new, file)

        file = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id_page, 'database', 'postgresql.auto.conf')
            backup_id_page, 'database', 'backup_label')

        file_new = os.path.join(backup_dir, 'postgresql.auto.conf')
        file_new = os.path.join(backup_dir, 'backup_label')
        os.rename(file, file_new)

        try:
@ -1785,9 +1844,9 @@ class ValidateTest(ProbackupTest, unittest.TestCase):

        file = os.path.join(
            backup_dir, 'backups', 'node',
            corrupt_id, 'database', 'postgresql.auto.conf')
            corrupt_id, 'database', 'backup_label')

        file_new = os.path.join(backup_dir, 'postgresql.auto.conf')
        file_new = os.path.join(backup_dir, 'backup_label')
        os.rename(file, file_new)

        try:
@ -3006,4 +3065,74 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_corrupt_pg_control_via_resetxlog(self):
        """ PGPRO-2096 """
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
            )
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        backup_id = self.backup_node(backup_dir, 'node', node)

        if self.get_version(node) < 100000:
            pg_resetxlog_path = self.get_bin_path('pg_resetxlog')
            wal_dir = 'pg_xlog'
        else:
            pg_resetxlog_path = self.get_bin_path('pg_resetwal')
            wal_dir = 'pg_wal'

        os.mkdir(
            os.path.join(
                backup_dir, 'backups', 'node', backup_id, 'database', wal_dir, 'archive_status'))

        pg_control_path = os.path.join(
            backup_dir, 'backups', 'node',
            backup_id, 'database', 'global', 'pg_control')

        md5_before = hashlib.md5(
            open(pg_control_path, 'rb').read()).hexdigest()

        self.run_binary(
            [
                pg_resetxlog_path,
                os.path.join(backup_dir, 'backups', 'node', backup_id, 'database'),
                '-o 42',
                '-f'
            ],
            async=False)

        md5_after = hashlib.md5(
            open(pg_control_path, 'rb').read()).hexdigest()

        if self.verbose:
            print('\n MD5 BEFORE resetxlog: {0}\n MD5 AFTER resetxlog: {1}'.format(
                md5_before, md5_after))

        # Validate backup
        try:
            self.validate_pb(backup_dir, 'node')
            self.assertEqual(
                1, 0,
                "Expecting Error because of pg_control change.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'data files are corrupted',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # validate empty backup list
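
The new PGPRO-2096 test picks pg_resetxlog (pre-10 servers) or pg_resetwal by version, runs it with -f against the backup's database directory so pg_control gets rewritten in place, logs md5 checksums of pg_control before and after, and expects validation to report corrupted data files. The checksum snapshotting it relies on, in isolation (a sketch; pg_control_path is as defined in the test, the new hashlib import comes from the hunk above):

    import hashlib

    def file_md5(path):
        # pg_control is a single small file, so reading it whole is fine.
        with open(path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()

    # Usage in the spirit of the test:
    # before = file_md5(pg_control_path)
    # ... run pg_resetwal -f -o 42 against <backup>/database ...
    # after = file_md5(pg_control_path)  # differs once pg_control is rewritten
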