diff --git a/src/backup.c b/src/backup.c index b61910b7..bd5ee590 100644 --- a/src/backup.c +++ b/src/backup.c @@ -115,6 +115,7 @@ static void *StreamLog(void *arg); static void get_remote_pgdata_filelist(parray *files); static void ReceiveFileList(parray* files, PGconn *conn, PGresult *res, int rownum); static void remote_copy_file(PGconn *conn, pgFile* file); +static void check_external_for_tablespaces(parray *external_list); /* Ptrack functions */ static void pg_ptrack_clear(void); @@ -468,6 +469,7 @@ do_backup_instance(void) { int i; char database_path[MAXPGPATH]; + char external_prefix[MAXPGPATH]; /* Temp value. Used as template */ char dst_backup_path[MAXPGPATH]; char label[1024]; XLogRecPtr prev_backup_start_lsn = InvalidXLogRecPtr; @@ -480,10 +482,16 @@ do_backup_instance(void) pgBackup *prev_backup = NULL; parray *prev_backup_filelist = NULL; parray *backup_list = NULL; + parray *external_dirs = NULL; pgFile *pg_control = NULL; elog(LOG, "Database backup start"); + if(current.external_dir_str) + { + external_dirs = make_external_directory_list(current.external_dir_str); + check_external_for_tablespaces(external_dirs); + } /* Initialize size summary */ current.data_bytes = 0; @@ -537,7 +545,7 @@ do_backup_instance(void) pgBackupGetPath(prev_backup, prev_backup_filelist_path, lengthof(prev_backup_filelist_path), DATABASE_FILE_LIST); /* Files of previous backup needed by DELTA backup */ - prev_backup_filelist = dir_read_file_list(NULL, prev_backup_filelist_path); + prev_backup_filelist = dir_read_file_list(NULL, NULL, prev_backup_filelist_path); /* If lsn is not NULL, only pages with higher lsn will be copied. */ prev_backup_start_lsn = prev_backup->start_lsn; @@ -579,6 +587,8 @@ do_backup_instance(void) pgBackupGetPath(¤t, database_path, lengthof(database_path), DATABASE_DIR); + pgBackupGetPath(¤t, external_prefix, lengthof(external_prefix), + EXTERNAL_DIR); /* start stream replication */ if (stream_wal) @@ -632,8 +642,18 @@ do_backup_instance(void) if (is_remote_backup) get_remote_pgdata_filelist(backup_files_list); else - dir_list_file(backup_files_list, instance_config.pgdata, - true, true, false); + dir_list_file(backup_files_list, instance_config.pgdata, true, true, false, 0); + + /* + * Append to backup list all files and directories + * from external directory option + */ + if (external_dirs) + for (i = 0; i < parray_num(external_dirs); i++) + /* External dirs numeration starts with 1. 
+ * 0 value is not external dir */ + dir_list_file(backup_files_list, parray_get(external_dirs, i), + false, true, false, i+1); /* Sanity check for backup_files_list, thank you, Windows: * https://github.com/postgrespro/pg_probackup/issues/48 @@ -701,18 +721,28 @@ do_backup_instance(void) { char dirpath[MAXPGPATH]; char *dir_name; - char database_path[MAXPGPATH]; if (!is_remote_backup) - dir_name = GetRelativePath(file->path, instance_config.pgdata); + if (file->external_dir_num) + dir_name = GetRelativePath(file->path, + parray_get(external_dirs, + file->external_dir_num - 1)); + else + dir_name = GetRelativePath(file->path, instance_config.pgdata); else dir_name = file->path; elog(VERBOSE, "Create directory \"%s\"", dir_name); - pgBackupGetPath(¤t, database_path, lengthof(database_path), - DATABASE_DIR); - join_path_components(dirpath, database_path, dir_name); + if (file->external_dir_num) + { + char temp[MAXPGPATH]; + snprintf(temp, MAXPGPATH, "%s%d", external_prefix, + file->external_dir_num); + join_path_components(dirpath, temp, dir_name); + } + else + join_path_components(dirpath, database_path, dir_name); dir_create_dir(dirpath, DIR_PERMISSION); } @@ -724,7 +754,7 @@ do_backup_instance(void) parray_qsort(backup_files_list, pgFileCompareSize); /* Sort the array for binary search */ if (prev_backup_filelist) - parray_qsort(prev_backup_filelist, pgFileComparePath); + parray_qsort(prev_backup_filelist, pgFileComparePathWithExternal); /* init thread args with own file lists */ threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); @@ -736,6 +766,8 @@ do_backup_instance(void) arg->from_root = instance_config.pgdata; arg->to_root = database_path; + arg->external_prefix = external_prefix; + arg->external_dirs = external_dirs; arg->files_list = backup_files_list; arg->prev_filelist = prev_backup_filelist; arg->prev_start_lsn = prev_backup_start_lsn; @@ -830,7 +862,7 @@ do_backup_instance(void) /* Scan backup PG_XLOG_DIR */ xlog_files_list = parray_new(); join_path_components(pg_xlog_path, database_path, PG_XLOG_DIR); - dir_list_file(xlog_files_list, pg_xlog_path, false, true, false); + dir_list_file(xlog_files_list, pg_xlog_path, false, true, false, 0); for (i = 0; i < parray_num(xlog_files_list); i++) { @@ -854,7 +886,12 @@ do_backup_instance(void) } /* Print the list of files to backup catalog */ - write_backup_filelist(¤t, backup_files_list, instance_config.pgdata); + write_backup_filelist(¤t, backup_files_list, instance_config.pgdata, + NULL, external_dirs); + + /* clean external directories list */ + if (external_dirs) + free_dir_list(external_dirs); /* Compute summary of size of regular files in the backup */ for (i = 0; i < parray_num(backup_files_list); i++) @@ -983,6 +1020,13 @@ do_backup(time_t start_time) StrNCpy(current.program_version, PROGRAM_VERSION, sizeof(current.program_version)); + /* Save list of external directories */ + if (instance_config.external_dir_str && + pg_strcasecmp(instance_config.external_dir_str, "none") != 0) + { + current.external_dir_str = instance_config.external_dir_str; + } + /* Create backup directory and BACKUP_CONTROL_FILE */ if (pgBackupCreateDir(¤t)) elog(ERROR, "Cannot create backup directory"); @@ -2036,7 +2080,7 @@ pg_stop_backup(pgBackup *backup) */ if (backup_files_list) { - file = pgFileNew(backup_label, true); + file = pgFileNew(backup_label, true, 0); calc_file_checksum(file); free(file->path); file->path = strdup(PG_BACKUP_LABEL_FILE); @@ -2080,7 +2124,7 @@ pg_stop_backup(pgBackup *backup) if (backup_files_list) { - file = 
pgFileNew(tablespace_map, true); + file = pgFileNew(tablespace_map, true, 0); if (S_ISREG(file->mode)) calc_file_checksum(file); free(file->path); @@ -2281,6 +2325,11 @@ backup_files(void *arg) if (S_ISREG(buf.st_mode)) { pgFile **prev_file = NULL; + char *external_path = NULL; + + if (file->external_dir_num) + external_path = parray_get(arguments->external_dirs, + file->external_dir_num - 1); /* Check that file exist in previous backup */ if (current.backup_mode != BACKUP_MODE_FULL) @@ -2288,11 +2337,13 @@ backup_files(void *arg) char *relative; pgFile key; - relative = GetRelativePath(file->path, arguments->from_root); + relative = GetRelativePath(file->path, file->external_dir_num ? + external_path : arguments->from_root); key.path = relative; + key.external_dir_num = file->external_dir_num; prev_file = (pgFile **) parray_bsearch(arguments->prev_filelist, - &key, pgFileComparePath); + &key, pgFileComparePathWithExternal); if (prev_file) /* File exists in previous backup */ file->exists_in_prev = true; @@ -2319,12 +2370,16 @@ backup_files(void *arg) continue; } } - else if (strcmp(file->name, "pg_control") == 0) + else if (!file->external_dir_num && + strcmp(file->name, "pg_control") == 0) copy_pgcontrol_file(arguments->from_root, arguments->to_root, file); else { + const char *src; + const char *dst; bool skip = false; + char external_dst[MAXPGPATH]; /* If non-data file has not changed since last backup... */ if (prev_file && file->exists_in_prev && @@ -2335,8 +2390,21 @@ backup_files(void *arg) if (EQ_TRADITIONAL_CRC32(file->crc, (*prev_file)->crc)) skip = true; /* ...skip copying file. */ } - if (skip || - !copy_file(arguments->from_root, arguments->to_root, file)) + /* Set file paths */ + if (file->external_dir_num) + { + makeExternalDirPathByNum(external_dst, + arguments->external_prefix, + file->external_dir_num); + src = external_path; + dst = external_dst; + } + else + { + src = arguments->from_root; + dst = arguments->to_root; + } + if (skip || !copy_file(src, dst, file)) { /* disappeared file not to be confused with 'not changed' */ if (file->write_size != FILE_NOT_FOUND) @@ -2933,3 +3001,42 @@ pg_ptrack_get_block(backup_files_arg *arguments, return result; } + +static void +check_external_for_tablespaces(parray *external_list) +{ + PGconn *conn; + PGresult *res; + int i = 0; + int j = 0; + char *tablespace_path = NULL; + char *query = "SELECT pg_catalog.pg_tablespace_location(oid)\n" + "FROM pg_tablespace\n" + "WHERE pg_catalog.pg_tablespace_location(oid) <> '';"; + + conn = backup_conn; + res = pgut_execute(conn, query, 0, NULL); + + /* Check successfull execution of query */ + if (!res) + elog(ERROR, "Failed to get list of tablespaces"); + + for (i = 0; i < res->ntups; i++) + { + tablespace_path = PQgetvalue(res, i, 0); + Assert (strlen(tablespace_path) > 0); + for (j = 0; j < parray_num(external_list); j++) + { + char *external_path = parray_get(external_list, j); + if (path_is_prefix_of_path(external_path, tablespace_path)) + elog(ERROR, "External directory path (-E option) \"%s\" " + "contains tablespace \"%s\"", + external_path, tablespace_path); + if (path_is_prefix_of_path(tablespace_path, external_path)) + elog(WARNING, "External directory path (-E option) \"%s\" " + "is in tablespace directory \"%s\"", + tablespace_path, external_path); + } + } + PQclear(res); +} diff --git a/src/catalog.c b/src/catalog.c index 59a0930e..86b51c87 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -473,7 +473,25 @@ pgBackupCreateDir(pgBackup *backup) { int i; char path[MAXPGPATH]; - 
char *subdirs[] = { DATABASE_DIR, NULL }; + parray *subdirs = parray_new(); + + parray_append(subdirs, pg_strdup(DATABASE_DIR)); + + /* Add external dirs containers */ + if (backup->external_dir_str) + { + parray *external_list; + + external_list = make_external_directory_list(backup->external_dir_str); + for (int i = 0; i < parray_num(external_list); i++) + { + char temp[MAXPGPATH]; + /* Numeration of externaldirs starts with 1 */ + makeExternalDirPathByNum(temp, EXTERNAL_DIR, i+1); + parray_append(subdirs, pg_strdup(temp)); + } + free_dir_list(external_list); + } pgBackupGetPath(backup, path, lengthof(path), NULL); @@ -483,12 +501,13 @@ pgBackupCreateDir(pgBackup *backup) dir_create_dir(path, DIR_PERMISSION); /* create directories for actual backup files */ - for (i = 0; subdirs[i]; i++) + for (i = 0; i < parray_num(subdirs); i++) { - pgBackupGetPath(backup, path, lengthof(path), subdirs[i]); + pgBackupGetPath(backup, path, lengthof(path), parray_get(subdirs, i)); dir_create_dir(path, DIR_PERMISSION); } + free_dir_list(subdirs); return 0; } @@ -566,6 +585,10 @@ pgBackupWriteControl(FILE *out, pgBackup *backup) /* print connection info except password */ if (backup->primary_conninfo) fprintf(out, "primary_conninfo = '%s'\n", backup->primary_conninfo); + + /* print external directories list */ + if (backup->external_dir_str) + fprintf(out, "external-dirs = '%s'\n", backup->external_dir_str); } /* @@ -612,7 +635,8 @@ write_backup(pgBackup *backup) * Output the list of files to backup catalog DATABASE_FILE_LIST */ void -write_backup_filelist(pgBackup *backup, parray *files, const char *root) +write_backup_filelist(pgBackup *backup, parray *files, const char *root, + const char *external_prefix, parray *external_list) { FILE *fp; char path[MAXPGPATH]; @@ -627,7 +651,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root) elog(ERROR, "Cannot open file list \"%s\": %s", path_temp, strerror(errno)); - print_file_list(fp, files, root); + print_file_list(fp, files, root, external_prefix, external_list); if (fflush(fp) != 0 || fsync(fileno(fp)) != 0 || @@ -692,6 +716,7 @@ readBackupControlFile(const char *path) {'u', 0, "compress-level", &backup->compress_level, SOURCE_FILE_STRICT}, {'b', 0, "from-replica", &backup->from_replica, SOURCE_FILE_STRICT}, {'s', 0, "primary-conninfo", &backup->primary_conninfo, SOURCE_FILE_STRICT}, + {'s', 0, "external-dirs", &backup->external_dir_str, SOURCE_FILE_STRICT}, {0} }; @@ -922,6 +947,7 @@ pgBackupInit(pgBackup *backup) backup->primary_conninfo = NULL; backup->program_version[0] = '\0'; backup->server_version[0] = '\0'; + backup->external_dir_str = NULL; } /* free pgBackup object */ @@ -931,6 +957,7 @@ pgBackupFree(void *backup) pgBackup *b = (pgBackup *) backup; pfree(b->primary_conninfo); + pfree(b->external_dir_str); pfree(backup); } diff --git a/src/configure.c b/src/configure.c index a9c80922..aea78244 100644 --- a/src/configure.c +++ b/src/configure.c @@ -61,6 +61,11 @@ ConfigOption instance_options[] = OPTION_INSTANCE_GROUP, 0, option_get_value }, #endif + { + 's', 'E', "external-dirs", + &instance_config.external_dir_str, SOURCE_CMD, 0, + OPTION_INSTANCE_GROUP, 0, option_get_value + }, /* Connection options */ { 's', 'd', "pgdatabase", diff --git a/src/delete.c b/src/delete.c index 9ee9b2d1..287e7377 100644 --- a/src/delete.c +++ b/src/delete.c @@ -290,7 +290,7 @@ delete_backup_files(pgBackup *backup) /* list files to be deleted */ files = parray_new(); pgBackupGetPath(backup, path, lengthof(path), NULL); - dir_list_file(files, 
path, false, true, true); + dir_list_file(files, path, false, true, true, 0); /* delete leaf node first */ parray_qsort(files, pgFileComparePathDesc); diff --git a/src/dir.c b/src/dir.c index dbf3f6c3..dcf1476d 100644 --- a/src/dir.c +++ b/src/dir.c @@ -121,14 +121,19 @@ static int BlackListCompare(const void *str1, const void *str2); static char dir_check_file(const char *root, pgFile *file); static void dir_list_file_internal(parray *files, const char *root, pgFile *parent, bool exclude, - bool omit_symlink, parray *black_list); + bool omit_symlink, parray *black_list, + int external_dir_num); static void list_data_directories(parray *files, const char *path, bool is_root, bool exclude); +static void opt_path_map(ConfigOption *opt, const char *arg, + TablespaceList *list, const char *type); /* Tablespace mapping */ static TablespaceList tablespace_dirs = {NULL, NULL}; static TablespaceCreatedList tablespace_created_dirs = {NULL, NULL}; +/* Extra directories mapping */ +static TablespaceList external_remap_list = {NULL, NULL}; /* * Create directory, also create parent directories if necessary. @@ -157,7 +162,7 @@ dir_create_dir(const char *dir, mode_t mode) } pgFile * -pgFileNew(const char *path, bool omit_symlink) +pgFileNew(const char *path, bool omit_symlink, int external_dir_num) { struct stat st; pgFile *file; @@ -175,6 +180,7 @@ pgFileNew(const char *path, bool omit_symlink) file = pgFileInit(path); file->size = st.st_size; file->mode = st.st_mode; + file->external_dir_num = external_dir_num; return file; } @@ -225,6 +231,7 @@ pgFileInit(const char *path) /* Number of blocks readed during backup */ file->n_blocks = BLOCKNUM_INVALID; file->compress_alg = NOT_DEFINED_COMPRESS; + file->external_dir_num = 0; return file; } @@ -345,6 +352,30 @@ pgFileComparePath(const void *f1, const void *f2) return strcmp(f1p->path, f2p->path); } +/* + * Compare two pgFile with their path and external_dir_num + * in ascending order of ASCII code. + */ +int +pgFileComparePathWithExternal(const void *f1, const void *f2) +{ + pgFile *f1p = *(pgFile **)f1; + pgFile *f2p = *(pgFile **)f2; + int res; + + res = strcmp(f1p->path, f2p->path); + if (!res) + { + if (f1p->external_dir_num > f2p->external_dir_num) + return 1; + else if (f1p->external_dir_num < f2p->external_dir_num) + return -1; + else + return 0; + } + return res; +} + /* Compare two pgFile with their path in descending order of ASCII code. */ int pgFileComparePathDesc(const void *f1, const void *f2) @@ -352,6 +383,16 @@ pgFileComparePathDesc(const void *f1, const void *f2) return -pgFileComparePath(f1, f2); } +/* + * Compare two pgFile with their path and external_dir_num + * in descending order of ASCII code. + */ +int +pgFileComparePathWithExternalDesc(const void *f1, const void *f2) +{ + return -pgFileComparePathWithExternal(f1, f2); +} + /* Compare two pgFile with their linked directory path. */ int pgFileCompareLinked(const void *f1, const void *f2) @@ -392,7 +433,7 @@ BlackListCompare(const void *str1, const void *str2) */ void dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink, - bool add_root) + bool add_root, int external_dir_num) { pgFile *file; parray *black_list = NULL; @@ -431,19 +472,24 @@ dir_list_file(parray *files, const char *root, bool exclude, bool omit_symlink, parray_qsort(black_list, BlackListCompare); } - file = pgFileNew(root, false); + file = pgFileNew(root, external_dir_num ? 
omit_symlink : false, omit_symlink); if (file == NULL) return; if (!S_ISDIR(file->mode)) { - elog(WARNING, "Skip \"%s\": unexpected file format", file->path); + if (external_dir_num) + elog(ERROR, " --external-dirs option \"%s\": directory or symbolic link expected", + file->path); + else + elog(WARNING, "Skip \"%s\": unexpected file format", file->path); return; } if (add_root) parray_append(files, file); - dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list); + dir_list_file_internal(files, root, file, exclude, omit_symlink, black_list, + external_dir_num); if (!add_root) pgFileFree(file); @@ -662,7 +708,8 @@ dir_check_file(const char *root, pgFile *file) */ static void dir_list_file_internal(parray *files, const char *root, pgFile *parent, - bool exclude, bool omit_symlink, parray *black_list) + bool exclude, bool omit_symlink, parray *black_list, + int external_dir_num) { DIR *dir; struct dirent *dent; @@ -692,7 +739,7 @@ dir_list_file_internal(parray *files, const char *root, pgFile *parent, join_path_components(child, parent->path, dent->d_name); - file = pgFileNew(child, omit_symlink); + file = pgFileNew(child, omit_symlink, external_dir_num); if (file == NULL) continue; @@ -749,7 +796,7 @@ dir_list_file_internal(parray *files, const char *root, pgFile *parent, */ if (S_ISDIR(file->mode)) dir_list_file_internal(files, root, file, exclude, omit_symlink, - black_list); + black_list, external_dir_num); } if (errno && errno != ENOENT) @@ -829,7 +876,7 @@ list_data_directories(parray *files, const char *path, bool is_root, { pgFile *dir; - dir = pgFileNew(path, false); + dir = pgFileNew(path, false, 0); parray_append(files, dir); } @@ -896,13 +943,14 @@ get_tablespace_created(const char *link) } /* - * Split argument into old_dir and new_dir and append to tablespace mapping + * Split argument into old_dir and new_dir and append to mapping * list. * * Copy of function tablespace_list_append() from pg_basebackup.c. */ -void -opt_tablespace_map(ConfigOption *opt, const char *arg) +static void +opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, + const char *type) { TablespaceListCell *cell = pgut_new(TablespaceListCell); char *dst; @@ -921,7 +969,7 @@ opt_tablespace_map(ConfigOption *opt, const char *arg) else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\')) { if (*cell->new_dir) - elog(ERROR, "multiple \"=\" signs in tablespace mapping\n"); + elog(ERROR, "multiple \"=\" signs in %s mapping\n", type); else dst = dst_ptr = cell->new_dir; } @@ -930,8 +978,8 @@ opt_tablespace_map(ConfigOption *opt, const char *arg) } if (!*cell->old_dir || !*cell->new_dir) - elog(ERROR, "invalid tablespace mapping format \"%s\", " - "must be \"OLDDIR=NEWDIR\"", arg); + elog(ERROR, "invalid %s mapping format \"%s\", " + "must be \"OLDDIR=NEWDIR\"", type, arg); /* * This check isn't absolutely necessary. But all tablespaces are created @@ -940,18 +988,32 @@ opt_tablespace_map(ConfigOption *opt, const char *arg) * consistent with the new_dir check. 
*/ if (!is_absolute_path(cell->old_dir)) - elog(ERROR, "old directory is not an absolute path in tablespace mapping: %s\n", - cell->old_dir); + elog(ERROR, "old directory is not an absolute path in %s mapping: %s\n", + type, cell->old_dir); if (!is_absolute_path(cell->new_dir)) - elog(ERROR, "new directory is not an absolute path in tablespace mapping: %s\n", - cell->new_dir); + elog(ERROR, "new directory is not an absolute path in %s mapping: %s\n", + type, cell->new_dir); - if (tablespace_dirs.tail) - tablespace_dirs.tail->next = cell; + if (list->tail) + list->tail->next = cell; else - tablespace_dirs.head = cell; - tablespace_dirs.tail = cell; + list->head = cell; + list->tail = cell; +} + +/* Parse tablespace mapping */ +void +opt_tablespace_map(ConfigOption *opt, const char *arg) +{ + opt_path_map(opt, arg, &tablespace_dirs, "tablespace"); +} + +/* Parse external directories mapping */ +void +opt_externaldir_map(ConfigOption *opt, const char *arg) +{ + opt_path_map(opt, arg, &external_remap_list, "external directory"); } /* @@ -1224,11 +1286,66 @@ check_tablespace_mapping(pgBackup *backup) parray_free(links); } +void +check_external_dir_mapping(pgBackup *backup) +{ + TablespaceListCell *cell; + parray *external_dirs_to_restore; + bool found; + int i; + + if (!backup->external_dir_str) + { + if (external_remap_list.head) + elog(ERROR, "--external-mapping option's old directory doesn't " + "have an entry in list of external directories of current " + "backup: \"%s\"", external_remap_list.head->old_dir); + return; + } + + external_dirs_to_restore = make_external_directory_list(backup->external_dir_str); + for (cell = external_remap_list.head; cell; cell = cell->next) + { + char *old_dir = cell->old_dir; + + found = false; + for (i = 0; i < parray_num(external_dirs_to_restore); i++) + { + char *external_dir = parray_get(external_dirs_to_restore, i); + if (strcmp(old_dir, external_dir) == 0) + { + found = true; + break; + } + } + if (!found) + elog(ERROR, "--external-mapping option's old directory doesn't " + "have an entry in list of external directories of current " + "backup: \"%s\"", cell->old_dir); + } +} + +char * +get_external_remap(char *current_dir) +{ + TablespaceListCell *cell; + + for (cell = external_remap_list.head; cell; cell = cell->next) + { + char *old_dir = cell->old_dir; + + if (strcmp(old_dir, current_dir) == 0) + return cell->new_dir; + } + return current_dir; +} + /* * Print backup content list. */ void -print_file_list(FILE *out, const parray *files, const char *root) +print_file_list(FILE *out, const parray *files, const char *root, + const char *external_prefix, parray *external_list) { size_t i; @@ -1241,14 +1358,20 @@ print_file_list(FILE *out, const parray *files, const char *root) /* omit root directory portion */ if (root && strstr(path, root) == path) path = GetRelativePath(path, root); + else if (file->external_dir_num && !external_prefix) + { + Assert(external_list); + path = GetRelativePath(path, parray_get(external_list, + file->external_dir_num - 1)); + } fprintf(out, "{\"path\":\"%s\", \"size\":\"" INT64_FORMAT "\", " "\"mode\":\"%u\", \"is_datafile\":\"%u\", " "\"is_cfs\":\"%u\", \"crc\":\"%u\", " - "\"compress_alg\":\"%s\"", + "\"compress_alg\":\"%s\", \"external_dir_num\":\"%d\"", path, file->write_size, file->mode, file->is_datafile ? 1 : 0, file->is_cfs ? 
1 : 0, file->crc, - deparse_compress_alg(file->compress_alg)); + deparse_compress_alg(file->compress_alg), file->external_dir_num); if (file->is_datafile) fprintf(out, ",\"segno\":\"%d\"", file->segno); @@ -1411,7 +1534,8 @@ bad_format: * If root is not NULL, path will be absolute path. */ parray * -dir_read_file_list(const char *root, const char *file_txt) +dir_read_file_list(const char *root, const char *external_prefix, + const char *file_txt) { FILE *fp; parray *files; @@ -1433,6 +1557,7 @@ dir_read_file_list(const char *root, const char *file_txt) mode, /* bit length of mode_t depends on platforms */ is_datafile, is_cfs, + external_dir_num, crc, segno, n_blocks; @@ -1445,8 +1570,16 @@ dir_read_file_list(const char *root, const char *file_txt) get_control_value(buf, "is_cfs", NULL, &is_cfs, false); get_control_value(buf, "crc", NULL, &crc, true); get_control_value(buf, "compress_alg", compress_alg_string, NULL, false); + get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false); - if (root) + if (external_dir_num && external_prefix) + { + char temp[MAXPGPATH]; + + makeExternalDirPathByNum(temp, external_prefix, external_dir_num); + join_path_components(filepath, temp, path); + } + else if (root) join_path_components(filepath, root, path); else strcpy(filepath, path); @@ -1459,6 +1592,7 @@ dir_read_file_list(const char *root, const char *file_txt) file->is_cfs = is_cfs ? true : false; file->crc = (pg_crc32) crc; file->compress_alg = parse_compress_alg(compress_alg_string); + file->external_dir_num = external_dir_num; /* * Optional fields @@ -1544,3 +1678,56 @@ pgFileSize(const char *path) return buf.st_size; } + +/* + * Construct parray containing external directories paths + * from string like /path1:/path2 + */ +parray * +make_external_directory_list(const char *colon_separated_dirs) +{ + char *p; + parray *list = parray_new(); + char *tmp = pg_strdup(colon_separated_dirs); + + p = strtok(tmp,":"); + while(p!=NULL) + { + if (is_absolute_path(p)) + parray_append(list, pg_strdup(p)); + else + elog(ERROR, "External directory \"%s\" is not an absolute path", p); + p=strtok(NULL,":"); + } + pfree(tmp); + parray_qsort(list, BlackListCompare); + return list; +} + +/* Free memory of parray containing strings */ +void +free_dir_list(parray *list) +{ + parray_walk(list, pfree); + parray_free(list); +} + +/* Append to string "path_prefix" int "dir_num" */ +void +makeExternalDirPathByNum(char *ret_path, const char *path_prefix, + const int dir_num) +{ + sprintf(ret_path, "%s%d", path_prefix, dir_num); +} + +/* Check if "dir" presents in "dirs_list" */ +bool +backup_contains_external(const char *dir, parray *dirs_list) +{ + void *search_result; + + if (!dirs_list) /* There is no external dirs in backup */ + return false; + search_result = parray_bsearch(dirs_list, dir, BlackListCompare); + return search_result != NULL; +} diff --git a/src/help.c b/src/help.c index 270686b0..549a1f28 100644 --- a/src/help.c +++ b/src/help.c @@ -119,16 +119,19 @@ help_pg_probackup(void) printf(_(" [--master-port=port] [--master-user=user_name]\n")); printf(_(" [--replica-timeout=timeout]\n")); printf(_(" [--skip-block-validation]\n")); + printf(_(" [--external-dirs=external-directory-path]\n")); printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [--progress]\n")); printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n")); printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n")); + printf(_(" 
[--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--immediate] [--recovery-target-name=target-name]\n")); printf(_(" [--recovery-target-action=pause|promote|shutdown]\n")); printf(_(" [--restore-as-replica]\n")); printf(_(" [--no-validate]\n")); printf(_(" [--skip-block-validation]\n")); + printf(_(" [--skip-external-dirs]\n")); printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); @@ -207,7 +210,8 @@ help_backup(void) printf(_(" [--master-db=db_name] [--master-host=host_name]\n")); printf(_(" [--master-port=port] [--master-user=user_name]\n")); printf(_(" [--replica-timeout=timeout]\n")); - printf(_(" [--skip-block-validation]\n\n")); + printf(_(" [--skip-block-validation]\n")); + printf(_(" [-E external-dirs=external-directory-path]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n")); @@ -221,6 +225,8 @@ help_backup(void) printf(_(" --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n")); printf(_(" --progress show progress\n")); printf(_(" --skip-block-validation set to validate only file-level checksum\n")); + printf(_(" -E --external-dirs=external-directory-path\n")); + printf(_(" backup some directory not from pgdata \n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); @@ -282,10 +288,12 @@ help_restore(void) printf(_(" [-D pgdata-path] [-i backup-id] [--progress]\n")); printf(_(" [--time=time|--xid=xid|--lsn=lsn [--inclusive=boolean]]\n")); printf(_(" [--timeline=timeline] [-T OLDDIR=NEWDIR]\n")); + printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--immediate] [--recovery-target-name=target-name]\n")); printf(_(" [--recovery-target-action=pause|promote|shutdown]\n")); - printf(_(" [--restore-as-replica] [--no-validate]\n\n")); - printf(_(" [--skip-block-validation]\n\n")); + printf(_(" [--restore-as-replica] [--no-validate]\n")); + printf(_(" [--skip-block-validation]\n")); + printf(_(" [--skip-external-dirs]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -301,6 +309,8 @@ help_restore(void) printf(_(" --timeline=timeline recovering into a particular timeline\n")); printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n")); printf(_(" relocate the tablespace from directory OLDDIR to NEWDIR\n")); + printf(_(" --external-mapping=OLDDIR=NEWDIR\n")); + printf(_(" relocate the external directory from OLDDIR to NEWDIR\n")); printf(_(" --immediate end recovery as soon as a consistent state is reached\n")); printf(_(" --recovery-target-name=target-name\n")); @@ -313,6 +323,7 @@ help_restore(void) printf(_(" to ease setting up a standby server\n")); printf(_(" --no-validate disable backup validation during restore\n")); printf(_(" --skip-block-validation set to validate only file-level checksum\n")); + printf(_(" --skip-external-dirs do not restore all external directories\n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); @@ -490,11 +501,14 @@ help_set_config(void) printf(_(" [-d dbname] [-h host] [-p port] [-U username]\n")); printf(_(" [--master-db=db_name] [--master-host=host_name]\n")); printf(_(" [--master-port=port] [--master-user=user_name]\n")); - printf(_(" [--replica-timeout=timeout]\n\n")); - printf(_(" 
[--archive-timeout=timeout]\n\n")); + printf(_(" [--replica-timeout=timeout]\n")); + printf(_(" [--archive-timeout=timeout]\n")); + printf(_(" [-E external-dirs=external-directory-path]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -E --external-dirs=external-directory-path\n")); + printf(_(" backup some directory not from pgdata \n")); printf(_("\n Logging options:\n")); printf(_(" --log-level-console=log-level-console\n")); @@ -562,11 +576,14 @@ static void help_add_instance(void) { printf(_("%s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n\n")); + printf(_(" --instance=instance_name\n")); + printf(_(" -E external-dirs=external-directory-path\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" --instance=instance_name name of the new instance\n")); + printf(_(" -E --external-dirs=external-directory-path\n")); + printf(_(" backup some directory not from pgdata \n")); } static void diff --git a/src/merge.c b/src/merge.c index 2967f360..5726e36c 100644 --- a/src/merge.c +++ b/src/merge.c @@ -18,12 +18,14 @@ typedef struct { parray *to_files; parray *files; + parray *from_external; pgBackup *to_backup; pgBackup *from_backup; - const char *to_root; const char *from_root; + const char *to_external_prefix; + const char *from_external_prefix; /* * Return value from the thread. @@ -34,6 +36,11 @@ typedef struct static void merge_backups(pgBackup *backup, pgBackup *next_backup); static void *merge_files(void *arg); +static void +reorder_external_dirs(pgBackup *to_backup, parray *to_external, + parray *from_external); +static int +get_external_index(const char *key, const parray *list); /* * Implementation of MERGE command. @@ -159,11 +166,15 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) *from_backup_id = base36enc_dup(from_backup->start_time); char to_backup_path[MAXPGPATH], to_database_path[MAXPGPATH], + to_external_prefix[MAXPGPATH], from_backup_path[MAXPGPATH], from_database_path[MAXPGPATH], + from_external_prefix[MAXPGPATH], control_file[MAXPGPATH]; parray *files, *to_files; + parray *to_external = NULL, + *from_external = NULL; pthread_t *threads = NULL; merge_files_arg *threads_args = NULL; int i; @@ -201,16 +212,20 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) pgBackupGetPath(to_backup, to_backup_path, lengthof(to_backup_path), NULL); pgBackupGetPath(to_backup, to_database_path, lengthof(to_database_path), DATABASE_DIR); + pgBackupGetPath(to_backup, to_external_prefix, lengthof(to_database_path), + EXTERNAL_DIR); pgBackupGetPath(from_backup, from_backup_path, lengthof(from_backup_path), NULL); pgBackupGetPath(from_backup, from_database_path, lengthof(from_database_path), DATABASE_DIR); + pgBackupGetPath(from_backup, from_external_prefix, lengthof(from_database_path), + EXTERNAL_DIR); /* * Get list of files which will be modified or removed. 
*/ pgBackupGetPath(to_backup, control_file, lengthof(control_file), DATABASE_FILE_LIST); - to_files = dir_read_file_list(NULL, control_file); + to_files = dir_read_file_list(NULL, NULL, control_file); /* To delete from leaf, sort in reversed order */ parray_qsort(to_files, pgFileComparePathDesc); /* @@ -218,7 +233,7 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) */ pgBackupGetPath(from_backup, control_file, lengthof(control_file), DATABASE_FILE_LIST); - files = dir_read_file_list(NULL, control_file); + files = dir_read_file_list(NULL, NULL, control_file); /* sort by size for load balancing */ parray_qsort(files, pgFileCompareSize); @@ -237,11 +252,35 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); threads_args = (merge_files_arg *) palloc(sizeof(merge_files_arg) * num_threads); + /* Create external directories lists */ + if (to_backup->external_dir_str) + to_external = make_external_directory_list(to_backup->external_dir_str); + if (from_backup->external_dir_str) + from_external = make_external_directory_list(from_backup->external_dir_str); + + /* + * Rename external directoties in to_backup (if exists) + * according to numeration of external dirs in from_backup. + */ + if (to_external) + reorder_external_dirs(to_backup, to_external, from_external); + /* Setup threads */ for (i = 0; i < parray_num(files); i++) { pgFile *file = (pgFile *) parray_get(files, i); + /* if the entry was an external directory, create it in the backup */ + if (file->external_dir_num && S_ISDIR(file->mode)) + { + char dirpath[MAXPGPATH]; + char new_container[MAXPGPATH]; + + makeExternalDirPathByNum(new_container, to_external_prefix, + file->external_dir_num); + join_path_components(dirpath, new_container, file->path); + dir_create_dir(dirpath, DIR_PERMISSION); + } pg_atomic_init_flag(&file->lock); } @@ -256,6 +295,9 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) arg->from_backup = from_backup; arg->to_root = to_database_path; arg->from_root = from_database_path; + arg->from_external = from_external; + arg->to_external_prefix = to_external_prefix; + arg->from_external_prefix = from_external_prefix; /* By default there are some error */ arg->ret = 1; @@ -285,6 +327,9 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) to_backup->stop_lsn = from_backup->stop_lsn; to_backup->recovery_time = from_backup->recovery_time; to_backup->recovery_xid = from_backup->recovery_xid; + pfree(to_backup->external_dir_str); + to_backup->external_dir_str = from_backup->external_dir_str; + from_backup->external_dir_str = NULL; /* For safe pgBackupFree() */ to_backup->merge_time = merge_time; to_backup->end_time = time(NULL); @@ -312,7 +357,8 @@ merge_backups(pgBackup *to_backup, pgBackup *from_backup) else to_backup->wal_bytes = BYTES_INVALID; - write_backup_filelist(to_backup, files, from_database_path); + write_backup_filelist(to_backup, files, from_database_path, + from_external_prefix, NULL); write_backup(to_backup); delete_source_backup: @@ -330,6 +376,14 @@ delete_source_backup: { pgFile *file = (pgFile *) parray_get(to_files, i); + if (file->external_dir_num && to_external) + { + char *dir_name = parray_get(to_external, file->external_dir_num - 1); + if (backup_contains_external(dir_name, from_external)) + /* Dir already removed*/ + continue; + } + if (parray_bsearch(files, file, pgFileComparePathDesc) == NULL) { char to_file_path[MAXPGPATH]; @@ -415,7 +469,7 @@ merge_files(void *arg) i + 1, num_files, file->path); 
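/*
 * Illustrative sketch only, not part of the patch: the on-disk "container"
 * layout that merge (and backup/restore) rely on.  Files taken from the i-th
 * external directory carry external_dir_num == i (numbering is 1-based;
 * 0 means a regular PGDATA file) and are stored under
 * <backup>/external_directories/externaldir<i>, built with the same "%s%d"
 * scheme as makeExternalDirPathByNum() in dir.c.  The backup id, instance
 * name and directory paths below are made up for the example.
 */
#include <stdio.h>

#define EXTERNAL_DIR "external_directories/externaldir"

int
main(void)
{
	/* external directories as recorded in external_dir_str, sorted */
	const char *external_dirs[] = {"/mnt/etc", "/mnt/scripts"};
	char		container[1024];
	int			i;

	for (i = 0; i < 2; i++)
	{
		/* files from external_dirs[i] get external_dir_num = i + 1 */
		snprintf(container, sizeof(container), "backups/node/QWERTY/%s%d",
				 EXTERNAL_DIR, i + 1);
		printf("%s holds a copy of %s\n", container, external_dirs[i]);
	}
	return 0;
}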
res_file = parray_bsearch(argument->to_files, file, - pgFileComparePathDesc); + pgFileComparePathWithExternalDesc); to_file = (res_file) ? *res_file : NULL; join_path_components(to_file_path, argument->to_root, file->path); @@ -453,7 +507,17 @@ merge_files(void *arg) } /* We need to make full path, file object has relative path */ - join_path_components(from_file_path, argument->from_root, file->path); + if (file->external_dir_num) + { + char temp[MAXPGPATH]; + makeExternalDirPathByNum(temp, argument->from_external_prefix, + file->external_dir_num); + + join_path_components(from_file_path, temp, file->path); + } + else + join_path_components(from_file_path, argument->from_root, + file->path); prev_file_path = file->path; file->path = from_file_path; @@ -559,6 +623,23 @@ merge_files(void *arg) file->crc = pgFileGetCRC(to_file_path, true, true, NULL); } } + else if (file->external_dir_num) + { + char from_root[MAXPGPATH]; + char to_root[MAXPGPATH]; + int new_dir_num; + char *file_external_path = parray_get(argument->from_external, + file->external_dir_num - 1); + + Assert(argument->from_external); + new_dir_num = get_external_index(file_external_path, + argument->from_external); + makeExternalDirPathByNum(from_root, argument->from_external_prefix, + file->external_dir_num); + makeExternalDirPathByNum(to_root, argument->to_external_prefix, + new_dir_num); + copy_file(from_root, to_root, file); + } else if (strcmp(file->name, "pg_control") == 0) copy_pgcontrol_file(argument->from_root, argument->to_root, file); else @@ -583,3 +664,66 @@ merge_files(void *arg) return NULL; } + +/* Recursively delete a directory and its contents */ +static void +remove_dir_with_files(const char *path) +{ + parray *files = parray_new(); + dir_list_file(files, path, true, true, true, 0); + parray_qsort(files, pgFileComparePathDesc); + for (int i = 0; i < parray_num(files); i++) + { + pgFile *file = (pgFile *) parray_get(files, i); + + pgFileDelete(file); + elog(VERBOSE, "Deleted \"%s\"", file->path); + } +} + +/* Get index of external directory */ +static int +get_external_index(const char *key, const parray *list) +{ + if (!list) /* Nowhere to search */ + return -1; + for (int i = 0; i < parray_num(list); i++) + { + if (strcmp(key, parray_get(list, i)) == 0) + return i + 1; + } + return -1; +} + +/* Rename directories in to_backup according to order in from_external */ +static void +reorder_external_dirs(pgBackup *to_backup, parray *to_external, + parray *from_external) +{ + char externaldir_template[MAXPGPATH]; + + pgBackupGetPath(to_backup, externaldir_template, + lengthof(externaldir_template), EXTERNAL_DIR); + for (int i = 0; i < parray_num(to_external); i++) + { + int from_num = get_external_index(parray_get(to_external, i), + from_external); + if (from_num == -1) + { + char old_path[MAXPGPATH]; + makeExternalDirPathByNum(old_path, externaldir_template, i + 1); + remove_dir_with_files(old_path); + } + else if (from_num != i + 1) + { + char old_path[MAXPGPATH]; + char new_path[MAXPGPATH]; + makeExternalDirPathByNum(old_path, externaldir_template, i + 1); + makeExternalDirPathByNum(new_path, externaldir_template, from_num); + elog(VERBOSE, "Rename %s to %s", old_path, new_path); + if (rename (old_path, new_path) == -1) + elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s", + old_path, new_path, strerror(errno)); + } + } +} diff --git a/src/pg_probackup.c b/src/pg_probackup.c index a56658cf..4f19f89e 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -54,6 +54,8 @@ char 
backup_instance_path[MAXPGPATH]; */ char arclog_path[MAXPGPATH] = ""; +/* colon separated external directories list ("/path1:/path2") */ +char *externaldir = NULL; /* common options */ static char *backup_id_string = NULL; int num_threads = 1; @@ -85,6 +87,7 @@ bool restore_as_replica = false; bool restore_no_validate = false; bool skip_block_validation = false; +bool skip_external_dirs = false; /* delete options */ bool delete_wal = false; @@ -145,6 +148,7 @@ static ConfigOption cmd_options[] = { 's', 138, "inclusive", &target_inclusive, SOURCE_CMD_STRICT }, { 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT }, { 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMD_STRICT }, + { 'f', 155, "external-mapping", opt_externaldir_map, SOURCE_CMD_STRICT }, { 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT }, { 's', 141, "recovery-target-name", &target_name, SOURCE_CMD_STRICT }, { 's', 142, "recovery-target-action", &target_action, SOURCE_CMD_STRICT }, @@ -152,6 +156,7 @@ static ConfigOption cmd_options[] = { 'b', 143, "no-validate", &restore_no_validate, SOURCE_CMD_STRICT }, { 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT }, { 'b', 154, "skip-block-validation", &skip_block_validation, SOURCE_CMD_STRICT }, + { 'b', 156, "skip-external-dirs", &skip_external_dirs, SOURCE_CMD_STRICT }, /* delete options */ { 'b', 145, "wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 146, "expired", &delete_expired, SOURCE_CMD_STRICT }, diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 78d60928..2cceaaed 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -12,6 +12,7 @@ #include "postgres_fe.h" #include "libpq-fe.h" +#include "libpq-int.h" #include "access/xlog_internal.h" #include "utils/pg_crc.h" @@ -32,7 +33,7 @@ #include "datapagemap.h" /* Directory/File names */ -#define DATABASE_DIR "database" +#define DATABASE_DIR "database" #define BACKUPS_DIR "backups" #if PG_VERSION_NUM >= 100000 #define PG_XLOG_DIR "pg_wal" @@ -50,6 +51,7 @@ #define PG_BACKUP_LABEL_FILE "backup_label" #define PG_BLACK_LIST "black_list" #define PG_TABLESPACE_MAP_FILE "tablespace_map" +#define EXTERNAL_DIR "external_directories/externaldir" /* Timeout defaults */ #define ARCHIVE_TIMEOUT_DEFAULT 300 @@ -123,6 +125,7 @@ typedef struct pgFile int n_blocks; /* size of the file in blocks, readed during DELTA backup */ bool is_cfs; /* Flag to distinguish files compressed by CFS*/ bool is_database; + int external_dir_num; /* Number of external directory. 
0 if not external */ bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */ CompressAlg compress_alg; /* compression algorithm applied to the file */ volatile pg_atomic_flag lock; /* lock for synchronization of parallel threads */ @@ -181,6 +184,7 @@ typedef struct InstanceConfig uint32 xlog_seg_size; char *pgdata; + char *external_dir_str; const char *pgdatabase; const char *pghost; const char *pgport; @@ -262,6 +266,8 @@ struct pgBackup pgBackup *parent_backup_link; char *primary_conninfo; /* Connection parameters of the backup * in the format suitable for recovery.conf */ + char *external_dir_str; /* List of external directories, + * separated by ':' */ }; /* Recovery target for restore and validate subcommands */ @@ -292,9 +298,11 @@ typedef struct { const char *from_root; const char *to_root; + const char *external_prefix; parray *files_list; parray *prev_filelist; + parray *external_dirs; XLogRecPtr prev_start_lsn; PGconn *backup_conn; @@ -372,6 +380,7 @@ extern bool exclusive_backup; /* restore options */ extern bool restore_as_replica; extern bool skip_block_validation; +extern bool skip_external_dirs; /* delete options */ extern bool delete_wal; @@ -475,7 +484,8 @@ extern pgBackup *catalog_get_last_data_backup(parray *backup_list, TimeLineID tli); extern void pgBackupWriteControl(FILE *out, pgBackup *backup); extern void write_backup_filelist(pgBackup *backup, parray *files, - const char *root); + const char *root, const char *external_prefix, + parray *external_list); extern void pgBackupGetPath(const pgBackup *backup, char *path, size_t len, const char *subdir); @@ -500,17 +510,27 @@ extern const char* deparse_compress_alg(int alg); /* in dir.c */ extern void dir_list_file(parray *files, const char *root, bool exclude, - bool omit_symlink, bool add_root); + bool omit_symlink, bool add_root, int external_dir_num); extern void create_data_directories(const char *data_dir, const char *backup_dir, bool extract_tablespaces); extern void read_tablespace_map(parray *files, const char *backup_dir); extern void opt_tablespace_map(ConfigOption *opt, const char *arg); +extern void opt_externaldir_map(ConfigOption *opt, const char *arg); extern void check_tablespace_mapping(pgBackup *backup); +extern void check_external_dir_mapping(pgBackup *backup); +extern char *get_external_remap(char *current_dir); -extern void print_file_list(FILE *out, const parray *files, const char *root); -extern parray *dir_read_file_list(const char *root, const char *file_txt); +extern void print_file_list(FILE *out, const parray *files, const char *root, + const char *external_prefix, parray *external_list); +extern parray *dir_read_file_list(const char *root, const char *external_prefix, + const char *file_txt); +extern parray *make_external_directory_list(const char *colon_separated_dirs); +extern void free_dir_list(parray *list); +extern void makeExternalDirPathByNum(char *ret_path, const char *pattern_path, + const int dir_num); +extern bool backup_contains_external(const char *dir, parray *dirs_list); extern int dir_create_dir(const char *path, mode_t mode); extern bool dir_is_empty(const char *path); @@ -518,14 +538,16 @@ extern bool dir_is_empty(const char *path); extern bool fileExists(const char *path); extern size_t pgFileSize(const char *path); -extern pgFile *pgFileNew(const char *path, bool omit_symlink); +extern pgFile *pgFileNew(const char *path, bool omit_symlink, int external_dir_num); extern pgFile *pgFileInit(const char *path); extern void 
pgFileDelete(pgFile *file); extern void pgFileFree(void *file); extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool raise_on_deleted, size_t *bytes_read); extern int pgFileComparePath(const void *f1, const void *f2); +extern int pgFileComparePathWithExternal(const void *f1, const void *f2); extern int pgFileComparePathDesc(const void *f1, const void *f2); +extern int pgFileComparePathWithExternalDesc(const void *f1, const void *f2); extern int pgFileCompareLinked(const void *f1, const void *f2); extern int pgFileCompareSize(const void *f1, const void *f2); diff --git a/src/restore.c b/src/restore.c index cdf60753..ae379e9f 100644 --- a/src/restore.c +++ b/src/restore.c @@ -21,6 +21,9 @@ typedef struct { parray *files; pgBackup *backup; + parray *req_external_dirs; + parray *cur_external_dirs; + char *external_prefix; /* * Return value from the thread. @@ -29,7 +32,7 @@ typedef struct int ret; } restore_files_arg; -static void restore_backup(pgBackup *backup); +static void restore_backup(pgBackup *backup, const char *external_dir_str); static void create_recovery_conf(time_t backup_id, pgRecoveryTarget *rt, pgBackup *backup); @@ -37,7 +40,6 @@ static parray *read_timeline_history(TimeLineID targetTLI); static void *restore_files(void *arg); static void remove_deleted_files(pgBackup *backup); - /* * Entry point of pg_probackup RESTORE and VALIDATE subcommands. */ @@ -281,7 +283,10 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, * i.e. empty or not exist. */ if (is_restore) + { check_tablespace_mapping(dest_backup); + check_external_dir_mapping(dest_backup); + } /* At this point we are sure that parent chain is whole * so we can build separate array, containing all needed backups, @@ -427,7 +432,7 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, if (rt->restore_no_validate && !lock_backup(backup)) elog(ERROR, "Cannot lock backup directory"); - restore_backup(backup); + restore_backup(backup, dest_backup->external_dir_str); } /* @@ -455,19 +460,23 @@ do_restore_or_validate(time_t target_backup_id, pgRecoveryTarget *rt, * Restore one backup. */ void -restore_backup(pgBackup *backup) +restore_backup(pgBackup *backup, const char *external_dir_str) { char timestamp[100]; char this_backup_path[MAXPGPATH]; char database_path[MAXPGPATH]; + char external_prefix[MAXPGPATH]; char list_path[MAXPGPATH]; parray *files; + parray *requested_external_dirs = NULL; + parray *current_external_dirs = NULL; int i; /* arrays with meta info for multi threaded backup */ pthread_t *threads; restore_files_arg *threads_args; bool restore_isok = true; + if (backup->status != BACKUP_STATUS_OK) elog(ERROR, "Backup %s cannot be restored because it is not valid", base36enc(backup->start_time)); @@ -492,23 +501,72 @@ restore_backup(pgBackup *backup) pgBackupGetPath(backup, this_backup_path, lengthof(this_backup_path), NULL); create_data_directories(instance_config.pgdata, this_backup_path, true); + if(external_dir_str && !skip_external_dirs) + { + requested_external_dirs = make_external_directory_list(external_dir_str); + for (i = 0; i < parray_num(requested_external_dirs); i++) + { + char *external_path = parray_get(requested_external_dirs, i); + external_path = get_external_remap(external_path); + dir_create_dir(external_path, DIR_PERMISSION); + } + } + + if(backup->external_dir_str) + current_external_dirs = make_external_directory_list(backup->external_dir_str); + /* * Get list of files which need to be restored. 
*/ pgBackupGetPath(backup, database_path, lengthof(database_path), DATABASE_DIR); + pgBackupGetPath(backup, external_prefix, lengthof(external_prefix), + EXTERNAL_DIR); pgBackupGetPath(backup, list_path, lengthof(list_path), DATABASE_FILE_LIST); - files = dir_read_file_list(database_path, list_path); + files = dir_read_file_list(database_path, external_prefix, list_path); - threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); - threads_args = (restore_files_arg *) palloc(sizeof(restore_files_arg)*num_threads); + /* Restore directories in do_backup_instance way */ + parray_qsort(files, pgFileComparePath); - /* setup threads */ + /* + * Make external directories before restore + * and setup threads at the same time + */ for (i = 0; i < parray_num(files); i++) { - pgFile *file = (pgFile *) parray_get(files, i); + pgFile *file = (pgFile *) parray_get(files, i); + /* If the entry was an external directory, create it in the backup */ + if (file->external_dir_num && S_ISDIR(file->mode)) + { + char dirpath[MAXPGPATH]; + char *dir_name; + char *external_path; + + if (!current_external_dirs || + parray_num(current_external_dirs) < file->external_dir_num - 1) + elog(ERROR, "Inconsistent external directory backup metadata"); + + external_path = parray_get(current_external_dirs, + file->external_dir_num - 1); + if (backup_contains_external(external_path, requested_external_dirs)) + { + char container_dir[MAXPGPATH]; + + external_path = get_external_remap(external_path); + makeExternalDirPathByNum(container_dir, external_prefix, + file->external_dir_num); + dir_name = GetRelativePath(file->path, container_dir); + elog(VERBOSE, "Create directory \"%s\"", dir_name); + join_path_components(dirpath, external_path, dir_name); + dir_create_dir(dirpath, DIR_PERMISSION); + } + } + + /* setup threads */ pg_atomic_clear_flag(&file->lock); } + threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); + threads_args = (restore_files_arg *) palloc(sizeof(restore_files_arg)*num_threads); /* Restore files into target directory */ thread_interrupted = false; @@ -518,6 +576,9 @@ restore_backup(pgBackup *backup) arg->files = files; arg->backup = backup; + arg->req_external_dirs = requested_external_dirs; + arg->cur_external_dirs = current_external_dirs; + arg->external_prefix = external_prefix; /* By default there are some error */ threads_args[i].ret = 1; @@ -561,16 +622,18 @@ remove_deleted_files(pgBackup *backup) parray *files; parray *files_restored; char filelist_path[MAXPGPATH]; + char external_prefix[MAXPGPATH]; int i; pgBackupGetPath(backup, filelist_path, lengthof(filelist_path), DATABASE_FILE_LIST); + pgBackupGetPath(backup, external_prefix, lengthof(external_prefix), EXTERNAL_DIR); /* Read backup's filelist using target database path as base path */ - files = dir_read_file_list(instance_config.pgdata, filelist_path); + files = dir_read_file_list(instance_config.pgdata, external_prefix, filelist_path); parray_qsort(files, pgFileComparePathDesc); /* Get list of files actually existing in target database */ files_restored = parray_new(); - dir_list_file(files_restored, instance_config.pgdata, true, true, false); + dir_list_file(files_restored, instance_config.pgdata, true, true, false, 0); /* To delete from leaf, sort in reversed order */ parray_qsort(files_restored, pgFileComparePathDesc); @@ -674,6 +737,17 @@ restore_files(void *arg) false, parse_program_version(arguments->backup->program_version)); } + else if (file->external_dir_num) + { + char *external_path = 
parray_get(arguments->cur_external_dirs, + file->external_dir_num - 1); + if (backup_contains_external(external_path, + arguments->req_external_dirs)) + { + external_path = get_external_remap(external_path); + copy_file(arguments->external_prefix, external_path, file); + } + } else if (strcmp(file->name, "pg_control") == 0) copy_pgcontrol_file(from_root, instance_config.pgdata, file); else diff --git a/src/show.c b/src/show.c index ffcd0038..083962e5 100644 --- a/src/show.c +++ b/src/show.c @@ -635,6 +635,10 @@ show_instance_json(parray *backup_list) json_add_value(buf, "primary_conninfo", backup->primary_conninfo, json_level, true); + if (backup->external_dir_str) + json_add_value(buf, "external-dirs", backup->external_dir_str, + json_level, true); + json_add_value(buf, "status", status2str(backup->status), json_level, true); diff --git a/src/validate.c b/src/validate.c index 6be5716f..7d5e94f4 100644 --- a/src/validate.c +++ b/src/validate.c @@ -44,6 +44,7 @@ void pgBackupValidate(pgBackup *backup) { char base_path[MAXPGPATH]; + char external_prefix[MAXPGPATH]; char path[MAXPGPATH]; parray *files; bool corrupted = false; @@ -99,8 +100,9 @@ pgBackupValidate(pgBackup *backup) elog(WARNING, "Invalid backup_mode of backup %s", base36enc(backup->start_time)); pgBackupGetPath(backup, base_path, lengthof(base_path), DATABASE_DIR); + pgBackupGetPath(backup, external_prefix, lengthof(external_prefix), EXTERNAL_DIR); pgBackupGetPath(backup, path, lengthof(path), DATABASE_FILE_LIST); - files = dir_read_file_list(base_path, path); + files = dir_read_file_list(base_path, external_prefix, path); /* setup threads */ for (i = 0; i < parray_num(files); i++) @@ -248,7 +250,8 @@ pgBackupValidateFiles(void *arg) * Starting from 2.0.25 we calculate crc of pg_control differently. */ if (arguments->backup_version >= 20025 && - strcmp(file->name, "pg_control") == 0) + strcmp(file->name, "pg_control") == 0 && + !file->external_dir_num) crc = get_pgcontrol_checksum(arguments->base_path); else crc = pgFileGetCRC(file->path, diff --git a/tests/__init__.py b/tests/__init__.py index fdba4164..3160d223 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -5,7 +5,7 @@ from . 
import init_test, merge, option_test, show_test, compatibility, \ retention_test, pgpro560, pgpro589, pgpro2068, false_positive, replica, \ compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \ - locking, remote + locking, remote, external def load_tests(loader, tests, pattern): @@ -40,6 +40,7 @@ def load_tests(loader, tests, pattern): suite.addTests(loader.loadTestsFromModule(pgpro560)) suite.addTests(loader.loadTestsFromModule(pgpro589)) suite.addTests(loader.loadTestsFromModule(time_stamp)) + suite.addTests(loader.loadTestsFromModule(external)) return suite diff --git a/tests/external.py b/tests/external.py new file mode 100644 index 00000000..1327402c --- /dev/null +++ b/tests/external.py @@ -0,0 +1,1240 @@ +import unittest +import os +from time import sleep +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +from .helpers.cfs_helpers import find_by_name +import shutil + + +module_name = 'external' + + +class ExternalTest(ProbackupTest, unittest.TestCase): + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_external_simple(self): + """ + make node, create external directory, take backup + with external directory, restore backup, check that + external directory was successfully copied + """ + fname = self.id().split('.')[3] + core_dir = os.path.join(self.tmp_path, module_name, fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2'}) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + external_dir = self.get_tblspace_path(node, 'somedirectory') + + # create directory in external_directory + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # take FULL backup with external directory pointing to a file + file_path = os.path.join(core_dir, 'file') + open(file_path,"w+") + + try: + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=[ + '--external-dirs={0}'.format(file_path)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because external dir point to a file" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: --external-dirs option "{0}": ' + 'directory or symbolic link expected\n'.format(file_path), + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + # Fill external directories + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir, options=["-j", "4"]) + + # Full backup with external dir + self.backup_node( + backup_dir, 'node', node, + options=[ + '--external-dirs={0}'.format(external_dir)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node.cleanup() + shutil.rmtree(external_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + # 
@unittest.expectedFailure + def test_external_none(self): + """ + make node, create external directory, take backup + with external directory, take delta backup with --external-dirs=none, + restore delta backup, check that + external directory was not copied + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2'}) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + external_dir = self.get_tblspace_path(node, 'somedirectory') + + # create directory in external_directory + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + # Fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir, options=["-j", "4"]) + + # Full backup with external dir + self.backup_node( + backup_dir, 'node', node, + options=[ + '--external-dirs={0}'.format(external_dir)]) + + # Delta backup without external directory + self.backup_node( + backup_dir, 'node', node, backup_type="delta") +# options=['--external-dirs=none']) + + shutil.rmtree(external_dir, ignore_errors=True) + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node.cleanup() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_external_dir_mapping(self): + """ + make node, take full backup, check that restore with + external-dir mapping will end with error, take page backup, + check that restore with external-dir mapping will end with + success + """ + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2'}) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + # Fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') + + try: + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2_old, external_dir2_new)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting 
Error because tablespace mapping is incorrect" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'ERROR: --external-mapping option' in e.message and + 'have an entry in list of external directories' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format(external_dir1_old, external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format( + external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format( + external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + # @unittest.expectedFailure + def test_backup_multiple_external(self): + """check that cmdline has priority over config""" + fname = self.id().split('.')[3] + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2'}) + + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + # FULL backup + self.backup_node( + backup_dir, 'node', node, backup_type="full", + options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.set_config( + backup_dir, 'node', + options=[ + '-E', external_dir1_old]) + + # cmdline option MUST override options in config + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}".format(external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs', 'external_dir1']) + + node.cleanup() + + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=["-j", "4"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_backward_compatibility(self): + """ + take backup with old binary without external dirs support + take delta backup with new binary and 2 external directories + restore delta backup, check that incremental chain + restored correctly + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 
'off'}) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + node.slow_start() + + node.pgbench_init(scale=5) + + # FULL backup with old binary without external dirs support + self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + # fill external directories with changed data + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup with external directories using new binary + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE chain with new binary + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_backward_compatibility_merge_1(self): + """ + take backup with old binary without external dirs support + take delta backup with new binary and 2 external directories + merge delta backup and restore it + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + node.slow_start() + + node.pgbench_init(scale=5) + + # tmp FULL backup with old binary + tmp_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') +
external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # FULL backup with old binary without external dirs support + self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1']) + pgbench.wait() + + # delta backup with external directories using new binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Merge chain with new binary + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # Restore merged backup + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_backward_compatibility_merge_2(self): + """ + take backup with old binary without external dirs support + take delta backup with new binary and 2 external directories + merge delta backup and restore it + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir, old_binary=True) + self.show_pb(backup_dir) + + self.add_instance(backup_dir, 'node', node, old_binary=True) + self.show_pb(backup_dir) + + node.slow_start() + + node.pgbench_init(scale=5) + + # tmp FULL backup with old binary + tmp_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=tmp_id) + + # FULL backup with old binary without external dirs support + self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1']) + pgbench.wait() + + # delta backup with external directories using new binary + self.backup_node( + backup_dir, 'node',
node, + backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1']) + pgbench.wait() + + # Fill external dirs with changed data + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, + options=['-j', '4', '--skip-external-dirs']) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, + options=['-j', '4', '--skip-external-dirs']) + + # delta backup with external directories using new binary + backup_id = self.backup_node( + backup_dir, 'node', node, + backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # Merge chain with new binary + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # Restore merged backup + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + + node_restored.cleanup() + + external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node_restored, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + old_binary=True, options=["-j", "4", "--stream"]) + + # fill external directories with changed data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup 
with external directories using new binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_1(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # FULL backup with old data + backup_id_1 = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup with new data + backup_id_2 = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + # fill external directories with old data + self.restore_node( + backup_dir, 'node', node, backup_id=backup_id_1, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, backup_id=backup_id_1, + data_dir=external_dir2_old, options=["-j", "4"]) + + # FULL backup with external directories + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + # drop old external data + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + # fill external directories with new data + self.restore_node( + backup_dir, 'node', node, backup_id=backup_id_2, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, backup_id=backup_id_2, + data_dir=external_dir2_old, options=["-j", "4"]) + + # drop now not needed backups + + # DELTA backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + # merge backups without external directories + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # RESTORE 
+ node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_single(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + # FULL backup + self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup with changed data + backup_id = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + # fill external directories with changed data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup with external directories using new binary + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_merge_double(self): + """""" + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=5) + + 
# FULL backup + self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + external_dir1_old = self.get_tblspace_path(node, 'external_dir1') + external_dir2_old = self.get_tblspace_path(node, 'external_dir2') + + pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum']) + pgbench.wait() + + # FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, + options=["-j", "4", "--stream"]) + + # fill external directories with changed data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1_old, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2_old, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # delta backup with external directories + self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + # delta backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, backup_type="delta", + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1_old, + external_dir2_old)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + shutil.rmtree(external_dir1_old, ignore_errors=True) + shutil.rmtree(external_dir2_old, ignore_errors=True) + + # delta backup without external directories + self.merge_backup(backup_dir, 'node', backup_id=backup_id) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + external_dir1_new = self.get_tblspace_path(node, 'external_dir1') + external_dir2_new = self.get_tblspace_path(node, 'external_dir2') + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", + "--external-mapping={0}={1}".format(external_dir1_old, external_dir1_new), + "--external-mapping={0}={1}".format(external_dir2_old, external_dir2_new)]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_restore_skip_external(self): + """ + Check that --skip-external-dirs works correctly + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir1 = self.get_tblspace_path(node, 'external_dir1') + external_dir2 = self.get_tblspace_path(node, 'external_dir2') + + node.pgbench_init(scale=3) + + # temp FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + # fill external directories with data + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir1, options=["-j", "4"]) + + self.restore_node( + backup_dir, 'node', node, + data_dir=external_dir2, options=["-j", "4"]) + + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # FULL backup with external directories + self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}:{1}".format( + external_dir1, + external_dir2)]) + + # delete first externals, so pgdata_compare + # will be 
capable of detecting redundant + # external files after restore + shutil.rmtree(external_dir1, ignore_errors=True) + shutil.rmtree(external_dir2, ignore_errors=True) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + # RESTORE + node.cleanup() + shutil.rmtree(node.base_dir, ignore_errors=True) + + self.restore_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--skip-external-dirs"]) + + pgdata_restored = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_dir_is_symlink(self): + """ + Check that backup works correctly if external dir is symlink, + symlink pointing to external dir should be followed, + but restored as directory + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + core_dir = os.path.join(self.tmp_path, module_name, fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + + node.pgbench_init(scale=3) + + # temp FULL backup + backup_id = self.backup_node( + backup_dir, 'node', node, options=["-j", "4", "--stream"]) + + # fill some directory with data + core_dir = os.path.join(self.tmp_path, module_name, fname) + symlinked_dir = os.path.join(core_dir, 'symlinked') + + self.restore_node( + backup_dir, 'node', node, + data_dir=symlinked_dir, options=["-j", "4"]) + + # drop temp FULL backup + self.delete_pb(backup_dir, 'node', backup_id=backup_id) + + # create symlink to directory in external directory + os.symlink(symlinked_dir, external_dir) + + # FULL backup with external directories + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}".format( + external_dir)]) + + pgdata = self.pgdata_content( + node.base_dir, exclude_dirs=['logs']) + + node_restored = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node_restored')) + + # RESTORE + node_restored.cleanup() + + external_dir_new = self.get_tblspace_path( + node_restored, 'external_dir') + + self.restore_node( + backup_dir, 'node', node_restored, + options=[ + "-j", "4", "--external-mapping={0}={1}".format( + external_dir, external_dir_new)]) + + pgdata_restored = self.pgdata_content( + node_restored.base_dir, exclude_dirs=['logs']) + + self.compare_pgdata(pgdata, pgdata_restored) + + self.assertEqual( + external_dir, + self.show_pb( + backup_dir, 'node', + backup_id=backup_id)['external-dirs']) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # @unittest.expectedFailure + # @unittest.skip("skip") + def test_external_dir_is_tablespace(self): + """ + Check that backup fails with error + if external directory points to tablespace + """ + fname = self.id().split('.')[3] + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + core_dir = os.path.join(self.tmp_path, module_name, fname) + shutil.rmtree(core_dir, ignore_errors=True) + node = self.make_simple_node( + base_dir=os.path.join(module_name, fname, 'node'), + set_replication=True, + 
initdb_params=['--data-checksums'], + pg_options={ + 'max_wal_senders': '2', + 'autovacuum': 'off'}) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + external_dir = self.get_tblspace_path(node, 'external_dir') + + self.create_tblspace_in_node( + node, 'tblspace1', tblspc_path=external_dir) + + node.pgbench_init(scale=3, tablespace='tblspace1') + + # FULL backup with external directories + try: + backup_id = self.backup_node( + backup_dir, 'node', node, + options=[ + "-j", "4", "--stream", + "-E", "{0}".format( + external_dir)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because external dir points to the tablespace" + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertTrue( + 'External directory path (-E option)' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + # Clean after yourself + self.del_test_dir(module_name, fname) + + # external directory contains a symlink to a file + # external directory contains a symlink to a directory + # external directory is a symlink + + # latest page backup without external_dir + + # multiple external directories + + # --external-dirs=none + + # --external-dirs points to a file + + # external directory in config and in command line + + # external directory contains multiple directories, some of them may be empty + + # forbid external-dirs to point to tablespace directories + # check that unchanged files are not copied by the next backup + + # merge + + # complex merge + diff --git a/tests/merge.py b/tests/merge.py index b756ef64..0124daac 100644 --- a/tests/merge.py +++ b/tests/merge.py @@ -1475,7 +1475,8 @@ class MergeTest(ProbackupTest, unittest.TestCase): node = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'node'), set_replication=True, - initdb_params=['--data-checksums']) + initdb_params=['--data-checksums'], + pg_options={'autovacuum': 'off'}) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node)
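# Illustrative sketch, not part of the patch above: the external-directory tests
# verify that backed-up directories survive restore by comparing directory trees
# with ProbackupTest.pgdata_content() and compare_pgdata(). The minimal standalone
# helpers below show what such a comparison amounts to; the names dir_digest and
# assert_dirs_equal, and the use of hashlib/os.walk, are assumptions made for
# illustration only and are not pg_probackup or test-framework APIs.

import hashlib
import os


def dir_digest(root):
    """Map each regular file under root (by relative path) to its SHA-256 digest."""
    digests = {}
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            rel_path = os.path.relpath(full_path, root)
            sha = hashlib.sha256()
            with open(full_path, 'rb') as f:
                for chunk in iter(lambda: f.read(1 << 20), b''):
                    sha.update(chunk)
            digests[rel_path] = sha.hexdigest()
    return digests


def assert_dirs_equal(before, after):
    """Fail if the two digest maps differ (missing, extra or changed files)."""
    assert before == after, 'external directory content mismatch'

# Usage sketch: take before = dir_digest(external_dir) prior to removing the
# directory, restore with --external-mapping, then call
# assert_dirs_equal(before, dir_digest(external_dir_new)).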