From b37d59832f6c3dc2050879e7231228d497b4059c Mon Sep 17 00:00:00 2001 From: David Steele Date: Wed, 25 Mar 2015 15:15:55 -0400 Subject: [PATCH] v0.50: restore and much more * Added restore functionality. * All options can now be set on the command-line making pg_backrest.conf optional. * De/compression is now performed without threads and checksum/size is calculated in stream. That means file checksums are no longer optional. * Added option `--no-start-stop` to allow backups when Postgres is shut down. If `postmaster.pid` is present then `--force` is required to make the backup run (though if Postgres is running an inconsistent backup will likely be created). This option was added primarily for the purpose of unit testing, but there may be applications in the real world as well. * Fixed broken checksums and now they work with normal and resumed backups. Finally realized that checksums and checksum deltas should be functionally separated and this simplied a number of things. Issue #28 has been created for checksum deltas. * Fixed an issue where a backup could be resumed from an aborted backup that didn't have the same type and prior backup. * Removed dependency on Moose. It wasn't being used extensively and makes for longer startup times. * Checksum for backup.manifest to detect corrupted/modified manifest. * Link `latest` always points to the last backup. This has been added for convenience and to make restores simpler. * More comprehensive unit tests in all areas. --- INSTALL.md | 418 --- LICENSE | 2 +- README.md | 778 +++++- VERSION | 2 +- bin/pg_backrest.pl | 697 ++--- bin/pg_backrest_remote.pl | 109 +- doc/doc.dtd | 94 + doc/doc.pl | 761 ++++++ doc/doc.xml | 799 ++++++ doc/font/alpha_slab_one.woff | Bin 0 -> 12444 bytes doc/html/default.css | 204 ++ lib/BackRest/Backup.pm | 1028 ++++--- lib/BackRest/Config.pm | 1534 +++++++++++ lib/BackRest/Db.pm | 54 +- lib/BackRest/Exception.pm | 59 +- lib/BackRest/File.pm | 613 +++-- lib/BackRest/Manifest.pm | 751 ++++++ lib/BackRest/Remote.pm | 866 ++++-- lib/BackRest/Restore.pm | 765 ++++++ lib/BackRest/ThreadGroup.pm | 165 ++ lib/BackRest/Utility.pm | 156 +- test/data/test.archive1.bin | Bin 0 -> 16777216 bytes .../{test.archive.bin => test.archive2.bin} | Bin test/lib/BackRestTest/BackupTest.pm | 2382 +++++++++++++++-- test/lib/BackRestTest/CommonTest.pm | 508 +++- test/lib/BackRestTest/ConfigTest.pm | 816 ++++++ test/lib/BackRestTest/FileTest.pm | 335 ++- test/lib/BackRestTest/UtilityTest.pm | 63 +- test/test.pl | 146 +- 29 files changed, 11752 insertions(+), 2353 deletions(-) delete mode 100644 INSTALL.md create mode 100644 doc/doc.dtd create mode 100755 doc/doc.pl create mode 100644 doc/doc.xml create mode 100644 doc/font/alpha_slab_one.woff create mode 100644 doc/html/default.css create mode 100644 lib/BackRest/Config.pm create mode 100644 lib/BackRest/Manifest.pm create mode 100644 lib/BackRest/Restore.pm create mode 100644 lib/BackRest/ThreadGroup.pm create mode 100644 test/data/test.archive1.bin rename test/data/{test.archive.bin => test.archive2.bin} (100%) create mode 100755 test/lib/BackRestTest/ConfigTest.pm diff --git a/INSTALL.md b/INSTALL.md deleted file mode 100644 index af6bbe1d4..000000000 --- a/INSTALL.md +++ /dev/null @@ -1,418 +0,0 @@ -# PgBackRest Installation - -## sample ubuntu 12.04 install - -1. Starting from a clean install, update the OS: - -``` -apt-get update -apt-get upgrade (reboot if required) -``` - -2. 
Install ssh, git and cpanminus - -``` -apt-get install ssh -apt-get install git -apt-get install cpanminus -``` - -3. Install Postgres (instructions from http://www.postgresql.org/download/linux/ubuntu/) - -Create the file /etc/apt/sources.list.d/pgdg.list, and add a line for the repository: -``` -deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main -``` -Then run the following: -``` -wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - -sudo apt-get update - -apt-get install postgresql-9.3 -apt-get install postgresql-server-dev-9.3 -``` - -4. Install required Perl modules: -``` -cpanm JSON -cpanm Moose -cpanm Net::OpenSSH -cpanm DBI -cpanm DBD::Pg -cpanm IPC::System::Simple -cpanm Digest::SHA -cpanm IO::Compress::Gzip -cpanm IO::Uncompress::Gunzip -``` -5. Install PgBackRest - -Backrest can be installed by downloading the most recent release: - -https://github.com/dwsteele/pg_backrest/releases - -6. To run unit tests: - -* Create backrest_dev user -* Setup trusted ssh between test user account and backrest_dev -* Backrest user and test user must be in the same group - -## configuration examples - -PgBackRest takes some command-line parameters, but depends on a configuration file for most of the settings. The default location for the configuration file is /etc/pg_backrest.conf. - -#### confguring postgres for archiving with backrest - -Modify the following settings in postgresql.conf: -``` -wal_level = archive -archive_mode = on -archive_command = '/path/to/backrest/bin/pg_backrest.pl --stanza=db archive-push %p' -``` - -Replace the path with the actual location where PgBackRest was installed. The stanza parameter should be changed to the actual stanza name you used for your database in pg_backrest.conf. - -#### simple single host install - -This configuration is appropriate for a small installation where backups are being made locally or to a remote file system that is mounted locally. - -`/etc/pg_backrest.conf`: -``` -[global:command] -psql=/usr/bin/psql - -[global:backup] -path=/var/lib/postgresql/backup - -[global:retention] -full-retention=2 -differential-retention=2 -archive-retention-type=diff -archive-retention=2 - -[db] -path=/var/lib/postgresql/9.3/main -``` - -#### simple multiple host install - -This configuration is appropriate for a small installation where backups are being made remotely. Make sure that postgres@db-host has trusted ssh to backrest@backup-host and vice versa. - -`/etc/pg_backrest.conf on the db host`: -``` -[global:command] -psql=/usr/bin/psql - -[global:backup] -host=backup-host@mydomain.com -user=postgres -path=/var/lib/postgresql/backup - -[db] -path=/var/lib/postgresql/9.3/main -``` -`/etc/pg_backrest.conf on the backup host`: -``` -[global:command] -psql=/usr/bin/psql - -[global:backup] -path=/var/lib/postgresql/backup - -[global:retention] -full-retention=2 -archive-retention-type=full - -[db] -host=db-host@mydomain.com -user=postgres -path=/var/lib/postgresql/9.3/main -``` - -## running - -PgBackRest is intended to be run from a scheduler like cron as there is no built-in scheduler. Postgres does backup rotation, but it is not concerned with when the backups were created. So if two full backups are configured in rentention, PgBackRest will keep two full backup no matter whether they occur 2 hours apart or two weeks apart. - -There are four basic operations: - -1. Backup -``` -/path/to/pg_backrest.pl --stanza=db --type=full backup -``` -Run a `full` backup on the `db` stanza. 
`--type` can also be set to `incr` or `diff` for incremental or differential backups. However, if now `full` backup exists then a `full` backup will be forced even if `incr` - -2. Archive Push -``` -/path/to/pg_backrest.pl --stanza=db archive-push %p -``` -Accepts an archive file from Postgres and pushes it to the backup. `%p` is how Postgres specifies the location of the file to be archived. This command has no other purpose. - -3. Archive Get -``` -/path/to/pg_backrest.pl --stanza=db archive-get %f %p -``` -Retrieves an archive log from the backup. This is used in `restore.conf` to restore a backup to that last archive log, do PITR, or as an alternative to streaming for keep a replica up to date. `%f` is how Postgres specifies the archive log it needs, and `%p` is the location where it should be copied. - -3. Backup Expire -``` -/path/to/pg_backrest.pl --stanza=db expire -``` -Expire (rotate) any backups that exceed the defined retention. Expiration is run after every backup, so there's no need to run this command on its own unless you have reduced rentention, usually to free up some space. - -## structure - -PgBackRest stores files in a way that is easy for users to work with directly. Each backup directory has two files and two subdirectories: - -1. `backup.manifest` file - -Stores information about all the directories, links, and files in the backup. The file is plaintext and should be very clear, but documentation of the format is planned in a future release. - -2. `version` file - -Contains the PgBackRest version that was used to create the backup. - -3. `base` directory - -Contains the Postgres data directory as defined by the data_directory setting in postgresql.conf - -4. `tablespace` directory - -Contains each tablespace in a separate subdirectory. The links in `base/pg_tblspc` are rewritten to this directory. - -## restoring - -PgBackRest does not currently have a restore command - this is planned for the near future. However, PgBackRest stores backups in a way that makes restoring very easy. If `compress=n` it is even possible to start Postgres directly on the backup directory. - -In order to restore a backup, simple rsync the files from the base backup directory to your data directory. If you have used compression, then recursively ungzip the files. If you have tablespaces, repeat the process for each tablespace in the backup tablespace directory. - -It's good to practice restoring backups in advance of needing to do so. - -## configuration options - -Each section defines important aspects of the backup. All configuration sections below should be prefixed with `global:` as demonstrated in the configuration samples. - -#### command section - -The command section defines external commands that are used by PgBackRest. - -##### psql key - -Defines the full path to psql. psql is used to call pg_start_backup() and pg_stop_backup(). -``` -required: y -example: psql=/usr/bin/psql -``` -##### remote key - -Defines the file path to pg_backrest_remote.pl. - -Required only if the path to pg_backrest_remote.pl is different on the local and remote systems. If not defined, the remote path will be assumed to be the same as the local path. -``` -required: n -example: remote=/home/postgres/backrest/bin/pg_backrest_remote.pl -``` -#### command-option section - -The command-option section allows abitrary options to be passed to any command in the command section. - -##### psql key - -Allows command line parameters to be passed to psql. 
-``` -required: no -example: psql=--port=5433 -``` -#### log section - -The log section defines logging-related settings. The following log levels are supported: - -- `off `- No logging at all (not recommended) -- `error `- Log only errors -- `warn `- Log warnings and errors -- `info `- Log info, warnings, and errors -- `debug `- Log debug, info, warnings, and errors -- `trace `- Log trace (very verbose debugging), debug, info, warnings, and errors - -##### level-file - -Sets file log level. -``` -default: info -example: level-file=warn -``` -##### level-console - -Sets console log level. -``` -default: error -example: level-file=info -``` -#### backup section - -The backup section defines settings related to backup and archiving. - -##### host - -Sets the backup host. -``` -required: n (but must be set if user is defined) -example: host=backup.mydomain.com -``` -##### user - -Sets user account on the backup host. -``` -required: n (but must be set if host is defined) -example: user=backrest -``` -##### path - -Path where backups are stored on the local or remote host. -``` -required: y -example: path=/backup/backrest -``` -##### compress - -Enable gzip compression. Files stored in the backup are compatible with command-line gzip tools. -``` -default: y -example: compress=n -``` -##### checksum - -Enable SHA-1 checksums. Backup checksums are stored in backup.manifest while archive checksums are stored in the filename. -``` -default: y -example: checksum=n -``` -##### start_fast - -Forces an immediate checkpoint (by passing true to the fast parameter of pg_start_backup()) so the backup begins immediately. -``` -default: n -example: hardlink=y -``` -##### hardlink - -Enable hard-linking of files in differential and incremental backups to their full backups. This gives the appearance that each -backup is a full backup. Be care though, because modifying files that are hard-linked can affect all the backups in the set. -``` -default: y -example: hardlink=n -``` -##### thread-max - -Defines the number of threads to use for backup. Each thread will perform compression and transfer to make the backup run faster, but don't set `thread-max` so high that it impacts database performance. -``` -default: 1 -example: thread-max=4 -``` -##### thread-timeout - -Maximum amount of time that a backup thread should run. This limits the amount of time that a thread might be stuck due to unforeseen issues during the backup. -``` -default: -example: thread-max=4 -``` -##### archive-required - -Are archive logs required to to complete the backup? It's a good idea to leave this as the default unless you are using another -method for archiving. -``` -default: y -example: archive-required=n -``` -#### archive section - -The archive section defines parameters when doing async archiving. This means that the archive files will be stored locally, then a background process will pick them and move them to the backup. - -##### path - -Path where archive logs are stored before being asynchronously transferred to the backup. Make sure this is not the same path as the backup is using if the backup is local. -``` -required: y -example: path=/backup/archive -``` -##### compress-async - -When set then archive logs are not compressed immediately, but are instead compressed when copied to the backup host. This means that more space will be used on local storage, but the initial archive process will complete more quickly allowing greater throughput from Postgres. 
-``` -default: n -example: compress-async=y -``` -##### archive-max-mb - -Limits the amount of archive log that will be written locally. After the limit is reached, the following will happen: - -1. PgBackRest will notify Postgres that the archive was succesfully backed up, then DROP IT. -2. An error will be logged to the console and also to the Postgres log. -3. A stop file will be written in the lock directory and no more archive files will be backed up until it is removed. - -If this occurs then the archive log stream will be interrupted and PITR will not be possible past that point. A new backup will be required to regain full restore capability. - -The purpose of this feature is to prevent the log volume from filling up at which point Postgres will stop all operation. Better to lose the backup than have the database go down completely. - -To start normal archiving again you'll need to remove the stop file which will be located at `${archive-path}/lock/${stanza}-archive.stop` where `${archive-path}` is the path set in the archive section, and ${stanza} is the backup stanza. -``` -required: n -example: archive-max-mb=1024 -``` -#### retention section - -The rentention section defines how long backups will be retained. Expiration only occurs when the number of complete backups exceeds the allowed retention. In other words, if full-retention is set to 2, then there must be 3 complete backups before the oldest will be expired. Make sure you always have enough space for rentention + 1 backups. - -##### full-retention - -Number of full backups to keep. When a full backup expires, all differential and incremental backups associated with the full backup will also expire. When not defined then all full backups will be kept. -``` -required: n -example: full-retention=2 -``` -##### differential-retention - -Number of differential backups to keep. When a differential backup expires, all incremental backups associated with the differential backup will also expire. When not defined all differential backups will be kept. -``` -required: n -example: differential-retention=3 -``` -##### archive-retention-type - -Type of backup to use for archive retention (full or differential). If set to full, then PgBackRest will keep archive logs for the number of full backups defined by `archive-retention`. If set to differential, then PgBackRest will keep archive logs for the number of differential backups defined by `archive-retention`. - -If not defined then archive logs will be kept indefinitely. In general it is not useful to keep archive logs that are older than the oldest backup, but there may be reasons for doing so. -``` -required: n -example: archive-retention-type=full -``` -##### archive-retention - -Number of backups worth of archive log to keep. If not defined, then `full-retention` will be used when `archive-retention-type=full` and `differential-retention` will be used when `archive-retention-type=differential`. -``` -required: n -example: archive-retention=2 -``` -### stanza sections - -A stanza defines a backup for a specific database. The stanza section must define the base database path and host/user if the database is remote. Also, any global configuration sections can be overridden to define stanza-specific settings. - -##### host - -Sets the database host. -``` -required: n (but must be set if user is defined) -example: host=db.mydomain.com -``` -##### user - -Sets user account on the db host. 
-``` -required: n (but must be set if host is defined) -example: user=postgres -``` -##### path - -Path to the db data directory (data_directory setting in postgresql.conf). -``` -required: y -example: path=/var/postgresql/data -``` diff --git a/LICENSE b/LICENSE index ffdbcdf45..91ca7456a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2013-2014 David Steele +Copyright (c) 2013-2015 David Steele Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.md b/README.md index ff0a7df3e..5810d195a 100644 --- a/README.md +++ b/README.md @@ -2,72 +2,780 @@ PgBackRest aims to be a simple backup and restore system that can seamlessly scale up to the largest databases and workloads. -## release notes +Primary PgBackRest features: -### v0.30: core restructuring and unit tests +- Local or remote backup +- Multi-threaded backup/restore for performance +- Checksums +- Safe backups (checks that logs required for consistency are present before backup completes) +- Full, differential, and incremental backups +- Backup rotation (and minimum retention rules with optional separate retention for archive) +- In-stream compression/decompression +- Archiving and retrieval of logs for replicas/restores built in +- Async archiving for very busy systems (including space limits) +- Backup directories are consistent Postgres clusters (when hardlinks are on and compression is off) +- Tablespace support +- Restore delta option +- Restore using timestamp/size or checksum +- Restore remapping base/tablespaces -* Complete rewrite of BackRest::File module to use a custom protocol for remote operations and Perl native GZIP and SHA operations. Compression is performed in threads rather than forked processes. +Instead of relying on traditional backup tools like tar and rsync, PgBackRest implements all backup features internally and uses a custom protocol for communicating with remote systems. Removing reliance on tar and rsync allows for better solutions to database-specific backup issues. The custom remote protocol limits the types of connections that are required to perform a backup which increases security. -* Fairly comprehensive unit tests for all the basic operations. More work to be done here for sure, but then there is always more work to be done on unit tests. +## Install -* Removed dependency on Storable and replaced with a custom ini file implementation. +PgBackRest is written entirely in Perl and uses some non-standard modules that must be installed from CPAN. -* Added much needed documentation (see INSTALL.md). +### Ubuntu 12.04 -* Numerous other changes that can only be identified with a diff. +* Starting from a clean install, update the OS: +``` +apt-get update +apt-get upgrade (reboot if required) +``` +* Install ssh, git and cpanminus: +``` +apt-get install ssh +apt-get install git +apt-get install cpanminus +``` +* Install Postgres (instructions from http://www.postgresql.org/download/linux/ubuntu/) -### v0.19: improved error reporting/handling +Create the file /etc/apt/sources.list.d/pgdg.list, and add a line for the repository: +``` +deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main +``` +* Then run the following: +``` +wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - +sudo apt-get update -* Working on improving error handling in the file object. 
This is not complete, but works well enough to find a few errors that have been causing us problems (notably, find is occasionally failing building the archive async manifest when system is under load). +apt-get install postgresql-9.3 +apt-get install postgresql-server-dev-9.3 +``` +* Install required Perl modules: +``` +cpanm JSON +cpanm Net::OpenSSH +cpanm IPC::System::Simple +cpanm Digest::SHA +cpanm Compress::ZLib +``` +* Install PgBackRest -* Found and squashed a nasty bug where file_copy was defaulted to ignore errors. There was also an issue in file_exists that was causing the test to fail when the file actually did exist. Together they could have resulted in a corrupt backup with no errors, though it is very unlikely. +PgBackRest can be installed by downloading the most recent release: -### v0.18: return soft error from archive-get when file is missing +https://github.com/pgmasters/backrest/releases -* The archive-get function returns a 1 when the archive file is missing to differentiate from hard errors (ssh connection failure, file copy error, etc.) This lets Postgres know that that the archive stream has terminated normally. However, this does not take into account possible holes in the archive stream. +PgBackRest can be installed anywhere but it's best (though not required) to install it in the same location on all systems. -### v0.17: warn when archive directories cannot be deleted +## Operation -* If an archive directory which should be empty could not be deleted backrest was throwing an error. There's a good fix for that coming, but for the time being it has been changed to a warning so processing can continue. This was impacting backups as sometimes the final archive file would not get pushed if the first archive file had been in a different directory (plus some bad luck). +### General Options -### v0.16: RequestTTY=yes for SSH sessions +These options are either global or used by all commands. -* Added RequestTTY=yes to ssh sesssions. Hoping this will prevent random lockups. +#### `config` option -### v0.15: added archive-get +By default PgBackRest expects the its configuration file to be located at `/etc/pg_backrest.conf`. Use this option to specify another location. +``` +required: n +default: /etc/pg_backrest.conf +example: config=/var/lib/backrest/pg_backrest.conf +``` -* Added archive-get functionality to aid in restores. +#### `stanza` option -* Added option to force a checkpoint when starting the backup (start_fast=y). +Defines the stanza for the command. A stanza is the configuration for a database that defines where it is located, how it will be backed up, archiving options, etc. Most db servers will only have one Postgres cluster and therefore one stanza, whereas backup servers will have a stanza for every database that needs to be backed up. -### v0.11: minor fixes +Examples of how to configure a stanza can be found in the `configuration examples` section. +``` +required: y +example: stanza=main +``` -Tweaking a few settings after running backups for about a month. +#### `help` option -* Removed master_stderr_discard option on database SSH connections. There have been occasional lockups and they could be related issues originally seen in the file code. +Displays the PgBackRest help. +``` +required: n +``` -* Changed lock file conflicts on backup and expire commands to ERROR. They were set to DEBUG due to a copy-and-paste from the archive locks. +#### `version` option -### v0.10: backup and archiving are functional +Displays the PgBackRest version. 
+``` +required: n +``` -This version has been put into production at Resonate, so it does work, but there are a number of major caveats. +### Commands -* No restore functionality, but the backup directories are consistent Postgres data directories. You'll need to either uncompress the files or turn off compression in the backup. Uncompressed backups on a ZFS (or similar) filesystem are a good option because backups can be restored locally via a snapshot to create logical backups or do spot data recovery. +#### `backup` command -* Archiving is single-threaded. This has not posed an issue on our multi-terabyte databases with heavy write volume. Recommend a large WAL volume or to use the async option with a large volume nearby. +Perform a database backup. PgBackRest does not have a built-in scheduler so it's best to run it from cron or some other scheduling mechanism. -* Backups are multi-threaded, but the Net::OpenSSH library does not appear to be 100% threadsafe so it will very occasionally lock up on a thread. There is an overall process timeout that resolves this issue by killing the process. Yes, very ugly. +##### `type` option -* Checksums are lost on any resumed backup. Only the final backup will record checksum on multiple resumes. Checksums from previous backups are correctly recorded and a full backup will reset everything. +The following backup types are supported: -* The backup.manifest is being written as Storable because Config::IniFile does not seem to handle large files well. Would definitely like to save these as human-readable text. +- `full` - all database files will be copied and there will be no dependencies on previous backups. +- `incr` - incremental from the last successful backup. +- `diff` - like an incremental backup but always based on the last full backup. -* Absolutely no documentation (outside the code). Well, excepting these release notes. +``` +required: n +default: incr +example: --type=full +``` -* Lots of other little things and not so little things. Much refactoring to follow. +##### `no-start-stop` option -## recognition +This option prevents PgBackRest from running `pg_start_backup()` and `pg_stop_backup()` on the database. In order for this to work PostgreSQL should be shut down and PgBackRest will generate an error if it is not. -Primary recognition goes to Stephen Frost for all his valuable advice a criticism during the development of PgBackRest. It's a far better piece of software than it would have been without him. Any mistakes should be blamed on me alone. +The purpose of this option is to allow cold backups. The `pg_xlog` directory is copied as-is and `archive-check` is automatically disabled for the backup. +``` +required: n +default: n +``` -Resonate (http://www.resonateinsights.com) also contributed to the development of PgBackRest and allowed me to install early (but well tested) versions as their primary Postgres backup solution. Works so far! +##### `force` option + +When used with `--no-start-stop` a backup will be run even if PgBackRest thinks that PostgreSQL is running. **This option should be used with extreme care as it will likely result in a bad backup.** + +There are some scenarios where a backup might still be desirable under these conditions. For example, if a server crashes and the database volume can only be mounted read-only, it would be a good idea to take a backup even if `postmaster.pid` is present. 
Normally it would be better to revert to the prior backup and replay WAL, but there may be a very important transaction in a WAL segment that did not get archived.
+```
+required: n
+default: n
+```
+
+##### Example: Full Backup
+
+```
+/path/to/pg_backrest.pl --stanza=db --type=full backup
+```
+Run a `full` backup on the `db` stanza. `--type` can also be set to `incr` or `diff` for incremental or differential backups. However, if no `full` backup exists then a `full` backup will be forced even if `incr` or `diff` is requested.
+
+#### `archive-push` command
+
+Archive a WAL segment to the repository.
+
+##### Example
+
+```
+/path/to/pg_backrest.pl --stanza=db archive-push %p
+```
+Accepts a WAL segment from PostgreSQL and archives it in the repository. `%p` is how PostgreSQL specifies the location of the WAL segment to be archived.
+
+#### `archive-get` command
+
+Get a WAL segment from the repository.
+
+##### Example
+
+```
+/path/to/pg_backrest.pl --stanza=db archive-get %f %p
+```
+Retrieves a WAL segment from the repository. This command is used in `recovery.conf` to restore a backup, perform PITR, or as an alternative to streaming for keeping a replica up to date. `%f` is how PostgreSQL specifies the WAL segment it needs and `%p` is the location where it should be copied.
+
+#### `expire` command
+
+PgBackRest does backup rotation, but is not concerned with when the backups were created. So if two full backups are configured for retention, PgBackRest will keep two full backups no matter whether they occur two hours apart or two weeks apart.
+
+##### Example
+
+```
+/path/to/pg_backrest.pl --stanza=db expire
+```
+Expire (rotate) any backups that exceed the defined retention. Expiration is run automatically after every successful backup, so there is no need to run this command separately unless you have reduced retention, usually to free up some space.
+
+#### `restore` command
+
+Perform a database restore. This command is generally run manually, but there are instances where it might be automated.
+
+##### `set` option
+
+The backup set to be restored. `latest` will restore the latest backup, otherwise provide the name of the backup to restore.
+```
+required: n
+default: latest
+example: --set=20150131-153358F_20150131-153401I
+```
+
+##### `delta` option
+
+By default the PostgreSQL data and tablespace directories are expected to be present but empty. This option performs a delta restore using checksums.
+```
+required: n
+default: n
+```
+
+##### `force` option
+
+By itself this option forces the PostgreSQL data and tablespace paths to be completely overwritten. In combination with `--delta` a timestamp/size delta will be performed instead of using checksums.
+```
+required: n
+default: n
+```
+
+##### `type` option
+
+The following recovery types are supported:
+
+- `default` - recover to the end of the archive stream.
+- `name` - recover to the restore point specified in `--target`.
+- `xid` - recover to the transaction id specified in `--target`.
+- `time` - recover to the time specified in `--target`.
+- `preserve` - preserve the existing `recovery.conf` file.
+
+```
+required: n
+default: default
+example: --type=xid
+```
+
+##### `target` option
+
+Defines the recovery target when `--type` is `name`, `xid`, or `time`.
+```
+required: y
+example: "--target=2015-01-30 14:15:11 EST"
+```
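+
+For illustration, `--type` and `--target` combine with the other restore options above into a complete point-in-time restore command. The stanza name, timestamp, and install path below are placeholders only:
+```
+/path/to/pg_backrest.pl --stanza=db --delta --type=time "--target=2015-01-30 14:15:11 EST" restore
+```
+This performs a delta restore over the existing cluster files and writes a `recovery.conf` that targets the given time. Since PgBackRest does not start PostgreSQL, `recovery.conf` can be reviewed before the cluster is started to begin recovery.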
+
+##### `target-exclusive` option
+
+Defines whether recovery to the target is exclusive (the default is inclusive) and is only valid when `--type` is `time` or `xid`. For example, using `--target-exclusive` would exclude the contents of transaction `1007` when `--type=xid` and `--target=1007`. See the `recovery_target_inclusive` option in the PostgreSQL docs for more information.
+```
+required: n
+default: n
+```
+
+##### `target-resume` option
+
+Specifies whether recovery should resume when the recovery target is reached. See `pause_at_recovery_target` in the PostgreSQL docs for more information.
+```
+required: n
+default: n
+```
+
+##### `target-timeline` option
+
+Recovers along the specified timeline. See `recovery_target_timeline` in the PostgreSQL docs for more information.
+```
+required: n
+example: --target-timeline=3
+```
+
+##### `recovery-setting` option
+
+Recovery settings for `recovery.conf` can be specified with this option. See http://www.postgresql.org/docs/X.X/static/recovery-config.html for details on `recovery.conf` settings (replace X.X with your database version). This option can be used multiple times.
+
+Note: `restore_command` will be automatically generated but can be overridden with this option. Be careful about specifying your own `restore_command` as PgBackRest is designed to handle this for you. Recovery target options (recovery_target_name, recovery_target_time, etc.) are generated automatically by PgBackRest and should not be set with this option.
+
+Recovery settings can also be set in the `restore:recovery-setting` section of pg_backrest.conf. For example:
+```
+[restore:recovery-setting]
+primary_conninfo=db.mydomain.com
+standby_mode=on
+```
+Since PgBackRest does not start PostgreSQL after writing the `recovery.conf` file, it is always possible to edit/check `recovery.conf` before manually restarting.
+```
+required: n
+example: --recovery-setting primary_conninfo=db.mydomain.com
+```
+
+##### `tablespace-map` option
+
+Moves a tablespace to a new location during the restore. This is useful when tablespace locations are not the same on a replica, or an upgraded system has different mount points.
+
+Since PostgreSQL 9.2, tablespace locations are not stored in pg_tablespace, so moving tablespaces can be done with impunity. However, moving a tablespace to the `data_directory` is not recommended and may cause problems. For more information on moving tablespaces http://www.databasesoup.com/2013/11/moving-tablespaces.html is a good resource.
+```
+required: n
+example: --tablespace-map ts_01=/db/ts_01
+```
+
+##### Example: Restore Latest
+
+```
+/path/to/pg_backrest.pl --stanza=db --type=name --target=release restore
+```
+Restores the latest database backup and then recovers to the `release` restore point.
+
+## Configuration
+
+PgBackRest can be used entirely with command-line parameters but a configuration file is more practical for installations that are complex or set a lot of options. The default location for the configuration file is `/etc/pg_backrest.conf`.
+
+### Examples
+
+#### Configuring Postgres for Archiving
+
+Modify the following settings in `postgresql.conf`:
+```
+wal_level = archive
+archive_mode = on
+archive_command = '/path/to/backrest/bin/pg_backrest.pl --stanza=db archive-push %p'
+```
+Replace the path with the actual location where PgBackRest was installed. The stanza parameter should be changed to the actual stanza name for your database.
+
+
+#### Minimal Configuration
+
+The absolute minimum required to run PgBackRest (if all defaults are accepted) is the database path.
+ +`/etc/pg_backrest.conf`: +``` +[main] +db-path=/data/db +``` +The `db-path` option could also be provided on the command line, but it's best to use a configuration file as options tend to pile up quickly. + +#### Simple Single Host Configuration + +This configuration is appropriate for a small installation where backups are being made locally or to a remote file system that is mounted locally. A number of additional options are set: + +- `cmd-psql` - Custom location and parameters for psql. +- `cmd-psql-option` - Options for psql can be set per stanza. +- `compress` - Disable compression (handy if the file system is already compressed). +- `repo-path` - Path to the PgBackRest repository where backups and WAL archive are stored. +- `log-level-file` - Set the file log level to debug (Lots of extra info if something is not working as expected). +- `hardlink` - Create hardlinks between backups (but never between full backups). +- `thread-max` - Use 2 threads for backup/restore operations. + +`/etc/pg_backrest.conf`: +``` +[global:command] +cmd-psql=/usr/local/bin/psql -X %option% + +[global:general] +compress=n +repo-path=/Users/dsteele/Documents/Code/backrest/test/test/backrest + +[global:log] +log-level-file=debug + +[global:backup] +hardlink=y +thread-max=2 + +[main] +db-path=/data/db + +[main:command] +cmd-psql-option=--port=5433 +``` + + +#### Simple Multiple Host Configuration + +This configuration is appropriate for a small installation where backups are being made remotely. Make sure that postgres@db-host has trusted ssh to backrest@backup-host and vice versa. This configuration assumes that you have pg_backrest_remote.pl and pg_backrest.pl in the same path on both servers. + +`/etc/pg_backrest.conf` on the db host: +``` +[global:general] +repo-path=/path/to/db/repo +repo-remote-path=/path/to/backup/repo + +[global:backup] +backup-host=backup.mydomain.com +backup-user=backrest + +[global:archive] +archive-async=y + +[main] +db-path=/data/db +``` +`/etc/pg_backrest.conf` on the backup host: +``` +[global:general] +repo-path=/path/to/backup/repo + +[main] +db-host=db.mydomain.com +db-path=/data/db +db-user=postgres +``` + + +### Options + +#### `command` section + +The `command` section defines the location of external commands that are used by PgBackRest. + +##### `cmd-psql` key + +Defines the full path to `psql`. `psql` is used to call `pg_start_backup()` and `pg_stop_backup()`. + +If addtional per stanza parameters need to be passed to `psql` (such as `--port` or `--cluster`) then add `%option%` to the command line and use `command-option::psql` to set options. +``` +required: n +default: /usr/bin/psql -X +example: cmd-psql=/usr/bin/psql -X %option% +``` + +##### `cmd-psql-option` key + +Allows per stanza command line parameters to be passed to `psql`. +``` +required: n +example: cmd-psql-option --port=5433 +``` + +##### `cmd-remote` key + +Defines the location of `pg_backrest_remote.pl`. + +Required only if the path to `pg_backrest_remote.pl` is different on the local and remote systems. If not defined, the remote path will be assumed to be the same as the local path. +``` +required: n +default: same as local +example: cmd-remote=/usr/lib/backrest/bin/pg_backrest_remote.pl +``` + +#### `log` section + +The `log` section defines logging-related settings. 
The following log levels are supported: + +- `off` - No logging at all (not recommended) +- `error` - Log only errors +- `warn` - Log warnings and errors +- `info` - Log info, warnings, and errors +- `debug` - Log debug, info, warnings, and errors +- `trace` - Log trace (very verbose debugging), debug, info, warnings, and errors + + +##### `log-level-file` key + +Sets file log level. +``` +required: n +default: info +example: log-level-file=debug +``` + +##### `log-level-console` key + +Sets console log level. +``` +required: n +default: warn +example: log-level-console=error +``` + +#### `general` section + +The `general` section defines settings that are shared between multiple operations. + +##### `buffer-size` key + +Set the buffer size used for copy, compress, and uncompress functions. A maximum of 3 buffers will be in use at a time per thread. An additional maximum of 256K per thread may be used for zlib buffers. +``` +required: n +default: 1048576 +allow: 4096 - 8388608 +example: buffer-size=16384 +``` + +##### `compress` key + +Enable gzip compression. Backup files are compatible with command-line gzip tools. +``` +required: n +default: y +example: compress=n +``` + +##### `compress-level` key + +Sets the zlib level to be used for file compression when `compress=y`. +``` +required: n +default: 6 +allow: 0-9 +example: compress-level=9 +``` + +##### `compress-level-network` key + +Sets the zlib level to be used for protocol compression when `compress=n` and the database is not on the same host as the backup. Protocol compression is used to reduce network traffic but can be disabled by setting `compress-level-network=0`. When `compress=y` the `compress-level-network` setting is ignored and `compress-level` is used instead so that the file is only compressed once. SSH compression is always disabled. +``` +required: n +default: 3 +allow: 0-9 +example: compress-level-network=1 +``` + +##### `repo-path` key + +Path to the backrest repository where WAL segments, backups, logs, etc are stored. +``` +required: n +default: /var/lib/backup +example: repo-path=/data/db/backrest +``` + +##### `repo-remote-path` key + +Path to the remote backrest repository where WAL segments, backups, logs, etc are stored. +``` +required: n +example: repo-remote-path=/backup/backrest +``` + +#### `backup` section + +The `backup` section defines settings related to backup. + +##### `backup-host` key + +Sets the backup host when backup up remotely via SSH. Make sure that trusted SSH authentication is configured between the db host and the backup host. + +When backing up to a locally mounted network filesystem this setting is not required. +``` +required: n +example: backup-host=backup.domain.com +``` + +##### `backup-user` key + +Sets user account on the backup host. +``` +required: n +example: backup-user=backrest +``` + +##### `start-fast` key + +Forces a checkpoint (by passing `true` to the `fast` parameter of `pg_start_backup()`) so the backup begins immediately. +``` +required: n +default: n +example: start-fast=y +``` + +##### `hardlink` key + +Enable hard-linking of files in differential and incremental backups to their full backups. This gives the appearance that each backup is a full backup. Be careful, though, because modifying files that are hard-linked can affect all the backups in the set. +``` +required: n +default: n +example: hardlink=y +``` + +##### `thread-max` key + +Defines the number of threads to use for backup or restore. 
Each thread will perform compression and transfer to make the backup run faster, but don't set `thread-max` so high that it impacts database performance during backup.
+```
+required: n
+default: 1
+example: thread-max=4
+```
+
+##### `thread-timeout` key
+
+Maximum amount of time (in seconds) that a backup thread should run. This limits the amount of time that a thread might be stuck due to unforeseen issues during the backup. Has no effect when `thread-max=1`.
+```
+required: n
+example: thread-timeout=3600
+```
+
+##### `archive-check` key
+
+Checks that all WAL segments required to make the backup consistent are present in the WAL archive. It's a good idea to leave this as the default unless you are using another method for archiving.
+```
+required: n
+default: y
+example: archive-check=n
+```
+
+##### `archive-copy` key
+
+Store WAL segments required to make the backup consistent in the backup's pg_xlog path. This slightly paranoid option protects against corruption or premature expiration in the WAL segment archive. PITR won't be possible without the WAL segment archive and this option also consumes more space.
+```
+required: n
+default: n
+example: archive-copy=y
+```
+
+#### `archive` section
+
+The `archive` section defines parameters when doing async archiving. This means that the archive files will be stored locally, then a background process will pick them up and move them to the backup.
+
+##### `archive-async` key
+
+Archive WAL segments asynchronously. WAL segments will be copied to the local repo, then a process will be forked to compress the segment and transfer it to the remote repo if configured. Control will be returned to PostgreSQL as soon as the WAL segment is copied locally.
+```
+required: n
+default: n
+example: archive-async=y
+```
+
+##### `archive-max-mb` key
+
+Limits the amount of archive log that will be written locally when `archive-async=y`. After the limit is reached, the following will happen:
+
+- PgBackRest will notify Postgres that the archive was successfully backed up, then DROP IT.
+- An error will be logged to the console and also to the Postgres log.
+- A stop file will be written in the lock directory and no more archive files will be backed up until it is removed.
+
+If this occurs then the archive log stream will be interrupted and PITR will not be possible past that point. A new backup will be required to regain full restore capability.
+
+The purpose of this feature is to prevent the log volume from filling up, at which point Postgres will stop completely. Better to lose the backup than have the database go down.
+
+To start normal archiving again you'll need to remove the stop file which will be located at `${archive-path}/lock/${stanza}-archive.stop` where `${archive-path}` is the path set in the `archive` section, and `${stanza}` is the backup stanza.
+```
+required: n
+example: archive-max-mb=1024
+```
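+
+For illustration, asynchronous archiving is enabled by combining the keys above in the `archive` section of `pg_backrest.conf` (the `archive-max-mb` value below is only an example; leave it unset if you never want WAL to be dropped):
+```
+[global:archive]
+archive-async=y
+archive-max-mb=1024
+```
+With this in place, `archive-push` returns to PostgreSQL as soon as the WAL segment has been copied locally, and a background process compresses the segment and transfers it to the repository.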
+
+#### `expire` section
+
+The `expire` section defines how long backups will be retained. Expiration only occurs when the number of complete backups exceeds the allowed retention. In other words, if `retention-full` is set to 2, then there must be 3 complete backups before the oldest will be expired. Make sure you always have enough space for retention + 1 backups.
+
+##### `retention-full` key
+
+Number of full backups to keep. When a full backup expires, all differential and incremental backups associated with the full backup will also expire. When not defined then all full backups will be kept.
+```
+required: n
+example: retention-full=2
+```
+
+##### `retention-diff` key
+
+Number of differential backups to keep. When a differential backup expires, all incremental backups associated with the differential backup will also expire. When not defined then all differential backups will be kept.
+```
+required: n
+example: retention-diff=3
+```
+
+##### `retention-archive-type` key
+
+Type of backup to use for archive retention (full or differential). If set to full, then PgBackRest will keep archive logs for the number of full backups defined by `retention-archive`. If set to differential, then PgBackRest will keep archive logs for the number of differential backups defined by `retention-archive`.
+
+If not defined then archive logs will be kept indefinitely. In general it is not useful to keep archive logs that are older than the oldest backup, but there may be reasons for doing so.
+```
+required: n
+default: full
+example: retention-archive-type=diff
+```
+
+##### `retention-archive` key
+
+Number of backups worth of archive log to keep.
+```
+required: n
+example: retention-archive=2
+```
+
+#### `stanza` section
+
+A stanza defines a backup for a specific database. The stanza section must define the base database path and host/user if the database is remote. Also, any global configuration sections can be overridden to define stanza-specific settings (see the example at the end of this section).
+
+##### `db-host` key
+
+Defines the database host. Used for backups where the database host is different from the backup host.
+```
+required: n
+example: db-host=db.domain.com
+```
+
+##### `db-user` key
+
+Defines user account on the db host when `db-host` is defined.
+```
+required: n
+example: db-user=postgres
+```
+
+##### `db-path` key
+
+Path to the db data directory (data_directory setting in postgresql.conf).
+```
+required: y
+example: db-path=/data/db
+```
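+
+Any global section can be overridden for a single stanza by qualifying the section name with the stanza, as in the `[main:command]` override shown in the single host example above. The values below are purely hypothetical and only illustrate the syntax:
+```
+[main]
+db-path=/data/db
+
+[main:backup]
+thread-max=1
+```
+Here the `main` stanza overrides whatever `thread-max` is set globally so that backups of this cluster run single-threaded.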
+
+## Release Notes
+
+### v0.50: Restore and Much More
+
+- Added restore functionality.
+
+- All options can now be set on the command line, making pg_backrest.conf optional.
+
+- De/compression is now performed without threads and checksum/size is calculated in stream. That means file checksums are no longer optional.
+
+- Added option `--no-start-stop` to allow backups when Postgres is shut down. If `postmaster.pid` is present then `--force` is required to make the backup run (though if Postgres is running an inconsistent backup will likely be created). This option was added primarily for the purpose of unit testing, but there may be applications in the real world as well.
+
+- Fixed broken checksums so that they now work with normal and resumed backups. Finally realized that checksums and checksum deltas should be functionally separated and this simplified a number of things. Issue #28 has been created for checksum deltas.
+
+- Fixed an issue where a backup could be resumed from an aborted backup that didn't have the same type and prior backup.
+
+- Removed dependency on Moose. It wasn't being used extensively and it made for longer startup times.
+
+- Added a checksum for backup.manifest to detect a corrupted/modified manifest.
+
+- The `latest` link always points to the last backup. This has been added for convenience and to make restores simpler.
+
+- More comprehensive unit tests in all areas.
+
+### v0.30: Core Restructuring and Unit Tests
+
+- Complete rewrite of BackRest::File module to use a custom protocol for remote operations and Perl native GZIP and SHA operations. Compression is performed in threads rather than forked processes.
+
+- Fairly comprehensive unit tests for all the basic operations. More work to be done here for sure, but then there is always more work to be done on unit tests.
+
+- Removed dependency on Storable and replaced with a custom ini file implementation.
+
+- Added much-needed documentation.
+
+- Numerous other changes that can only be identified with a diff.
+
+### v0.19: Improved Error Reporting/Handling
+
+- Working on improving error handling in the file object. This is not complete, but works well enough to find a few errors that have been causing us problems (notably, find is occasionally failing building the archive async manifest when the system is under load).
+
+- Found and squashed a nasty bug where `file_copy()` was defaulted to ignore errors. There was also an issue in `file_exists()` that was causing the test to fail when the file actually did exist. Together they could have resulted in a corrupt backup with no errors, though it is very unlikely.
+
+### v0.18: Return Soft Error When Archive Missing
+
+- The `archive-get` operation returns a 1 when the archive file is missing to differentiate from hard errors (ssh connection failure, file copy error, etc.). This lets Postgres know that the archive stream has terminated normally. However, this does not take into account possible holes in the archive stream.
+
+### v0.17: Warn When Archive Directories Cannot Be Deleted
+
+- If an archive directory which should be empty could not be deleted, backrest was throwing an error. There's a good fix for that coming, but for the time being it has been changed to a warning so processing can continue. This was impacting backups as sometimes the final archive file would not get pushed if the first archive file had been in a different directory (plus some bad luck).
+
+### v0.16: RequestTTY=yes for SSH Sessions
+
+- Added `RequestTTY=yes` to ssh sessions. Hoping this will prevent random lockups.
+
+### v0.15: Added Archive-Get
+
+- Added archive-get functionality to aid in restores.
+
+- Added option to force a checkpoint when starting the backup (`start-fast=y`).
+
+### v0.11: Minor Fixes
+
+- Removed `master_stderr_discard` option on database SSH connections. There have been occasional lockups and they could be related to issues originally seen in the file code.
+
+- Changed lock file conflicts on backup and expire commands to ERROR. They were set to DEBUG due to a copy-and-paste from the archive locks.
+
+### v0.10: Backup and Archiving Are Functional
+
+- No restore functionality, but the backup directories are consistent Postgres data directories. You'll need to either uncompress the files or turn off compression in the backup. Uncompressed backups on a ZFS (or similar) filesystem are a good option because backups can be restored locally via a snapshot to create logical backups or do spot data recovery.
+
+- Archiving is single-threaded. This has not posed an issue on our multi-terabyte databases with heavy write volume. Recommend a large WAL volume or to use the async option with a large volume nearby.
+
+- Backups are multi-threaded, but the Net::OpenSSH library does not appear to be 100% threadsafe so it will very occasionally lock up on a thread. There is an overall process timeout that resolves this issue by killing the process. Yes, very ugly.
+
+- Checksums are lost on any resumed backup. Only the final backup will record checksums on multiple resumes. Checksums from previous backups are correctly recorded and a full backup will reset everything.
+ +- The backup.manifest is being written as Storable because Config::IniFile does not seem to handle large files well. Would definitely like to save these as human-readable text. + +- Absolutely no documentation (outside the code). Well, excepting these release notes. + +## Recognition + +Primary recognition goes to Stephen Frost for all his valuable advice and criticism during the development of PgBackRest. + +Resonate (http://www.resonate.com/) also contributed to the development of PgBackRest and allowed me to install early (but well tested) versions as their primary Postgres backup solution. diff --git a/VERSION b/VERSION index f7c6c31b6..c49766cb9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.30 +0.50 diff --git a/bin/pg_backrest.pl b/bin/pg_backrest.pl index 1b9ffd65e..f2f95314b 100755 --- a/bin/pg_backrest.pl +++ b/bin/pg_backrest.pl @@ -6,20 +6,17 @@ #################################################################################################################################### # Perl includes #################################################################################################################################### -use threads; use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); use File::Basename; -use Getopt::Long; -use Pod::Usage; use lib dirname($0) . '/../lib'; use BackRest::Utility; +use BackRest::Config; +use BackRest::Remote; use BackRest::File; -use BackRest::Backup; -use BackRest::Db; #################################################################################################################################### # Usage @@ -33,10 +30,11 @@ pg_backrest.pl - Simple Postgres Backup and Restore pg_backrest.pl [options] [operation] - Operation: + Operations: archive-get retrieve an archive file from backup archive-push push an archive file to backup backup backup a cluster + restore restore a cluster expire expire old backups (automatically run after backup) General Options: @@ -47,171 +45,98 @@ pg_backrest.pl [options] [operation] Backup Options: --type type of backup to perform (full, diff, incr) + --no-start-stop do not call pg_start/stop_backup(). Postmaster should not be running. + --force force backup when --no-start-stop passed and postmaster.pid exists. + Use with extreme caution as this will probably produce an inconsistent backup! + + Restore Options: + --set backup set to restore (defaults to latest set). + --delta perform a delta restore. + --force force a restore and overwrite all existing files. + with --delta forces size/timestamp deltas. + + Recovery Options: + --type type of recovery: + default - recover to end of archive log stream + name - restore point target + time - timestamp target + xid - transaction id target + preserve - preserve the existing recovery.conf + none - no recovery past database becoming consistent + --target recovery target if type is name, time, or xid. + --target-exclusive stop just before the recovery target (default is inclusive). + --target-resume do not pause after recovery (default is to pause). + --target-timeline recover into specified timeline (default is current timeline). 
+ =cut -#################################################################################################################################### -# Operation constants - basic operations that are allowed in backrest -#################################################################################################################################### -use constant -{ - OP_ARCHIVE_GET => 'archive-get', - OP_ARCHIVE_PUSH => 'archive-push', - OP_BACKUP => 'backup', - OP_EXPIRE => 'expire' -}; - -#################################################################################################################################### -# Configuration constants - configuration sections and keys -#################################################################################################################################### -use constant -{ - CONFIG_SECTION_COMMAND => 'command', - CONFIG_SECTION_COMMAND_OPTION => 'command:option', - CONFIG_SECTION_LOG => 'log', - CONFIG_SECTION_BACKUP => 'backup', - CONFIG_SECTION_ARCHIVE => 'archive', - CONFIG_SECTION_RETENTION => 'retention', - CONFIG_SECTION_STANZA => 'stanza', - - CONFIG_KEY_USER => 'user', - CONFIG_KEY_HOST => 'host', - CONFIG_KEY_PATH => 'path', - - CONFIG_KEY_THREAD_MAX => 'thread-max', - CONFIG_KEY_THREAD_TIMEOUT => 'thread-timeout', - CONFIG_KEY_HARDLINK => 'hardlink', - CONFIG_KEY_ARCHIVE_REQUIRED => 'archive-required', - CONFIG_KEY_ARCHIVE_MAX_MB => 'archive-max-mb', - CONFIG_KEY_START_FAST => 'start-fast', - CONFIG_KEY_COMPRESS_ASYNC => 'compress-async', - - CONFIG_KEY_LEVEL_FILE => 'level-file', - CONFIG_KEY_LEVEL_CONSOLE => 'level-console', - - CONFIG_KEY_COMPRESS => 'compress', - CONFIG_KEY_CHECKSUM => 'checksum', - CONFIG_KEY_PSQL => 'psql', - CONFIG_KEY_REMOTE => 'remote', - - CONFIG_KEY_FULL_RETENTION => 'full-retention', - CONFIG_KEY_DIFFERENTIAL_RETENTION => 'differential-retention', - CONFIG_KEY_ARCHIVE_RETENTION_TYPE => 'archive-retention-type', - CONFIG_KEY_ARCHIVE_RETENTION => 'archive-retention' -}; - -#################################################################################################################################### -# Command line parameters -#################################################################################################################################### -my $strConfigFile; # Configuration file -my $strStanza; # Stanza in the configuration file to load -my $strType; # Type of backup: full, differential (diff), incremental (incr) -my $bVersion = false; # Display version and exit -my $bHelp = false; # Display help and exit - -# Test parameters - not for general use -my $bNoFork = false; # Prevents the archive process from forking when local archiving is enabled -my $bTest = false; # Enters test mode - not harmful in anyway, but adds special logging and pauses for unit testing -my $iTestDelay = 5; # Amount of time to delay after hitting a test point (the default would not be enough for manual tests) - -GetOptions ('config=s' => \$strConfigFile, - 'stanza=s' => \$strStanza, - 'type=s' => \$strType, - 'version' => \$bVersion, - 'help' => \$bHelp, - - # Test parameters - not for general use (and subject to change without notice) - 'no-fork' => \$bNoFork, - 'test' => \$bTest, - 'test-delay=s' => \$iTestDelay) - or pod2usage(2); - -# Display version and exit if requested -if ($bVersion || $bHelp) -{ - print 'pg_backrest ' . version_get() . 
"\n"; - - if (!$bHelp) - { - exit 0; - } -} - -# Display help and exit if requested -if ($bHelp) -{ - print "\n"; - pod2usage(); -} - -# Set test parameters -test_set($bTest, $iTestDelay); - #################################################################################################################################### # Global variables #################################################################################################################################### -my %oConfig; # Configuration hash -my $oRemote; # Remote object +my $oRemote; # Remote protocol object +my $oLocal; # Local protocol object my $strRemote; # Defines which side is remote, DB or BACKUP #################################################################################################################################### -# CONFIG_LOAD - Get a value from the config and be sure that it is defined (unless bRequired is false) +# REMOTE_GET - Get the remote object or create it if not exists #################################################################################################################################### -sub config_key_load +sub remote_get { - my $strSection = shift; - my $strKey = shift; - my $bRequired = shift; - my $strDefault = shift; + my $bForceLocal = shift; + my $iCompressLevel = shift; + my $iCompressLevelNetwork = shift; - # Default is that the key is not required - if (!defined($bRequired)) + # Return the remote if is already defined + if (defined($oRemote)) { - $bRequired = false; + return $oRemote; } - my $strValue; - - # Look in the default stanza section - if ($strSection eq CONFIG_SECTION_STANZA) + # Return the remote when required + if ($strRemote ne NONE && !$bForceLocal) { - $strValue = $oConfig{"${strStanza}"}{"${strKey}"}; - } - # Else look in the supplied section - else - { - # First check the stanza section - $strValue = $oConfig{"${strStanza}:${strSection}"}{"${strKey}"}; + $oRemote = new BackRest::Remote + ( + $strRemote eq DB ? optionGet(OPTION_DB_HOST) : optionGet(OPTION_BACKUP_HOST), + $strRemote eq DB ? optionGet(OPTION_DB_USER) : optionGet(OPTION_BACKUP_USER), + optionGet(OPTION_COMMAND_REMOTE), + optionGet(OPTION_BUFFER_SIZE), + $iCompressLevel, $iCompressLevelNetwork + ); - # If the stanza section value is undefined then check global - if (!defined($strValue)) - { - $strValue = $oConfig{"global:${strSection}"}{"${strKey}"}; - } + return $oRemote; } - if (!defined($strValue) && $bRequired) + # Otherwise return local + if (!defined($oLocal)) { - if (defined($strDefault)) - { - return $strDefault; - } - - confess &log(ERROR, 'config value ' . (defined($strSection) ? $strSection : '[stanza]') . 
"->${strKey} is undefined"); + $oLocal = new BackRest::Remote + ( + undef, undef, undef, + optionGet(OPTION_BUFFER_SIZE), + $iCompressLevel, $iCompressLevelNetwork + ); } - if ($strSection eq CONFIG_SECTION_COMMAND) - { - my $strOption = config_key_load(CONFIG_SECTION_COMMAND_OPTION, $strKey); - - if (defined($strOption)) - { - $strValue =~ s/\%option\%/${strOption}/g; - } - } - - return $strValue; + return $oLocal; } +#################################################################################################################################### +# SAFE_EXIT - terminate all SSH sessions when the script is terminated +#################################################################################################################################### +sub safe_exit +{ + remote_exit(); + + my $iTotal = backup_thread_kill(); + + confess &log(ERROR, "process was terminated on signal, ${iTotal} threads stopped"); +} + +$SIG{TERM} = \&safe_exit; +$SIG{HUP} = \&safe_exit; +$SIG{INT} = \&safe_exit; + #################################################################################################################################### # REMOTE_EXIT - Close the remote object if it exists #################################################################################################################################### @@ -230,101 +155,32 @@ sub remote_exit } } -#################################################################################################################################### -# REMOTE_GET - Get the remote object or create it if not exists -#################################################################################################################################### -sub remote_get() -{ - if (!defined($oRemote) && $strRemote ne REMOTE_NONE) - { - $oRemote = BackRest::Remote->new - ( - strHost => config_key_load($strRemote eq REMOTE_DB ? CONFIG_SECTION_STANZA : CONFIG_SECTION_BACKUP, CONFIG_KEY_HOST, true), - strUser => config_key_load($strRemote eq REMOTE_DB ? 
CONFIG_SECTION_STANZA : CONFIG_SECTION_BACKUP, CONFIG_KEY_USER, true), - strCommand => config_key_load(CONFIG_SECTION_COMMAND, CONFIG_KEY_REMOTE, true) - ); - } - - return $oRemote; -} - -#################################################################################################################################### -# SAFE_EXIT - terminate all SSH sessions when the script is terminated -#################################################################################################################################### -sub safe_exit -{ - remote_exit(); - - my $iTotal = backup_thread_kill(); - - confess &log(ERROR, "process was terminated on signal, ${iTotal} threads stopped"); -} - -$SIG{TERM} = \&safe_exit; -$SIG{HUP} = \&safe_exit; -$SIG{INT} = \&safe_exit; - #################################################################################################################################### # START EVAL BLOCK TO CATCH ERRORS AND STOP THREADS #################################################################################################################################### eval { #################################################################################################################################### -# START MAIN +# Load command line parameters and config #################################################################################################################################### -# Get the operation -my $strOperation = $ARGV[0]; - -# Validate the operation -if (!defined($strOperation)) -{ - confess &log(ERROR, 'operation is not defined'); -} - -if ($strOperation ne OP_ARCHIVE_GET && - $strOperation ne OP_ARCHIVE_PUSH && - $strOperation ne OP_BACKUP && - $strOperation ne OP_EXPIRE) -{ - confess &log(ERROR, "invalid operation ${strOperation}"); -} - -# Type should only be specified for backups -if (defined($strType) && $strOperation ne OP_BACKUP) -{ - confess &log(ERROR, 'type can only be specified for the backup operation') -} - -#################################################################################################################################### -# LOAD CONFIG FILE -#################################################################################################################################### -if (!defined($strConfigFile)) -{ - $strConfigFile = '/etc/pg_backrest.conf'; -} - -config_load($strConfigFile, \%oConfig); - -# Load and check the cluster -if (!defined($strStanza)) -{ - confess 'a backup stanza must be specified'; -} +configLoad(); # Set the log levels -log_level_set(uc(config_key_load(CONFIG_SECTION_LOG, CONFIG_KEY_LEVEL_FILE, true, INFO)), - uc(config_key_load(CONFIG_SECTION_LOG, CONFIG_KEY_LEVEL_CONSOLE, true, ERROR))); +log_level_set(optionGet(OPTION_LOG_LEVEL_FILE), optionGet(OPTION_LOG_LEVEL_CONSOLE)); + +# Set test options +!optionGet(OPTION_TEST) or test_set(optionGet(OPTION_TEST), optionGet(OPTION_TEST_DELAY)); #################################################################################################################################### # DETERMINE IF THERE IS A REMOTE #################################################################################################################################### # First check if backup is remote -if (defined(config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_HOST))) +if (optionTest(OPTION_BACKUP_HOST)) { - $strRemote = REMOTE_BACKUP; + $strRemote = BACKUP; } # Else check if db is remote -elsif (defined(config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_HOST))) +elsif (optionTest(OPTION_DB_HOST)) 
{ # Don't allow both sides to be remote if (defined($strRemote)) @@ -332,46 +188,34 @@ elsif (defined(config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_HOST))) confess &log(ERROR, 'db and backup cannot both be configured as remote'); } - $strRemote = REMOTE_DB; + $strRemote = DB; } else { - $strRemote = REMOTE_NONE; + $strRemote = NONE; } #################################################################################################################################### # ARCHIVE-PUSH Command #################################################################################################################################### -if ($strOperation eq OP_ARCHIVE_PUSH) +if (operationTest(OP_ARCHIVE_PUSH)) { # Make sure the archive push operation happens on the db side - if ($strRemote eq REMOTE_DB) + if ($strRemote eq DB) { confess &log(ERROR, 'archive-push operation must run on the db host'); } # If an archive section has been defined, use that instead of the backup section when operation is OP_ARCHIVE_PUSH - my $bArchiveLocal = defined(config_key_load(CONFIG_SECTION_ARCHIVE, CONFIG_KEY_PATH)); - my $strSection = $bArchiveLocal ? CONFIG_SECTION_ARCHIVE : CONFIG_SECTION_BACKUP; - my $strArchivePath = config_key_load($strSection, CONFIG_KEY_PATH); - - # Get checksum flag - my $bChecksum = config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_CHECKSUM, true, 'y') eq 'y' ? true : false; - - # Get the async compress flag. If compress_async=y then compression is off for the initial push when archiving locally - my $bCompressAsync = false; - - if ($bArchiveLocal) - { - config_key_load($strSection, CONFIG_KEY_COMPRESS_ASYNC, true, 'n') eq 'n' ? false : true; - } + my $bArchiveAsync = optionTest(OPTION_ARCHIVE_ASYNC); + my $strArchivePath = optionGet(OPTION_REPO_PATH); # If logging locally then create the stop archiving file name my $strStopFile; - if ($bArchiveLocal) + if ($bArchiveAsync) { - $strStopFile = "${strArchivePath}/lock/${strStanza}-archive.stop"; + $strStopFile = "${strArchivePath}/lock/" . optionGet(OPTION_STANZA) . "-archive.stop"; } # If an archive file is defined, then push it @@ -388,15 +232,16 @@ if ($strOperation eq OP_ARCHIVE_PUSH) } # Get the compress flag - my $bCompress = $bCompressAsync ? false : config_key_load($strSection, CONFIG_KEY_COMPRESS, true, 'y') eq 'y' ? true : false; + my $bCompress = $bArchiveAsync ? false : optionGet(OPTION_COMPRESS); # Create the file object - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strRemote => $bArchiveLocal ? REMOTE_NONE : $strRemote, - oRemote => $bArchiveLocal ? undef : remote_get(), - strBackupPath => config_key_load($strSection, CONFIG_KEY_PATH, true) + optionGet(OPTION_STANZA), + $bArchiveAsync || $strRemote eq NONE ? optionGet(OPTION_REPO_PATH) : optionGet(OPTION_REPO_REMOTE_PATH), + $bArchiveAsync ? NONE : $strRemote, + remote_get($bArchiveAsync, optionGet(OPTION_COMPRESS_LEVEL), + optionGet(OPTION_COMPRESS_LEVEL_NETWORK)) ); # Init backup @@ -406,45 +251,36 @@ if ($strOperation eq OP_ARCHIVE_PUSH) $oFile, undef, $bCompress, - undef, - !$bChecksum + undef ); - &log(INFO, 'pushing archive log ' . $ARGV[1] . ($bArchiveLocal ? ' asynchronously' : '')); + &log(INFO, 'pushing archive log ' . $ARGV[1] . ($bArchiveAsync ? 
' asynchronously' : '')); - archive_push(config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_PATH), $ARGV[1]); + archive_push(optionGet(OPTION_DB_PATH, false), $ARGV[1], $bArchiveAsync); - # Exit if we are archiving local but no backup host has been defined - if (!($bArchiveLocal && defined(config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_HOST)))) + # Exit if we are archiving async + if (!$bArchiveAsync) { remote_exit(0); } # Fork and exit the parent process so the async process can continue - if (!$bNoFork) + if (!optionTest(OPTION_TEST_NO_FORK) && fork()) { - if (fork()) - { - remote_exit(0); - } + remote_exit(0); } # Else the no-fork flag has been specified for testing else { &log(INFO, 'No fork on archive local for TESTING'); } - } - # If no backup host is defined it makes no sense to run archive-push without a specified archive file so throw an error - if (!defined(config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_HOST))) - { - &log(ERROR, 'archive-push called without an archive file or backup host'); + # Start the async archive push + &log(INFO, 'starting async archive-push'); } - &log(INFO, 'starting async archive-push'); - # Create a lock file to make sure async archive-push does not run more than once - my $strLockPath = "${strArchivePath}/lock/${strStanza}-archive.lock"; + my $strLockPath = "${strArchivePath}/lock/" . optionGet(OPTION_STANZA) . "-archive.lock"; if (!lock_file_create($strLockPath)) { @@ -453,95 +289,52 @@ if ($strOperation eq OP_ARCHIVE_PUSH) } # Build the basic command string that will be used to modify the command during processing - my $strCommand = $^X . ' ' . $0 . " --stanza=${strStanza}"; + my $strCommand = $^X . ' ' . $0 . " --stanza=" . optionGet(OPTION_STANZA); # Get the new operational flags - my $bCompress = config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_COMPRESS, true, 'y') eq 'y' ? true : false; - my $iArchiveMaxMB = config_key_load(CONFIG_SECTION_ARCHIVE, CONFIG_KEY_ARCHIVE_MAX_MB); + my $bCompress = optionGet(OPTION_COMPRESS); + my $iArchiveMaxMB = optionGet(OPTION_ARCHIVE_MAX_MB, false); - # eval - # { - # Create the file object - my $oFile = BackRest::File->new - ( - strStanza => $strStanza, - strRemote => $strRemote, - oRemote => remote_get(), - strBackupPath => config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_PATH, true) - ); + # Create the file object + my $oFile = new BackRest::File + ( + optionGet(OPTION_STANZA), + $strRemote eq NONE ? optionGet(OPTION_REPO_PATH) : optionGet(OPTION_REPO_REMOTE_PATH), + $strRemote, + remote_get(false, optionGet(OPTION_COMPRESS_LEVEL), + optionGet(OPTION_COMPRESS_LEVEL_NETWORK)) + ); - # Init backup - backup_init - ( - undef, - $oFile, - undef, - $bCompress, - undef, - !$bChecksum, - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_THREAD_MAX), - undef, - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_THREAD_TIMEOUT) - ); + # Init backup + backup_init + ( + undef, + $oFile, + undef, + $bCompress, + undef, + 1, #optionGet(OPTION_THREAD_MAX), + undef, + optionGet(OPTION_THREAD_TIMEOUT, false) + ); - # Call the archive_xfer function and continue to loop as long as there are files to process - my $iLogTotal; + # Call the archive_xfer function and continue to loop as long as there are files to process + my $iLogTotal; - while (!defined($iLogTotal) || $iLogTotal > 0) + while (!defined($iLogTotal) || $iLogTotal > 0) + { + $iLogTotal = archive_xfer($strArchivePath . "/archive/" . optionGet(OPTION_STANZA) . 
"/out", $strStopFile, + $strCommand, $iArchiveMaxMB); + + if ($iLogTotal > 0) { - $iLogTotal = archive_xfer($strArchivePath . "/archive/${strStanza}", $strStopFile, $strCommand, $iArchiveMaxMB); - - if ($iLogTotal > 0) - { - &log(DEBUG, "${iLogTotal} archive logs were transferred, calling archive_xfer() again"); - } - else - { - &log(DEBUG, 'no more logs to transfer - exiting'); - } + &log(DEBUG, "${iLogTotal} archive logs were transferred, calling archive_xfer() again"); } - # - # }; - - # # If there were errors above then start compressing - # if ($@) - # { - # if ($bCompressAsync) - # { - # &log(ERROR, "error during transfer: $@"); - # &log(WARN, "errors during transfer, starting compression"); - # - # # Run file_init_archive - this is the minimal config needed to run archive pulling !!! need to close the old file - # my $oFile = BackRest::File->new - # ( - # # strStanza => $strStanza, - # # bNoCompression => false, - # # strBackupPath => config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_PATH, true), - # # strCommand => $0, - # # strCommandCompress => config_key_load(CONFIG_SECTION_COMMAND, CONFIG_KEY_COMPRESS, $bCompress), - # # strCommandDecompress => config_key_load(CONFIG_SECTION_COMMAND, CONFIG_KEY_DECOMPRESS, $bCompress) - # ); - # - # backup_init - # ( - # undef, - # $oFile, - # undef, - # $bCompress, - # undef, - # !$bChecksum, - # config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_THREAD_MAX), - # undef, - # config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_THREAD_TIMEOUT) - # ); - # - # archive_compress($strArchivePath . "/archive/${strStanza}", $strCommand, 256); - # } - # else - # { - # confess $@; - # } - # } + else + { + &log(DEBUG, 'no more logs to transfer - exiting'); + } + } lock_file_remove(); remote_exit(0); @@ -550,7 +343,7 @@ if ($strOperation eq OP_ARCHIVE_PUSH) #################################################################################################################################### # ARCHIVE-GET Command #################################################################################################################################### -if ($strOperation eq OP_ARCHIVE_GET) +if (operationTest(OP_ARCHIVE_GET)) { # Make sure the archive file is defined if (!defined($ARGV[1])) @@ -565,12 +358,14 @@ if ($strOperation eq OP_ARCHIVE_GET) } # Init the file object - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strRemote => $strRemote, - oRemote => remote_get(), - strBackupPath => config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_PATH, true) + optionGet(OPTION_STANZA), + $strRemote eq BACKUP ? optionGet(OPTION_REPO_REMOTE_PATH) : optionGet(OPTION_REPO_PATH), + $strRemote, + remote_get(false, + optionGet(OPTION_COMPRESS_LEVEL), + optionGet(OPTION_COMPRESS_LEVEL_NETWORK)) ); # Init the backup object @@ -584,120 +379,154 @@ if ($strOperation eq OP_ARCHIVE_GET) &log(INFO, 'getting archive log ' . 
$ARGV[1]); # Get the archive file - remote_exit(archive_get(config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_PATH), $ARGV[1], $ARGV[2])); + remote_exit(archive_get(optionGet(OPTION_DB_PATH, false), $ARGV[1], $ARGV[2])); } #################################################################################################################################### -# OPEN THE LOG FILE +# Initialize the default file object #################################################################################################################################### -if (defined(config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_HOST))) +my $oFile = new BackRest::File +( + optionGet(OPTION_STANZA), + $strRemote eq BACKUP ? optionGet(OPTION_REPO_REMOTE_PATH) : optionGet(OPTION_REPO_PATH), + $strRemote, + remote_get(false, + operationTest(OP_EXPIRE) ? OPTION_DEFAULT_COMPRESS_LEVEL : optionGet(OPTION_COMPRESS_LEVEL), + operationTest(OP_EXPIRE) ? OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK : optionGet(OPTION_COMPRESS_LEVEL_NETWORK)) +); + +#################################################################################################################################### +# RESTORE +#################################################################################################################################### +if (operationTest(OP_RESTORE)) { - confess &log(ASSERT, 'backup/expire operations must be performed locally on the backup server'); -} + if ($strRemote eq DB) + { + confess &log(ASSERT, 'restore operation must be performed locally on the db server'); + } -log_file_set(config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_PATH, true) . "/log/${strStanza}"); + # Open the log file + log_file_set(optionGet(OPTION_REPO_PATH) . '/log/' . optionGet(OPTION_STANZA) . '-restore'); + + # Set the lock path + my $strLockPath = optionGet(OPTION_REPO_PATH) . '/lock/' . + optionGet(OPTION_STANZA) . '-' . operationGet() . '.lock'; + + # Do the restore + use BackRest::Restore; + new BackRest::Restore + ( + optionGet(OPTION_DB_PATH), + optionGet(OPTION_SET), + optionGet(OPTION_RESTORE_TABLESPACE_MAP, false), + $oFile, + optionGet(OPTION_THREAD_MAX), + optionGet(OPTION_DELTA), + optionGet(OPTION_FORCE), + optionGet(OPTION_TYPE), + optionGet(OPTION_TARGET, false), + optionGet(OPTION_TARGET_EXCLUSIVE, false), + optionGet(OPTION_TARGET_RESUME, false), + optionGet(OPTION_TARGET_TIMELINE, false), + optionGet(OPTION_RESTORE_RECOVERY_SETTING, false), + optionGet(OPTION_STANZA), + $0, + optionGet(OPTION_CONFIG) + )->restore; + + remote_exit(0); +} #################################################################################################################################### # GET MORE CONFIG INFO #################################################################################################################################### +# Open the log file +log_file_set(optionGet(OPTION_REPO_PATH) . '/log/' . 
optionGet(OPTION_STANZA)); + # Make sure backup and expire operations happen on the backup side -if ($strRemote eq REMOTE_BACKUP) +if ($strRemote eq BACKUP) { confess &log(ERROR, 'backup and expire operations must run on the backup host'); } -# Set the backup type -if (!defined($strType)) -{ - $strType = 'incremental'; -} -elsif ($strType eq 'diff') -{ - $strType = 'differential'; -} -elsif ($strType eq 'incr') -{ - $strType = 'incremental'; -} -elsif ($strType ne 'full' && $strType ne 'differential' && $strType ne 'incremental') -{ - confess &log(ERROR, 'backup type must be full, differential (diff), incremental (incr)'); -} - -# Get the operational flags -my $bCompress = config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_COMPRESS, true, 'y') eq 'y' ? true : false; -my $bChecksum = config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_CHECKSUM, true, 'y') eq 'y' ? true : false; - # Set the lock path -my $strLockPath = config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_PATH, true) . "/lock/${strStanza}-${strOperation}.lock"; +my $strLockPath = optionGet(OPTION_REPO_PATH) . '/lock/' . optionGet(OPTION_STANZA) . '-' . operationGet() . '.lock'; if (!lock_file_create($strLockPath)) { - &log(ERROR, "backup process is already running for stanza ${strStanza} - exiting"); + &log(ERROR, 'backup process is already running for stanza ' . optionGet(OPTION_STANZA) . ' - exiting'); remote_exit(0); } -# Run file_init_archive - the rest of the file config required for backup and restore -my $oFile = BackRest::File->new -( - strStanza => $strStanza, - strRemote => $strRemote, - oRemote => remote_get(), - strBackupPath => config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_PATH, true) -); +# Initialize the db object +use BackRest::Db; +my $oDb; -my $oDb = BackRest::Db->new -( - strDbUser => config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_USER), - strDbHost => config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_HOST), - strCommandPsql => config_key_load(CONFIG_SECTION_COMMAND, CONFIG_KEY_PSQL), - oDbSSH => $oFile->{oDbSSH} -); +if (operationTest(OP_BACKUP)) +{ + if (!optionGet(OPTION_NO_START_STOP)) + { + $oDb = new BackRest::Db + ( + optionGet(OPTION_COMMAND_PSQL), + optionGet(OPTION_DB_HOST, false), + optionGet(OPTION_DB_USER, optionTest(OPTION_DB_HOST)) + ); + } -# Run backup_init - parameters required for backup and restore operations -backup_init -( - $oDb, - $oFile, - $strType, - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_COMPRESS, true, 'y') eq 'y' ? true : false, - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_HARDLINK, true, 'y') eq 'y' ? true : false, - !$bChecksum, - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_THREAD_MAX), - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_ARCHIVE_REQUIRED, true, 'y') eq 'y' ? 
true : false, - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_THREAD_TIMEOUT), - $bTest, - $iTestDelay -); + # Run backup_init - parameters required for backup and restore operations + backup_init + ( + $oDb, + $oFile, + optionGet(OPTION_TYPE), + optionGet(OPTION_COMPRESS), + optionGet(OPTION_HARDLINK), + optionGet(OPTION_THREAD_MAX), + optionGet(OPTION_THREAD_TIMEOUT, false), + optionGet(OPTION_NO_START_STOP), + optionTest(OPTION_FORCE) + ); +} #################################################################################################################################### # BACKUP #################################################################################################################################### -if ($strOperation eq OP_BACKUP) +if (operationTest(OP_BACKUP)) { - backup(config_key_load(CONFIG_SECTION_STANZA, CONFIG_KEY_PATH), - config_key_load(CONFIG_SECTION_BACKUP, CONFIG_KEY_START_FAST, true, 'n') eq 'y' ? true : false); + use BackRest::Backup; + backup(optionGet(OPTION_DB_PATH), optionGet(OPTION_START_FAST)); - $strOperation = OP_EXPIRE; + operationSet(OP_EXPIRE); } #################################################################################################################################### # EXPIRE #################################################################################################################################### -if ($strOperation eq OP_EXPIRE) +if (operationTest(OP_EXPIRE)) { + if (!defined($oDb)) + { + backup_init + ( + undef, + $oFile + ); + } + backup_expire ( $oFile->path_get(PATH_BACKUP_CLUSTER), - config_key_load(CONFIG_SECTION_RETENTION, CONFIG_KEY_FULL_RETENTION), - config_key_load(CONFIG_SECTION_RETENTION, CONFIG_KEY_DIFFERENTIAL_RETENTION), - config_key_load(CONFIG_SECTION_RETENTION, CONFIG_KEY_ARCHIVE_RETENTION_TYPE), - config_key_load(CONFIG_SECTION_RETENTION, CONFIG_KEY_ARCHIVE_RETENTION) + optionGet(OPTION_RETENTION_FULL, false), + optionGet(OPTION_RETENTION_DIFF, false), + optionGet(OPTION_RETENTION_ARCHIVE_TYPE, false), + optionGet(OPTION_RETENTION_ARCHIVE, false) ); lock_file_remove(); } +backup_cleanup(); remote_exit(0); }; @@ -706,6 +535,14 @@ remote_exit(0); #################################################################################################################################### if ($@) { + my $oMessage = $@; + + # If a backrest exception then return the code - don't confess + if ($oMessage->isa('BackRest::Exception')) + { + remote_exit($oMessage->code()); + } + remote_exit(); confess $@; } diff --git a/bin/pg_backrest_remote.pl b/bin/pg_backrest_remote.pl index 5430f30b7..910bec28d 100755 --- a/bin/pg_backrest_remote.pl +++ b/bin/pg_backrest_remote.pl @@ -7,11 +7,11 @@ # Perl includes #################################################################################################################################### use strict; -use warnings; +use warnings FATAL => qw(all); +use Carp qw(confess); use File::Basename; use Getopt::Long; -use Carp; use lib dirname($0) . 
'/../lib'; use BackRest::Utility; @@ -54,16 +54,21 @@ sub param_get log_level_set(OFF, OFF); # Create the remote object -my $oRemote = BackRest::Remote->new(); - -# Create the file object -my $oFile = BackRest::File->new +my $oRemote = new BackRest::Remote ( - oRemote => $oRemote + undef, # Host + undef, # User + 'remote' # Command ); -# Write the greeting so remote process knows who we are -$oRemote->greeting_write(); +# Create the file object +my $oFile = new BackRest::File +( + undef, + undef, + undef, + $oRemote, +); # Command string my $strCommand = OP_NOOP; @@ -77,26 +82,58 @@ while ($strCommand ne OP_EXIT) eval { - # Copy a file to STDOUT - if ($strCommand eq OP_FILE_COPY_OUT) + # Copy file + if ($strCommand eq OP_FILE_COPY || + $strCommand eq OP_FILE_COPY_IN || + $strCommand eq OP_FILE_COPY_OUT) { - $oFile->copy(PATH_ABSOLUTE, param_get(\%oParamHash, 'source_file'), - PIPE_STDOUT, undef, - param_get(\%oParamHash, 'source_compressed'), undef); + my $bResult; + my $strChecksum; + my $iFileSize; - $oRemote->output_write(); - } - # Copy a file from STDIN - elsif ($strCommand eq OP_FILE_COPY_IN) - { - $oFile->copy(PIPE_STDIN, undef, - PATH_ABSOLUTE, param_get(\%oParamHash, 'destination_file'), - undef, param_get(\%oParamHash, 'destination_compress'), - undef, undef, - param_get(\%oParamHash, 'permission', false), - param_get(\%oParamHash, 'destination_path_create')); + # Copy a file locally + if ($strCommand eq OP_FILE_COPY) + { + ($bResult, $strChecksum, $iFileSize) = + $oFile->copy(PATH_ABSOLUTE, param_get(\%oParamHash, 'source_file'), + PATH_ABSOLUTE, param_get(\%oParamHash, 'destination_file'), + param_get(\%oParamHash, 'source_compressed'), + param_get(\%oParamHash, 'destination_compress'), + param_get(\%oParamHash, 'ignore_missing_source', false), + undef, + param_get(\%oParamHash, 'mode', false), + param_get(\%oParamHash, 'destination_path_create') ? 'Y' : 'N', + param_get(\%oParamHash, 'user', false), + param_get(\%oParamHash, 'group', false), + param_get(\%oParamHash, 'append_checksum', false)); + } + # Copy a file from STDIN + elsif ($strCommand eq OP_FILE_COPY_IN) + { + ($bResult, $strChecksum, $iFileSize) = + $oFile->copy(PIPE_STDIN, undef, + PATH_ABSOLUTE, param_get(\%oParamHash, 'destination_file'), + param_get(\%oParamHash, 'source_compressed'), + param_get(\%oParamHash, 'destination_compress'), + undef, undef, + param_get(\%oParamHash, 'mode', false), + param_get(\%oParamHash, 'destination_path_create'), + param_get(\%oParamHash, 'user', false), + param_get(\%oParamHash, 'group', false), + param_get(\%oParamHash, 'append_checksum', false)); + } + # Copy a file to STDOUT + elsif ($strCommand eq OP_FILE_COPY_OUT) + { + ($bResult, $strChecksum, $iFileSize) = + $oFile->copy(PATH_ABSOLUTE, param_get(\%oParamHash, 'source_file'), + PIPE_STDOUT, undef, + param_get(\%oParamHash, 'source_compressed'), + param_get(\%oParamHash, 'destination_compress')); + } - $oRemote->output_write(); + $oRemote->output_write(($bResult ? 'Y' : 'N') . " " . (defined($strChecksum) ? $strChecksum : '?') . " " . + (defined($iFileSize) ? 
$iFileSize : '?')); } # List files in a path elsif ($strCommand eq OP_FILE_LIST) @@ -121,7 +158,7 @@ while ($strCommand ne OP_EXIT) # Create a path elsif ($strCommand eq OP_FILE_PATH_CREATE) { - $oFile->path_create(PATH_ABSOLUTE, param_get(\%oParamHash, 'path'), param_get(\%oParamHash, 'permission', false)); + $oFile->path_create(PATH_ABSOLUTE, param_get(\%oParamHash, 'path'), param_get(\%oParamHash, 'mode', false)); $oRemote->output_write(); } # Check if a file/path exists @@ -129,18 +166,10 @@ while ($strCommand ne OP_EXIT) { $oRemote->output_write($oFile->exists(PATH_ABSOLUTE, param_get(\%oParamHash, 'path')) ? 'Y' : 'N'); } - # Copy a file locally - elsif ($strCommand eq OP_FILE_COPY) + # Wait + elsif ($strCommand eq OP_FILE_WAIT) { - $oRemote->output_write( - $oFile->copy(PATH_ABSOLUTE, param_get(\%oParamHash, 'source_file'), - PATH_ABSOLUTE, param_get(\%oParamHash, 'destination_file'), - param_get(\%oParamHash, 'source_compressed'), - param_get(\%oParamHash, 'destination_compress'), - param_get(\%oParamHash, 'ignore_missing_source', false), - undef, - param_get(\%oParamHash, 'permission', false), - param_get(\%oParamHash, 'destination_path_create')) ? 'Y' : 'N'); + $oRemote->output_write($oFile->wait(PATH_ABSOLUTE)); } # Generate a manifest elsif ($strCommand eq OP_FILE_MANIFEST) @@ -149,7 +178,7 @@ while ($strCommand ne OP_EXIT) $oFile->manifest(PATH_ABSOLUTE, param_get(\%oParamHash, 'path'), \%oManifestHash); - my $strOutput = "name\ttype\tuser\tgroup\tpermission\tmodification_time\tinode\tsize\tlink_destination"; + my $strOutput = "name\ttype\tuser\tgroup\tmode\tmodification_time\tinode\tsize\tlink_destination"; foreach my $strName (sort(keys $oManifestHash{name})) { @@ -157,7 +186,7 @@ while ($strCommand ne OP_EXIT) $oManifestHash{name}{"${strName}"}{type} . "\t" . (defined($oManifestHash{name}{"${strName}"}{user}) ? $oManifestHash{name}{"${strName}"}{user} : "") . "\t" . (defined($oManifestHash{name}{"${strName}"}{group}) ? $oManifestHash{name}{"${strName}"}{group} : "") . "\t" . - (defined($oManifestHash{name}{"${strName}"}{permission}) ? $oManifestHash{name}{"${strName}"}{permission} : "") . "\t" . + (defined($oManifestHash{name}{"${strName}"}{mode}) ? $oManifestHash{name}{"${strName}"}{mode} : "") . "\t" . (defined($oManifestHash{name}{"${strName}"}{modification_time}) ? $oManifestHash{name}{"${strName}"}{modification_time} : "") . "\t" . (defined($oManifestHash{name}{"${strName}"}{inode}) ? $oManifestHash{name}{"${strName}"}{inode} : "") . "\t" . 
diff --git a/doc/doc.dtd b/doc/doc.dtd new file mode 100644 index 000000000..9532b9bc1 --- /dev/null +++ b/doc/doc.dtd @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/doc.pl b/doc/doc.pl new file mode 100755 index 000000000..cb792f9f2 --- /dev/null +++ b/doc/doc.pl @@ -0,0 +1,761 @@ +#!/usr/bin/perl +#################################################################################################################################### +# pg_backrest.pl - Simple Postgres Backup and Restore +#################################################################################################################################### + +#################################################################################################################################### +# Perl includes +#################################################################################################################################### +use strict; +use warnings FATAL => qw(all); +use Carp qw(confess); + +use File::Basename qw(dirname); +use Pod::Usage qw(pod2usage); +use Getopt::Long qw(GetOptions); +use XML::Checker::Parser; + +use lib dirname($0) . '/../lib'; +use BackRest::Utility; +use BackRest::Config; + +#################################################################################################################################### +# Usage +#################################################################################################################################### + +=head1 NAME + +doc.pl - Generate PgBackRest documentation + +=head1 SYNOPSIS + +doc.pl [options] [operation] + + General Options: + --help display usage and exit + +=cut + +#################################################################################################################################### +# DOC_RENDER_TAG - render a tag to another markup language +#################################################################################################################################### +my $oRenderTag = +{ + 'markdown' => + { + 'b' => ['**', '**'], + 'i' => ['_', '_'], + 'bi' => ['_**', '**_'], + 'ul' => ["\n", ''], + 'ol' => ["\n", ''], + 'li' => ['- ', "\n"], + 'id' => ['`', '`'], + 'file' => ['`', '`'], + 'path' => ['`', '`'], + 'cmd' => ['`', '`'], + 'param' => ['`', '`'], + 'setting' => ['`', '`'], + 'code' => ['`', '`'], + 'code-block' => ['```', '```'], + 'backrest' => ['PgBackRest', ''], + 'postgres' => ['PostgreSQL', ''] + }, + + 'html' => + { + 'b' => ['', ''] + } +}; + +sub doc_render_tag +{ + my $oTag = shift; + my $strType = shift; + + my $strBuffer = ""; + + my $strTag = $$oTag{name}; + my $strStart = $$oRenderTag{$strType}{$strTag}[0]; + my $strStop = $$oRenderTag{$strType}{$strTag}[1]; + + if (!defined($strStart) || !defined($strStop)) + { + confess "invalid type ${strType} or tag ${strTag}"; + } + + $strBuffer .= $strStart; + + if ($strTag eq 'li') + { + $strBuffer .= doc_render_text($oTag, $strType); + } + elsif (defined($$oTag{value})) + { + $strBuffer .= $$oTag{value}; + } + elsif (defined($$oTag{children}[0])) + { + foreach my $oSubTag (@{doc_list($oTag)}) + { + $strBuffer .= doc_render_tag($oSubTag, $strType); + } + + } + + $strBuffer .= $strStop; +} + +#################################################################################################################################### +# DOC_RENDER_TEXT - Render a text node 
+#################################################################################################################################### +sub doc_render_text +{ + my $oText = shift; + my $strType = shift; + + my $strBuffer = ""; + + if (defined($$oText{children})) + { + for (my $iIndex = 0; $iIndex < @{$$oText{children}}; $iIndex++) + { + if (ref(\$$oText{children}[$iIndex]) eq "SCALAR") + { + $strBuffer .= $$oText{children}[$iIndex]; + } + else + { + $strBuffer .= doc_render_tag($$oText{children}[$iIndex], $strType); + } + } + } + + return $strBuffer; +} + +#################################################################################################################################### +# DOC_GET - Get a node +#################################################################################################################################### +sub doc_get +{ + my $oDoc = shift; + my $strName = shift; + my $bRequired = shift; + + my $oNode; + + for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) + { + if ($$oDoc{children}[$iIndex]{name} eq $strName) + { + if (!defined($oNode)) + { + $oNode = $$oDoc{children}[$iIndex]; + } + else + { + confess "found more than one child ${strName} in node $$oDoc{name}"; + } + } + } + + if (!defined($oNode) && (!defined($bRequired) || $bRequired)) + { + confess "unable to find child ${strName} in node $$oDoc{name}"; + } + + return $oNode; +} + +#################################################################################################################################### +# DOC_GET - Test if a node exists +#################################################################################################################################### +sub doc_exists +{ + my $oDoc = shift; + my $strName = shift; + + my $bExists = false; + + for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) + { + if ($$oDoc{children}[$iIndex]{name} eq $strName) + { + return true; + } + } + + return false; +} + +#################################################################################################################################### +# DOC_LIST - Get a list of nodes +#################################################################################################################################### +sub doc_list +{ + my $oDoc = shift; + my $strName = shift; + my $bRequired = shift; + + my @oyNode; + + for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) + { + if (!defined($strName) || $$oDoc{children}[$iIndex]{name} eq $strName) + { + push(@oyNode, $$oDoc{children}[$iIndex]); + } + } + + if (@oyNode == 0 && (!defined($bRequired) || $bRequired)) + { + confess "unable to find child ${strName} in node $$oDoc{name}"; + } + + return \@oyNode; +} + +#################################################################################################################################### +# DOC_VALUE - Get value from a node +#################################################################################################################################### +sub doc_value +{ + my $oNode = shift; + my $strDefault = shift; + + if (defined($oNode) && defined($$oNode{value})) + { + return $$oNode{value}; + } + + return $strDefault; +} + +#################################################################################################################################### +# DOC_PARSE - Parse the XML tree into something more usable +#################################################################################################################################### +sub 
doc_parse +{ + my $strName = shift; + my $oyNode = shift; + + my %oOut; + my $iIndex = 0; + my $bText = $strName eq 'text' || $strName eq 'li'; + + # Store the node name + $oOut{name} = $strName; + + if (keys($$oyNode[$iIndex])) + { + $oOut{param} = $$oyNode[$iIndex]; + } + + $iIndex++; + + # Look for strings and children + while (defined($$oyNode[$iIndex])) + { + # Process string data + if (ref(\$$oyNode[$iIndex]) eq 'SCALAR' && $$oyNode[$iIndex] eq '0') + { + $iIndex++; + my $strBuffer = $$oyNode[$iIndex++]; + + # Strip tabs, CRs, and LFs + $strBuffer =~ s/\t|\r//g; + + # If anything is left + if (length($strBuffer) > 0) + { + # If text node then create array entries for strings + if ($bText) + { + if (!defined($oOut{children})) + { + $oOut{children} = []; + } + + push($oOut{children}, $strBuffer); + } + # Don't allow strings mixed with children + elsif (length(trim($strBuffer)) > 0) + { + if (defined($oOut{children})) + { + confess "text mixed with children in node ${strName} (spaces count)"; + } + + if (defined($oOut{value})) + { + confess "value is already defined in node ${strName} - this shouldn't happen"; + } + + # Don't allow text mixed with + $oOut{value} = $strBuffer; + } + } + } + # Process a child + else + { + if (defined($oOut{value}) && $bText) + { + confess "text mixed with children in node ${strName} before child " . $$oyNode[$iIndex++] . " (spaces count)"; + } + + if (!defined($oOut{children})) + { + $oOut{children} = []; + } + + push($oOut{children}, doc_parse($$oyNode[$iIndex++], $$oyNode[$iIndex++])); + } + } + + return \%oOut; +} + +#################################################################################################################################### +# DOC_SAVE - save a doc +#################################################################################################################################### +sub doc_write +{ + my $strFileName = shift; + my $strBuffer = shift; + + # Open the file + my $hFile; + open($hFile, '>', $strFileName) + or confess &log(ERROR, "unable to open ${strFileName}"); + + # Write the buffer + my $iBufferOut = syswrite($hFile, $strBuffer); + + # Report any errors + if (!defined($iBufferOut) || $iBufferOut != length($strBuffer)) + { + confess "unable to write '${strBuffer}'" . (defined($!) ? ': ' . $! : ''); + } + + # Close the file + close($hFile); +} + +#################################################################################################################################### +# Load command line parameters and config +#################################################################################################################################### +my $bHelp = false; # Display usage +my $bVersion = false; # Display version +my $bQuiet = false; # Sets log level to ERROR +my $strLogLevel = 'info'; # Log level for tests + +GetOptions ('help' => \$bHelp, + 'version' => \$bVersion, + 'quiet' => \$bQuiet, + 'log-level=s' => \$strLogLevel) + or pod2usage(2); + +# Display version and exit if requested +if ($bHelp || $bVersion) +{ + print 'pg_backrest ' . version_get() . 
" doc builder\n"; + + if ($bHelp) + { + print "\n"; + pod2usage(); + } + + exit 0; +} + +# Set console log level +if ($bQuiet) +{ + $strLogLevel = 'off'; +} + +log_level_set(undef, uc($strLogLevel)); + +#################################################################################################################################### +# Load the doc file +#################################################################################################################################### +# Initialize parser object and parse the file +my $oParser = XML::Checker::Parser->new(ErrorContext => 2, Style => 'Tree'); +my $strFile = dirname($0) . '/doc.xml'; +my $oTree; + +eval +{ + local $XML::Checker::FAIL = sub + { + my $iCode = shift; + + die XML::Checker::error_string($iCode, @_); + }; + + $oTree = $oParser->parsefile(dirname($0) . '/doc.xml'); +}; + +# Report any error that stopped parsing +if ($@) +{ + $@ =~ s/at \/.*?$//s; # remove module line number + die "malformed xml in '$strFile}':\n" . trim($@); +} + +#################################################################################################################################### +# Build the document from xml +#################################################################################################################################### +my $oDocIn = doc_parse(${$oTree}[0], ${$oTree}[1]); + +sub doc_build +{ + my $oDoc = shift; + + # Initialize the node object + my $oOut = {name => $$oDoc{name}, children => []}; + my $strError = "in node $$oDoc{name}"; + + # Get all params + if (defined($$oDoc{param})) + { + for my $strParam (keys $$oDoc{param}) + { + $$oOut{param}{$strParam} = $$oDoc{param}{$strParam}; + } + } + + if (defined($$oDoc{children})) + { + for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) + { + my $oSub = $$oDoc{children}[$iIndex]; + my $strName = $$oSub{name}; + + if ($strName eq 'text') + { + $$oOut{field}{text} = $oSub; + } + elsif (defined($$oSub{value})) + { + $$oOut{field}{$strName} = $$oSub{value}; + } + elsif (!defined($$oSub{children})) + { + $$oOut{field}{$strName} = true; + } + else + { + push($$oOut{children}, doc_build($oSub)); + } + } + } + + return $oOut; +} + +my $oDocOut = doc_build($oDocIn); + +#################################################################################################################################### +# Build commands pulled from the code +#################################################################################################################################### +# Get the option rules +my $oOptionRule = optionRuleGet(); +my %oOptionFound; + +sub doc_out_get +{ + my $oNode = shift; + my $strName = shift; + my $bRequired = shift; + + foreach my $oChild (@{$$oNode{children}}) + { + if ($$oChild{name} eq $strName) + { + return $oChild; + } + } + + if (!defined($bRequired) || $bRequired) + { + confess "unable to find child node '${strName}' in node '$$oNode{name}'"; + } + + return undef; +} + +sub doc_option_list_process +{ + my $oOptionListOut = shift; + my $strOperation = shift; + + foreach my $oOptionOut (@{$$oOptionListOut{children}}) + { + my $strOption = $$oOptionOut{param}{id}; + + # if (defined($oOptionFound{$strOption})) + # { + # confess "option ${strOption} has already been found"; + # } + + if ($strOption eq 'help' || $strOption eq 'version') + { + next; + } + + $oOptionFound{$strOption} = true; + + if (!defined($$oOptionRule{$strOption}{&OPTION_RULE_TYPE})) + { + confess "unable to find option $strOption"; + } + + $$oOptionOut{field}{default} = 
optionDefault($strOption, $strOperation); + + if (defined($$oOptionOut{field}{default})) + { + $$oOptionOut{field}{required} = false; + + if ($$oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq &OPTION_TYPE_BOOLEAN) + { + $$oOptionOut{field}{default} = $$oOptionOut{field}{default} ? 'y' : 'n'; + } + } + else + { + $$oOptionOut{field}{required} = optionRequired($strOption, $strOperation); + } + + if (defined($strOperation)) + { + $$oOptionOut{field}{cmd} = true; + } + + if ($strOption eq 'cmd-remote') + { + $$oOptionOut{field}{default} = 'same as local'; + } + + # &log(INFO, "operation " . (defined($strOperation) ? $strOperation : '[undef]') . + # ", option ${strOption}, required $$oOptionOut{field}{required}" . + # ", default " . (defined($$oOptionOut{field}{default}) ? $$oOptionOut{field}{default} : 'undef')); + } +} + +# Ouput general options +my $oOperationGeneralOptionListOut = doc_out_get(doc_out_get(doc_out_get($oDocOut, 'operation'), 'operation-general'), 'option-list'); +doc_option_list_process($oOperationGeneralOptionListOut); + +# Ouput commands +my $oCommandListOut = doc_out_get(doc_out_get($oDocOut, 'operation'), 'command-list'); + +foreach my $oCommandOut (@{$$oCommandListOut{children}}) +{ + my $strOperation = $$oCommandOut{param}{id}; + + my $oOptionListOut = doc_out_get($oCommandOut, 'option-list', false); + + if (defined($oOptionListOut)) + { + doc_option_list_process($oOptionListOut, $strOperation); + } + + my $oExampleListOut = doc_out_get($oCommandOut, 'command-example-list'); + + foreach my $oExampleOut (@{$$oExampleListOut{children}}) + { + if (defined($$oExampleOut{param}{title})) + { + $$oExampleOut{param}{title} = 'Example: ' . $$oExampleOut{param}{title}; + } + else + { + $$oExampleOut{param}{title} = 'Example'; + } + } + + # $$oExampleListOut{param}{title} = 'Examples'; +} + +# Ouput config section +my $oConfigSectionListOut = doc_out_get(doc_out_get($oDocOut, 'config'), 'config-section-list'); + +foreach my $oConfigSectionOut (@{$$oConfigSectionListOut{children}}) +{ + my $oOptionListOut = doc_out_get($oConfigSectionOut, 'config-key-list', false); + + if (defined($oOptionListOut)) + { + doc_option_list_process($oOptionListOut); + } +} + +# Mark undocumented features as processed +$oOptionFound{'no-fork'} = true; +$oOptionFound{'test'} = true; +$oOptionFound{'test-delay'} = true; + +# Make sure all options were processed +foreach my $strOption (sort(keys($oOptionRule))) +{ + if (!defined($oOptionFound{$strOption})) + { + confess "option ${strOption} was not found"; + } +} + +#################################################################################################################################### +# Render the document +#################################################################################################################################### +sub doc_render +{ + my $oDoc = shift; + my $strType = shift; + my $iDepth = shift; + my $bChildList = shift; + + my $strBuffer = ""; + my $bList = $$oDoc{name} =~ /.*-bullet-list$/; + $bChildList = defined($bChildList) ? $bChildList : false; + my $iChildDepth = $iDepth; + + if ($strType eq 'markdown') + { + if (defined($$oDoc{param}{id})) + { + my @stryToken = split('-', $$oDoc{name}); + my $strTitle = @stryToken == 0 ? '[unknown]' : $stryToken[@stryToken - 1]; + + $strBuffer = ('#' x $iDepth) . " `$$oDoc{param}{id}` " . $strTitle; + } + + if (defined($$oDoc{param}{title})) + { + $strBuffer = ('#' x $iDepth) . 
' '; + + if (defined($$oDoc{param}{version})) + { + $strBuffer .= "v$$oDoc{param}{version}: "; + } + + $strBuffer .= $$oDoc{param}{title}; + } + + if (defined($$oDoc{param}{subtitle})) + { + if (!defined($$oDoc{param}{subtitle})) + { + confess "subtitle not valid without title"; + } + + $strBuffer .= " - " . $$oDoc{param}{subtitle}; + } + + if ($strBuffer ne "") + { + $iChildDepth++; + } + + if (defined($$oDoc{field}{text})) + { + if ($strBuffer ne "") + { + $strBuffer .= "\n\n"; + } + + if ($bChildList) + { + $strBuffer .= '- '; + } + + $strBuffer .= doc_render_text($$oDoc{field}{text}, $strType); + } + + if ($$oDoc{name} eq 'config-key' || $$oDoc{name} eq 'option') + { + my $strError = "config section ?, key $$oDoc{param}{id} requires"; + + my $bRequired = defined($$oDoc{field}{required}) && $$oDoc{field}{required}; + my $strDefault = $$oDoc{field}{default}; + my $strAllow = $$oDoc{field}{allow}; + my $strOverride = $$oDoc{field}{override}; + my $strExample = $$oDoc{field}{example}; + + if (defined($strExample)) + { + if (index($strExample, '=') == -1) + { + $strExample = "=${strExample}"; + } + else + { + $strExample = " ${strExample}"; + } + + $strExample = "$$oDoc{param}{id}${strExample}"; + + if (defined($$oDoc{field}{cmd}) && $$oDoc{field}{cmd}) + { + $strExample = '--' . $strExample; + + if (index($$oDoc{field}{example}, ' ') != -1) + { + $strExample = "\"${strExample}\""; + } + } + } + + $strBuffer .= "\n```\n" . + "required: " . ($bRequired ? 'y' : 'n') . "\n" . + (defined($strDefault) ? "default: ${strDefault}\n" : '') . + (defined($strAllow) ? "allow: ${strAllow}\n" : '') . + (defined($strOverride) ? "override: ${strOverride}\n" : '') . + (defined($strExample) ? "example: ${strExample}\n" : '') . + "```"; + } + + if ($strBuffer ne "" && $iDepth != 1 && !$bList) + { + $strBuffer = "\n\n" . $strBuffer; + } + } + else + { + confess "unknown type ${strType}"; + } + + my $bFirst = true; + + foreach my $oChild (@{$$oDoc{children}}) + { + if ($strType eq 'markdown') + { + } + else + { + confess "unknown type ${strType}"; + } + + $strBuffer .= doc_render($oChild, $strType, $iChildDepth, $bList); + } + + if ($iDepth == 1) + { + if ($strType eq 'markdown') + { + $strBuffer .= "\n"; + } + else + { + confess "unknown type ${strType}"; + } + } + + return $strBuffer; +} + +# Write markdown +doc_write(dirname($0) . '/../README.md', doc_render($oDocOut, 'markdown', 1)); diff --git a/doc/doc.xml b/doc/doc.xml new file mode 100644 index 000000000..f1b135e2d --- /dev/null +++ b/doc/doc.xml @@ -0,0 +1,799 @@ + + + + + aims to be a simple backup and restore system that can seamlessly scale up to the largest databases and workloads. + + Primary features: +
    +
+    • Local or remote backup
+    • Multi-threaded backup/restore for performance
+    • Checksums
+    • Safe backups (checks that logs required for consistency are present before backup completes)
+    • Full, differential, and incremental backups
+    • Backup rotation (and minimum retention rules with optional separate retention for archive)
+    • In-stream compression/decompression
+    • Archiving and retrieval of logs for replicas/restores built in
+    • Async archiving for very busy systems (including space limits)
+    • Backup directories are consistent Postgres clusters (when hardlinks are on and compression is off)
+    • Tablespace support
+    • Restore delta option
+    • Restore using timestamp/size or checksum
+    • Restore remapping base/tablespaces
+ Instead of relying on traditional backup tools like tar and rsync, PgBackRest implements all backup features internally and uses a custom protocol for communicating with remote systems. Removing the reliance on tar and rsync allows for better solutions to database-specific backup issues. The custom remote protocol limits the types of connections that are required to perform a backup, which increases security.
+
+ + + PgBackRest is written entirely in Perl and uses some non-standard modules that must be installed from CPAN.
+ + + + * Starting from a clean install, update the OS:
+ + apt-get update + apt-get upgrade (reboot if required)
+ + * Install ssh, git and cpanminus:
+ + apt-get install ssh + apt-get install git + apt-get install cpanminus
+ + * Install Postgres (instructions from http://www.postgresql.org/download/linux/ubuntu/)
+ + Create the file /etc/apt/sources.list.d/pgdg.list, and add a line for the repository:
+ + deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main
+ + * Then run the following:
+ + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update
+ + apt-get install postgresql-9.3 + apt-get install postgresql-server-dev-9.3
+ + * Install required Perl modules:
+ + cpanm JSON + cpanm Net::OpenSSH + cpanm IPC::System::Simple + cpanm Digest::SHA + cpanm Compress::ZLib
+ + * Install PgBackRest
+ + PgBackRest can be installed by downloading the most recent release:
+ + https://github.com/pgmasters/backrest/releases
+ + PgBackRest can be installed anywhere but it's best (though not required) to install it in the same location on all systems.
+ + + + + + + These options are either global or used by all commands.
+ + + + + + + + + + + + + + + + + + + + Perform a database backup. PgBackRest does not have a built-in scheduler so it's best to run it from cron or some other scheduling mechanism.
+ + + + + + + + + + + + + + + + /path/to/pg_backrest.pl --stanza=db --type=full backup
+ + Run a full backup on the db stanza. --type can also be set to incr or diff for incremental or differential backups. However, if no full backup exists then a full backup will be forced even if incr or diff is requested.
+ + + + + + + Archive a WAL segment to the repository.
+ + + + + /path/to/pg_backrest.pl --stanza=db archive-push %p
+ + Accepts a WAL segment from PostgreSQL and archives it in the repository. %p is how PostgreSQL specifies the location of the WAL segment to be archived.
+ + + + + + + Get a WAL segment from the repository.
+ + + + + /path/to/pg_backrest.pl --stanza=db archive-get %f %p
+ + Retrieves a WAL segment from the repository. This command is used in recovery.conf to restore a backup, perform PITR, or as an alternative to streaming for keeping a replica up to date. %f is how PostgreSQL specifies the WAL segment it needs and %p is the location where it should be copied.
+ + + + + + + PgBackRest does backup rotation, but is not concerned with when the backups were created. So if two full backups are configured for retention, PgBackRest will keep two full backups no matter whether they occur two hours apart or two weeks apart.
+ + + + + /path/to/pg_backrest.pl --stanza=db expire
+ + Expire (rotate) any backups that exceed the defined retention. Expiration is run automatically after every successful backup, so there is no need to run this command separately unless you have reduced retention, usually to free up some space.
+ + + + + + + Perform a database restore. This command is generally run manually, but there are instances where it might be automated.
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /path/to/pg_backrest.pl --stanza=db --type=name --target=release restore
+ + Restores the latest database backup and then recovers to the release restore point.
+ + + + + + + + PgBackRest can be used entirely with command-line parameters but a configuration file is more practical for installations that are complex or set a lot of options. The default location for the configuration file is /etc/pg_backrest.conf.
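+
+ Because every configuration option can also be passed on the command line, a backup can be run with no configuration file at all. For example (the stanza name and paths here are illustrative, not taken from this patch):
+
+ /path/to/pg_backrest.pl --stanza=main --db-path=/data/db --repo-path=/var/lib/backrest backup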
+ + + + Modify the following settings in postgresql.conf:
+ + wal_level = archive + archive_mode = on + archive_command = '/path/to/backrest/bin/pg_backrest.pl --stanza=db archive-push %p'
+ + Replace the path with the actual location where PgBackRest was installed. The stanza parameter should be changed to the actual stanza name for your database.
+ + + + + The absolute minimum required to run PgBackRest (if all defaults are accepted) is the database path.
+ + /etc/pg_backrest.conf:
+ + [main] + db-path=/data/db
+ + The db-path option could also be provided on the command line, but it's best to use a configuration file as options tend to pile up quickly.
+ + + + This configuration is appropriate for a small installation where backups are being made locally or to a remote file system that is mounted locally. A number of additional options are set:
  • cmd-psql - Custom location and parameters for psql.
  • cmd-psql-option - Options for psql can be set per stanza.
  • compress - Disable compression (handy if the file system is already compressed).
  • repo-path - Path to the repository where backups and WAL archive are stored.
  • log-level-file - Set the file log level to debug (lots of extra info if something is not working as expected).
  • hardlink - Create hardlinks between backups (but never between full backups).
  • thread-max - Use 2 threads for backup/restore operations.
+ /etc/pg_backrest.conf: + + [global:command] + cmd-psql=/usr/local/bin/psql -X %option% + + [global:general] + compress=n + repo-path=/Users/dsteele/Documents/Code/backrest/test/test/backrest + + [global:log] + log-level-file=debug + + [global:backup] + hardlink=y + thread-max=2 + + [main] + db-path=/data/db + + [main:command] + cmd-psql-option=--port=5433 + +
+
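With a configuration like the one above, backups are typically driven from cron since there is no built-in scheduler. The schedule and paths below are purely illustrative, assuming the stanza is named main:

    # weekly full backup, daily differential backups
    30 3 * * 0    /path/to/pg_backrest.pl --stanza=main --type=full backup
    30 3 * * 1-6  /path/to/pg_backrest.pl --stanza=main --type=diff backup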
+ + + This configuration is appropriate for a small installation where backups are being made remotely. Make sure that postgres@db-host has trusted ssh to backrest@backup-host and vice versa. This configuration assumes that you have pg_backrest_remote.pl and pg_backrest.pl in the same path on both servers. + + /etc/pg_backrest.conf on the db host: + + [global:general] + repo-path=/path/to/db/repo + repo-remote-path=/path/to/backup/repo + + [global:backup] + backup-host=backup.mydomain.com + backup-user=backrest + + [global:archive] + archive-async=y + + [main] + db-path=/data/db + + /etc/pg_backrest.conf on the backup host: + + [global:general] + repo-path=/path/to/backup/repo + + [main] + db-host=db.mydomain.com + db-path=/data/db + db-user=postgres + + + +
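Before running a remote backup it is worth confirming that the trusted SSH connections work in both directions without prompting for a password. A quick manual check along these lines (host names taken from the example configuration above) should succeed from each side:

    # on the db host, as the postgres user
    ssh backrest@backup.mydomain.com 'echo ok'

    # on the backup host, as the backrest user
    ssh postgres@db.mydomain.com 'echo ok'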
+ + + + + The command section defines the location of external commands that are used by . + + + + + Defines the full path to psql. psql is used to call pg_start_backup() and pg_stop_backup(). + + If addtional per stanza parameters need to be passed to psql (such as --port or --cluster) then add %option% to the command line and use command-option::psql to set options. + + /usr/bin/psql -X %option% + + + + + Allows per stanza command line parameters to be passed to psql. + + --port=5433 + + + + + Defines the location of pg_backrest_remote.pl. + + Required only if the path to pg_backrest_remote.pl is different on the local and remote systems. If not defined, the remote path will be assumed to be the same as the local path. + + same as local + /usr/lib/backrest/bin/pg_backrest_remote.pl + + + + + + + The log section defines logging-related settings. The following log levels are supported: +
  • off - No logging at all (not recommended)
  • error - Log only errors
  • warn - Log warnings and errors
  • info - Log info, warnings, and errors
  • debug - Log debug, info, warnings, and errors
  • trace - Log trace (very verbose debugging), debug, info, warnings, and errors
+ + + + + Sets file log level. + + debug + + + + + Sets console log level. + + error + + +
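Putting the two settings together, a log section that writes verbose information to the log file while keeping console output quiet might look like the sketch below; the console key name is assumed to follow the same log-level-* pattern as log-level-file:

    [global:log]
    log-level-file=debug
    log-level-console=error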
+ + + + The general section defines settings that are shared between multiple operations. + + + + + Set the buffer size used for copy, compress, and uncompress functions. A maximum of 3 buffers will be in use at a time per thread. An additional maximum of 256K per thread may be used for zlib buffers. + + 4096 - 8388608 + 16384 + + + + + Enable gzip compression. Backup files are compatible with command-line gzip tools. + + n + + + + + Sets the zlib level to be used for file compression when compress=y. + + 0-9 + 9 + + + + + Sets the zlib level to be used for protocol compression when compress=n and the database is not on the same host as the backup. Protocol compression is used to reduce network traffic but can be disabled by setting compress-level-network=0. When compress=y the compress-level-network setting is ignored and compress-level is used instead so that the file is only compressed once. SSH compression is always disabled. + + 0-9 + 1 + + + + + Path to the backrest repository where WAL segments, backups, logs, etc are stored. + + /data/db/backrest + + + + + Path to the remote backrest repository where WAL segments, backups, logs, etc are stored. + + /backup/backrest + + + + + + + The backup section defines settings related to backup. + + + + + Sets the backup host when backup up remotely via SSH. Make sure that trusted SSH authentication is configured between the db host and the backup host. + + When backing up to a locally mounted network filesystem this setting is not required. + + backup.domain.com + + + + + Sets user account on the backup host. + + backrest + + + + + Forces a checkpoint (by passing true to the fast parameter of pg_start_backup()) so the backup begins immediately. + + y + + + + + Enable hard-linking of files in differential and incremental backups to their full backups. This gives the appearance that each backup is a full backup. Be careful, though, because modifying files that are hard-linked can affect all the backups in the set. + + y + + + + + Defines the number of threads to use for backup or restore. Each thread will perform compression and transfer to make the backup run faster, but don't set thread-max so high that it impacts database performance during backup. + 4 + + + + + Maximum amount of time (in seconds) that a backup thread should run. This limits the amount of time that a thread might be stuck due to unforeseen issues during the backup. Has no affect when thread-max=1. + + 3600 + + + + + Checks that all WAL segments required to make the backup consistent are present in the WAL archive. It's a good idea to leave this as the default unless you are using another method for archiving. + + n + + + + + Store WAL segments required to make the backup consistent in the backup's pg_xlog path. This slightly paranoid option protects against corruption or premature expiration in the WAL segment archive. PITR won't be possible without the WAL segment archive and this option also consumes more space. + + y + + + + + + + The archive section defines parameters when doing async archiving. This means that the archive files will be stored locally, then a background process will pick them and move them to the backup. + + + + + + Archive WAL segments asynchronously. WAL segments will be copied to the local repo, then a process will be forked to compress the segment and transfer it to the remote repo if configured. Control will be returned to as soon as the WAL segment is copied locally. 
+ y + + + + + Limits the amount of archive log that will be written locally when compress-async=y. After the limit is reached, the following will happen: +
  1. PgBackRest will notify Postgres that the archive was successfully backed up, then DROP IT.
  2. An error will be logged to the console and also to the Postgres log.
  3. A stop file will be written in the lock directory and no more archive files will be backed up until it is removed.
+ If this occurs then the archive log stream will be interrupted and PITR will not be possible past that point. A new backup will be required to regain full restore capability.
+
+ The purpose of this feature is to prevent the log volume from filling up, at which point Postgres will stop completely. It is better to lose the backup than to have the database go down.
+
+ To start normal archiving again you'll need to remove the stop file, which will be located at ${archive-path}/lock/${stanza}-archive.stop, where ${archive-path} is the path set in the archive section and ${stanza} is the backup stanza.
+ + 1024 +
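As a concrete illustration, if the stanza were named main and the archive path were /path/to/archive (both values are only examples), normal archiving could be resumed by removing the stop file described above:

    rm /path/to/archive/lock/main-archive.stop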
+
+
+ + + + The expire section defines how long backups will be retained. Expiration only occurs when the number of complete backups exceeds the allowed retention. In other words, if full-retention is set to 2, then there must be 3 complete backups before the oldest will be expired. Make sure you always have enough space for rentention + 1 backups. + + + + + Number of full backups to keep. When a full backup expires, all differential and incremental backups associated with the full backup will also expire. When not defined then all full backups will be kept. + + 2 + + + + + Number of differential backups to keep. When a differential backup expires, all incremental backups associated with the differential backup will also expire. When not defined all differential backups will be kept. + + 3 + + + + + Type of backup to use for archive retention (full or differential). If set to full, then PgBackRest will keep archive logs for the number of full backups defined by archive-retention. If set to differential, then PgBackRest will keep archive logs for the number of differential backups defined by archive-retention. + + If not defined then archive logs will be kept indefinitely. In general it is not useful to keep archive logs that are older than the oldest backup, but there may be reasons for doing so. + + diff + + + + + Number of backups worth of archive log to keep. + + 2 + + + + + + + A stanza defines a backup for a specific database. The stanza section must define the base database path and host/user if the database is remote. Also, any global configuration sections can be overridden to define stanza-specific settings. + + + + + Define the database host. Used for backups where the database host is different from the backup host. + + db.domain.com + + + + + Defines user account on the db host when db-host is defined. + + postgres + + + + + Path to the db data directory (data_directory setting in postgresql.conf). + + /data/db + + + +
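Tying the retention settings together, a configuration that keeps two full backups, three differential backups, and archive logs for the last two differential backups might look like the sketch below. The full-retention and archive-retention names are taken from the text above; the section name and the other two keys are assumptions based on the naming conventions used elsewhere in this document:

    [global:retention]
    full-retention=2
    differential-retention=3
    archive-retention-type=diff
    archive-retention=2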
+
+ + + + + + + Added restore functionality. + + + All options can now be set on the command-line making pg_backrest.conf optional. + + + De/compression is now performed without threads and checksum/size is calculated in stream. That means file checksums are no longer optional. + + + Added option --no-start-stop to allow backups when Postgres is shut down. If postmaster.pid is present then --force is required to make the backup run (though if Postgres is running an inconsistent backup will likely be created). This option was added primarily for the purpose of unit testing, but there may be applications in the real world as well. + + + Fixed broken checksums and now they work with normal and resumed backups. Finally realized that checksums and checksum deltas should be functionally separated and this simplied a number of things. Issue #28 has been created for checksum deltas. + + + Fixed an issue where a backup could be resumed from an aborted backup that didn't have the same type and prior backup. + + + Removed dependency on Moose. It wasn't being used extensively and makes for longer startup times. + + + Checksum for backup.manifest to detect corrupted/modified manifest. + + + Link latest always points to the last backup. This has been added for convenience and to make restores simpler. + + + More comprehensive unit tests in all areas. + + + + + + + + Complete rewrite of BackRest::File module to use a custom protocol for remote operations and Perl native GZIP and SHA operations. Compression is performed in threads rather than forked processes. + + + Fairly comprehensive unit tests for all the basic operations. More work to be done here for sure, but then there is always more work to be done on unit tests. + + + Removed dependency on Storable and replaced with a custom ini file implementation. + + + Added much needed documentation + + + Numerous other changes that can only be identified with a diff. + + + + + + + + Working on improving error handling in the file object. This is not complete, but works well enough to find a few errors that have been causing us problems (notably, find is occasionally failing building the archive async manifest when system is under load). + + + Found and squashed a nasty bug where file_copy() was defaulted to ignore errors. There was also an issue in file_exists that was causing the test to fail when the file actually did exist. Together they could have resulted in a corrupt backup with no errors, though it is very unlikely. + + + + + + + + The archive-get operation returns a 1 when the archive file is missing to differentiate from hard errors (ssh connection failure, file copy error, etc.) This lets Postgres know that that the archive stream has terminated normally. However, this does not take into account possible holes in the archive stream. + + + + + + + + If an archive directory which should be empty could not be deleted backrest was throwing an error. There's a good fix for that coming, but for the time being it has been changed to a warning so processing can continue. This was impacting backups as sometimes the final archive file would not get pushed if the first archive file had been in a different directory (plus some bad luck). + + + + + + + + Added RequestTTY=yes to ssh sesssions. Hoping this will prevent random lockups. + + + + + + + + Added archive-get functionality to aid in restores. + + + Added option to force a checkpoint when starting the backup start-fast=y. + + + + + + + + Removed master_stderr_discard option on database SSH connections. 
There have been occasional lockups and they could be related to issues originally seen in the file code. + + + Changed lock file conflicts on backup and expire commands to ERROR. They were set to DEBUG due to a copy-and-paste from the archive locks. + + + + + + + + No restore functionality, but the backup directories are consistent Postgres data directories. You'll need to either uncompress the files or turn off compression in the backup. Uncompressed backups on a ZFS (or similar) filesystem are a good option because backups can be restored locally via a snapshot to create logical backups or do spot data recovery. + + + Archiving is single-threaded. This has not posed an issue on our multi-terabyte databases with heavy write volume. Recommend a large WAL volume or to use the async option with a large volume nearby. + + + Backups are multi-threaded, but the Net::OpenSSH library does not appear to be 100% threadsafe so it will very occasionally lock up on a thread. There is an overall process timeout that resolves this issue by killing the process. Yes, very ugly. + + + Checksums are lost on any resumed backup. Only the final backup will record checksum on multiple resumes. Checksums from previous backups are correctly recorded and a full backup will reset everything. + + + The backup.manifest is being written as Storable because Config::IniFile does not seem to handle large files well. Would definitely like to save these as human-readable text. + + + Absolutely no documentation (outside the code). Well, excepting these release notes. + + + + + + + + Primary recognition goes to Stephen Frost for all his valuable advice and criticism during the development of . + + Resonate (http://www.resonate.com/) also contributed to the development of PgBackRest and allowed me to install early (but well tested) versions as their primary Postgres backup solution. + +
diff --git a/doc/font/alpha_slab_one.woff b/doc/font/alpha_slab_one.woff new file mode 100644 index 0000000000000000000000000000000000000000..52879218968995e71f9c182e9208e288eb72f563 GIT binary patch literal 12444 zcmYj%V{m3&(Crgvl8J366Wg|JPi)(^ZQHh!iEZ1qb@P7Tty{P2^xoBLuhV@_Rafsn z%SBd11ONp1F}h5^@BenW!k_qmkpH6p-$dnP6#)PsYybc>4FG^#^M&;El~tr=006*R ze|Sni{?}6dRN25v&lUgx|H%gA2><{SHllR+n&>(FU=X1{9LWFc@FtdS#y?mr06>@v z0C4)^sITgn8tEAV02FjT9G3t5QTex_=@0mWnf}DMKTZsp4a#L|<>>l@{p15i0{}ow zpC4c%Eo}^bcvQJRbt`_n`Q__$&Pvbqr!I}u&wKd)Iq(fIqP3ot(GO<%6X*Z%sE~4L zt!-@_9034Y_@6lZCnsq!fx>|+kXI5&EY&D#@b_8u*;$=kl-;A+b=4)#^jfpkdNYC= zR-Uq{oz*sT_2n9EJ)`*&!maX?A@0ZV!YC$tJOi`Fqx}@uRewWLBBi*VsZ1uL3`Y+4 z?f4Fb@e;9?B^Bpmf#TK0rPkTTwI{+l&k~jg2MalFL`;+o9xto6!$;rC`z_3s?K9uA z?X$8=u<8}qR`u5GR@Bz|7NL!1K3kpF-on4dFcT!kkHYPf@9pi??fLEP>qoe!n1`5W z*~jT8+6UU_7UE|a9f}aoYOdEmBYZB&^+^RY4m;ctpJIrY>3%nMKtRG8Nz0Q_>qZg>ti%$vNU#jf$@U`*<{_MHX zTbQxw4h-ZV-*67DO?)wpNNDoYCx$dIWjbZnWyTyLGV3i)xI>$)s>#YBAPc&=5-uUt zb_1Oo)OiD)&)0b?Y41X`2X%qso-2V4(qWqep@^IZnRcDiYM>Q7(oyx0Y;hFakK8aw z6D|K~Q*(<2;4$Ds*Aqb46Tsc&`C>xdvdHKCbKKQTO5?~722bp%#QcJlix+Ux__Q#G zG0FWhmp-0oh^t~vSCAu4DS`NPL+}eD_Lq9BV2?dP%(fXfy7yb@66a>1AV~gpr4|slnAEkfGq{ zqKaUBbXa2!=|tKE{zOkknPy*Om=|T;^{_!!Q}Z=SMDWNySyTO9 zk@mp!jb|X|Blat7as&z43`+Op6`>oZ$<~K)#-SUIIBCvFrrNxkvn(&X7S?m3P?WN0stB26Lvh`w@}{ zV{)nQP-*cX2O?L!RdbTHu?s_oF6F?PI-z-khRRw^^yBbcdGuR{I?&3x-{P1fSJFz2 zuw#YPVFo+9vbcU_6Yh zIDzVdyx6lvRhe7SWmi|5b8#@oeTM!Tm~=-lOY%OrH6?SaFUl@%Gq^=i8j_KSjF;PZs(%G7sB! z{-;`Ki;p`F#NG4#4qs1;mvy?E2FiZIe96d6{EQwgwJEPH*XAg81!)j>GnXViAnuPD zXFr3>70BSPF_d0rZ|_)dZ(~SVgr8p?gaG4~5uN@E7_!k{BNH%rDwyBDUxD#akwJyP z!5G=-=)5^6`tNNRK|rA43TWk05r~Hi2PVJ@k3Yn~h|At+hhFj(2Li0Pn`NlaxB&33HU@Qe@v}m;uLY;^kDg{(hFlE0>|Fi!B zKIaG=RKRw?oyj!(ACEc`8y1q0-}br}Ha1HQOI#Q)hPMeUF%mno-!b8ML4gPD!lpoc zrYfemzME?NLq#EYMY7TAX6$i9F6&HBlWoNp|N(m*!-%Ya`R_vq^afD0j zTXARYE=Cgf*`PL~Pu+&{>eb?iWIDl)VQb}iSySyVlPx@VNuIitcWDA(-Op{ZU_3WL zF|}la32^!%A$YfhxwFmx&EiY$m>WM`;#* zK}VW~<@SGXaT3#ee{KS;wtf?nF^<@eGuT7 zwXvcQm~2z|4G>W+WZ{R{6J*rF3=Ydiv%}xF7A$`!+5I11y(-1vLu)~fq zt8TR%P=>PB^$?kTt~ zu0D;@qEMmtqbBo-<3mGqR{IiVzhH_gohgWvIDwG@J+hkNHS|%~>w>x5M+u$nz<@<% z5Dx~k!(zzSn6=$@NE~O>a9EtNki~ax_x?3vSi!N>$;4%PwTb|N`_G~SffDLoVPhs5 z(aI!P2Wh4vrA?3;J@#QyZDX^tbS{}v-svuMVFN8>fx!sMoN=Qvs97APkdf6=O6?Lq zJeN-IUy!Xhky413;Mu1k33nn29Z^-5{N$V4ITC6|EOhuH#nJvgS$STT(5a+$rYu8k zV3u7Rsi;^ebS%_K4=neYhI(6E-RC$ekMQJbA!rmBD;&?b%0!kibA|JePK{Zl6VwF3 zPG76MGD$qLZz67wh5Z1FQ4|MnPHhT5G^5EG*+h^f*Bxls#+pcEKC#r8$tl6!B-(@Z zI=4^7VjP|xuApeB{ls8-4=8ryXBjGr^c7Al=75~x7qz7ja3O@dR9;7Q_xTumHLeo2N6{#> z;7*D%=*&bzKm#E~Ebte20ywK@$r@G?Zn9xAp&QCZ154zztl4ZtrrLF%Qa=74?KX1H zivV$=M$pG0MKdA>Sh|rDxS;=L_Zr%%FkH!)iOcffk@}^wK6y;L?@STnQltq?n`-1J zDU($KnxKw;IB-SCp>aszANIOcoYoGb@nW|OoJH47SZniZWZq>@f%Sf`56MB;nvzn} zi?OsiN*e2~aGznfhDl??NNXPM7M8Zwt#LQ&CpzPyOj%9Ixd=?~XzFI(Ap!QddYZdK z%p2TFaFKA(tTg$a;^%;~uYv3?D|um*7cV?NyYLW8^$5dE>zIo}!`HbI5Jd<|`dltc*=oZ)eF}=xpGWQ$~BKB%2$juE&5FS$C`D z%Jy#PHBOgC5$bGT=51X=PPhFJgCh}7np4Cq>&yBslalu5Mw*kHBj!0aAHAC^1Oehd zGeAUft!s5iGCz%zfYnf8Y>kr->$3S?T7ZHbZzb@gUeX2Bf#dT)XXtXN6 z2Ld+|rsEK@v2D+pQ3+XLgveD&~L|Xi*Y*X>$i+oeD zw}1wyjr!0;j0k2$SIv&XI~b% zF!1Qcraez+AVU~z8_pY$@Vpz;Skp!NQ>_?g_Q? 
z8$I;UqW%Ja4Y%#;#?qj30>~G(Z5)6bKCKo<6MGf*FT?#K`EKC>irN@=niP(mg{7^@ z)4qAzW?nbdgyqmIm9?(>!V(gV&=dd@FaN3|y{dWq$HT(xR%Y%-d;O|Ub_VquspK=w z)1+j`2Jli>a4!7BB^o+zuY`$jE$x})`*yc7hxy$fjb-yprWkVnvC3#tWo661Fn=qQ zrA5Q}3R(hdg}}%Nxe5Co0?Z}tcVIgW`e{D7t>C=!Hdh7)rlT2}#qLn-sK+EB0=I2C zmn9lkt&c$EE#R9^gH@lG>Uljel4LQCD<5eLD|=3by!w(8ul|*P_n~jCHICyFJKMM2 zoSj=jJDTsw?+A*Co*iWh`OIz^CLf$TL?sBVeuC{`_ir%JYAdyje339KD2-WOWXWu7 zs*5F~JZmF*l;9+THxFwxqK+Gs%dm|&)w0b%ziJ>IUVO zLyYT-#_iL(a8b3DA>7KZ$jb9i1-n|P?JkQu^24yHH`k9|f#vHpJ?3!7>=arI`5V&{ z)p{fnf^7p^ZOC4`Pto&w-QE|MF$50V-uT`_On*5Sel2Bgd$lqr)Z zdmz_znjUO~6m5~&6kTuj7r|cU<%$M?D6!vVXF|$W+w1Hkr+D3LBZQ`)5NNeW7)9^r zM7cB81x1as990ISw%V1;a7Bey^oLSFh2zuz;jxAj6-D?I7SHr0O4NDVI^BEmI(Sz17HB`o@v?Z1`7%b-;e$x$SRIC11s`|kVdNII6QdVeD-yW?Vos!kn`@drTJ+3 ztOBA;4bor&*QjK6{q1ZM1WC5z2rL6UjPn;(s{HAio*7??lyE~c!C!}c8r6F*KK;@} z4XSJCgRhR8&A`FI^NVfk4V32`MoPryErJgEp0RPDc)M|kEKnUsx?^C}x0ddbSq=Ju zwnH$phY&y_jlg8v{)*VV z2l^SxmnHt`I!k+G$g9DQkk}9M7bqkmKAW6A72;ouVv}!$c~Uv?f(2|p5$j^vOwG3 zmT^R{>^hlpMH0aN+6=Vf&OWw+&ZX;|h!T-c3sPRla9YND(G~iFtA)aE87zPMGV4Au z*ZnI+4eE=B-LJT?aX8yOx92(MC<4^Usa0>h)Vd@7;Hi0=_{C6=s??&VBQt;`;;4rS zt;8n}E@Q85ZO`svO6bwx^3)xige$pUYLAP!2*<7NE$iX(R5ZXoL7F!(cQszG5F78# zFs>_jBemZ2V{&Z#d|^a(ZsQldhM~XF1zH7lcn^Ck5l$J26CTH zdgOYh#Jm^AtPIYpGUkAFsPSLu^Q8V-%ErL9kdl?=e!+i;Np9dutbk_T86{Z;ro=p} z<7&$4oS0sHJ)zq7OE=0%+d;f3FRzaZ>#ojLiz*0uZY)CbYU>W%VW9XrxG?{PAqC#C zlr*k1pr}|jtBi=zaKi%2086Kr`+Hx5fD`>ijEpRzRba=jqCstDG5UJ|* zO}XQBvm3Ktu%+ORtk1{(a(cmPYtNVsV;E*a?pOGf5#!5x$oblFv0zhG*a0$*BG`o+ zk|p+Az@n`{rg;D%j#@uIDP;Xe^oGuRv#a7g2-aIO>bk&qL{v~vP3yS{E9jRQOY{@+ zGbKE=RzM3?>dVvQJsI00oX?A3@K{on@y)#kP}2#d>-E`X)S|=L02a#*ec&CM8T%^h z>qA6gX4TLkRX1K0{$V?CkQ8~INyGu{fbbB`RocjQz#rvo@7%C{=|&iLB*}|JnmIg_ z$9mh^Cf~ig3f)(Es@JE{gxbL(+ON!qO&%P+F3Zd%us1VkOWtHAC_bZ8tkITP2w9y< z=@NC4t%g1wE+D6@LLcw>qNgDAf2eY^t$=;XL#R(gC4$q%PA7ipIhWflx=Vre*3C%1 z=cmZh<51!0WxK`Mjs6{H55a}ga;sV!IGU5_{JgB`O&$bG$SO`(_s8`4_iZ%GhnlWW zb~Q~OFb=Ck*!sEE&;qr%$P_`Vx+^fhOMStG!*G_+br5I=dM3xo`sIL|&HZ6T%I~a^V{^a4= zfycoR?=E=8t38cnDi>KuYYlg4XctY=*^Z*-7eimkhal2R_g+rff~cB-K~}w#=9R_? 
z<~d8%%)EQo9<_rwn@#Ks@_0)jXeQ^!CED&8itoQpiH@4gdv*eWa=ROuE3);fA6`})VtS#^z*2d0olZXg9fLCx__lxZeX!D9r{(Os37CBDbU{D$ zS(Y?Loe;O&KumWORIgAb%r@#fvV1D$FaFaUYsG4^w;wxvG9!-Lw-gpLqR$9;ncdDHUn))(8@{K3b_gyhZ`%s+hCVcd?PhxKsd!uaA`cIh}rf7RQno4hx)d+xFCz=6a!erg3C9`?~)!<-i4asnvg@Pxd zCh#>#8!?+Fnv>`h<%X3f{>q`vQ@S-T5Jpj(!3iLYnk^T=LKaDZzrH#VFVy7zuosms z>}q06znnYgY?z%|dJ5g4Q=%DuSL-Y(Ad7ycAIrdUMfzhya0XR!XGuY~MpdJ~G7caa zOq3@srRvQLr-Se;-?hk^R)xR=0%C;FW$HclfwXB|b;P<+F|&nC?2zf|j<8qV2W)A~ zhh;}!A#QOLEvL%b%f>&NNMvxtNYp5QT>5aV9@_(Z(p(R7=0JM;OprlS+k9vq8 z=PG|Xk6&$SMjs`&hUEmk{N*BAmEJ$kJ4>QR^7Ko+qF7g2!-N(ug^s zC+EqH&}N3Oj2Yx+wmVFnI%mqOn5k&CB&;iCR667$`%~3j+cD~dpNf*C(KK85I|i17 zStClB+c=WNS&1@AB36{78K#wI3U(V?3Yh6&wlf_2gs+Q5r%!FSF}*W$+qT zlnlpdlQ~YSe^=~)ka^Q7x`R!pMCIHqTKSC09A{daOS?H-q{Q%EnXTzMf6}GhiGPlQ z7_D(f*212D2@L^RLUZLDWm@bc*;AF6wef0>!hOw0J`*F1z%P6w|16k+A`9Gz1ljkaltHh%tBH6I`hZhqOr>54883S zjE?wMRMrVKUxVNb>U%`!L4(`jtZ2~a7&7ds*6n&VCJftg zfVJHuQb>jj|KfZrc{v+5q>8#XX*f9Rv-@}VrAL;S-&5ku!x|J1*6$z}J?ch>LEpJG z{&j1aYD;*G=}|frsga_;(Y~Y}CjlO*ii&t#FKxzd+hrMm$(ue(8oM5(oDXW;SGb8M zys4XBS?M}BFxJ0q#jVh`x{^4F8hj^Mgbp_`Mwar`)#Ex6YCfT8q`ZqojsZi2I%hpXeh zP{p5Ok&8rJtRp7OY_lv4ou0Nq9~Jvz54q0rQLaFD5$qh%X{lXIY1sRtupL<9?j0!- z$D(!oi2fu4YyFg;UGAuW{Fs+q`^WtFoC3xj*BK&c2+O8MTLVvK?Do4IbjOAt2^TUvP^Gm;84=?r;H=TaeYQnDYx;to|$+}o>9S=h% zijU~g(%YL#t7*={3HcGPR^*|rgGOT6aFzii zUp@>ye+Sp9Zp}Le3oD$|!Oh#j*XKG4bsPK7E2c0anPim8^ zhsT#S&Jw5>CROTd85J`a`4BMrzKp@whMoX&H-Ud*C{GJo?htJ(v~XDv!wYqP1H3w@?dzij)OyFEGiTDVm#+ zQ|INz)2q|VTK{ydWDEbctvzG8ndW}F3v(E*NHv|45hq0mffc=?YboxvVmU51YlsHQ zjy=~kXX|Pl#dYWv^;dD$Ddn^STmFt@X93|&hM5Q>%cfkm&r`SLUM96p$iTWnBti?H zPIQa#8$XYfr^6~_sk)JG%SW=M3f6D)hQ^{+^6yONz!!ev;4(F-I}z>%M(d1&Iw9{7 z>>%9@C#Gj-r-94tN=FNrz1B56$)SqHASNx2^0`+m@4*pvC6s_#(oHEx(o2OffE^PN zXs=b_WM|g-WQoG#!OBBSk(%$Wwv@p)sYAsUP4i42^FAKS3qjVrML<7S0I6w_8$dH_oWiw%D>Ear;AI%Lgn&-o zh-*4KL^kG@J9?LIYnY9wCE<1eHet*JPe=j-*zK4-A2`$&8xluhl8p$pan=-bq3t3- z>4Kw5Pj267fYyuwe&isu6|T!aq(f zf8b9JT>lV;vYD|mK}C2Wmfy5p`p_mjGuF_Yp{pyRX9?BT7&c`9Woq-IlzOL^DEZZ& zoz=FY7hAutrimD6*XSAXkq^(NAC^>dpI^@5e-42G zg=h4~QIZmrd@wgi9S{B6`HoAPP$gXcz5eEv`Zuh|{yk z6QQ2daaga;g&mQ}sHUx%$Do_O5+Js9WWt{v)k|kNOd~ZhLT3xYnC{%4_E^qj0sTA3YF}v-!nq z9j%x7wp7vsdHC3vdBkS;)$-Y};WcFk5{2}EHa0EDUMJ7aKFohRgf;U1cD_-;>2d{% zx$Vob9mD~)U5ojI4*3?!3djdYm|kWY0)Yc6 zEk}qQ`^BvT8}=iUG6aN|7705C=#9QFDUrv4bExDS{G|X)tB(uG2+|wqrY2=)x*qu( zWv7R>FNYB+GdqFsvnuiZ-Nt|3(?M&S{Fzb_HpJVjgX)XwBc(4-H|I~^w~*c3RaYWj zudu>4aSNv!Y>2auHmVQm$M|p9Gh<;B1w8n6t7gc6YyLkQ z3vvtftv-Ke^ZmXC@xUt5yNtGWb~@P|jy4XlZu+3#O?*XoXV(RLa|G<#K8EopeL$VJ zm4qt{U<@pzaAlWJ&Tp5PULBj)4f-f5A|6e-nRk`O%;oRynLG0+cC|ZSL_*(JD($B@ zhX20DxeH}Lm0Shp#(mx=v;mnd@?D9`d@ct)oNNlXe}!WOcxc%#)mAE^D$a#vAqZXP zYBs2w$-WVpnZdEi+62+wA#^vGh||0+gdzR?0(1oslMRo8ru}A&F-BJq^lOF0#}@2> zM$9E6?XxWHRN&g9I5tZ;Z*p{qS~cI?cTlNVTrxjpjG}ygc)nI&P5s``+N>i^(wbA( z-fSaI)0mTwa(;Z~rI*VvuTxS|6E^v#C~4LyK*J80+Ov82wlZDzB z736V*EySwEmTR+GSV+IX!?==dP7@JaTqHQnrgZr3BZzXkT3ONJpD%IlxOQ#>ul~oP zzoq|N5A;;>Tl9WmOW?d{{zA6TOvQePT+MwbOm@D)wv_-UPjxh`1clQ27q56Ux7TUV zh_-4CMW&bg-&PwoR_3ID7>M4hmv2xnUGyKF%8x`6AYkg538o9j^$36lIr%{VL#ecO z|Lx2Ep{5P%fF?8Ra|gCRY3d&X1a?do?O&Vz-AI zI}IXE>WeZ#tJM(w+v{(I4y4lI1MuyE52d9A>wX8LF}zUF>GGIlj(o>XNiy&mI|H#V zH;nNX^TTOBmmvGGB=I;&x67VX#!EA%-@hW%Gg0$6@4#g-Rx!7ilZ;Kjp*cm+-<~`ZW^RZTSB+7&p6*Ju{4u5|V7(iq8Rd zmxKc=PA^KZK9o@f#F?1F+mCIeJ*3xY5}`fg^E6 z6k9|Vc6*e*kQU|eNK;1D7jHVQChp-DUg5yKLf#LvzlmLlw0VoF-xo1H`KcF679Bn@ zupU6>@4xBZKRAtR7BN5LJqcYVr!w5W{5n2=5M`&Y6nNP}NyNF^U(FQ1Qp$mb5nnVX z%f9`tr3Q_W?om7q@(+Ldg1Qd`nV20J(Ayj({zm3v@zV(Z@8PMmQl|f%@UZCrMdYr%v=rK%$EY{+{4!E_#Oz(2K8jn+W&I{K8CATCBP|v- 
zv*Ad(`lP+}@%qO4+}_FobM)Z*Z!Q1;YXB7h7(f&N`acl>0QIA$1^_wyxBTd-qd=ks z<P&!W-TPKVn3~nsojcO2o5;kg#s@xnky(b&F>>@kLC#zgg(Ep|_fr=+aBaq= zAUn=ns+6X)i4C zgGe{d%7|t;Hmgxfc0Nd8niZvpgRd4P%NS!A#Yr%a8qEvRX#V>V7w$)gH7{y}ksu^X zL4^@NYRj-IDQ`+R;Mgg_Fgq=3vo=hi(k^*iPux$c=4d_kVc%;zY`XD&JdES@d^+_B zcU;B|^JHK*PY~c)HE%$J{AOCg7yQk<0a*~qG{j~Q#j*v8B+W94tSHSm;HaB9yIzZr zECe>oF9nbF5A4=@y{04s_M|ALG(j5 literal 0 HcmV?d00001 diff --git a/doc/html/default.css b/doc/html/default.css new file mode 100644 index 000000000..90a0af567 --- /dev/null +++ b/doc/html/default.css @@ -0,0 +1,204 @@ +/******************************************************************************* +Html and body +*******************************************************************************/ +html +{ + background-color: #F8F8F8; + font-family: Avenir, Corbel, sans-serif; + font-size: medium; + margin-top: 8px; + margin-left: 1%; + margin-right: 1%; + width: 98%; +} + +body +{ + margin: 0px auto; + padding: 0px; + width: 100%; +} + +@media (min-width: 1000px) +{ + body + { + width: 1000px; + } +} + +/******************************************************************************* +Link default styling +*******************************************************************************/ +a:link +{ + text-decoration: none; + color: black; +} + +a:visited +{ + text-decoration: none; + color: black; +} + +a:hover +{ + text-decoration: underline; + color: black; +} + +a:active +{ + text-decoration: none; + color: black; +} + +/******************************************************************************* +Header +*******************************************************************************/ +.header +{ + width:100%; + text-align:center; + float:left; +} + +.header-title +{ + font-size: 28pt; + font-weight: bolder; +} + +.header-subtitle +{ + position: relative; + top: -.25em; + font-size: larger; + font-weight: bold; +} + +/******************************************************************************* +Menu +*******************************************************************************/ +.menu-set +{ + text-align: center; + font-weight: 600; + border-bottom: 2px #dddddd solid; +} + +.menu-first, .menu +{ + white-space: nowrap; + display: inline; +} + +.menu +{ + margin-left: 6px; +} + +.menu-link +{ + margin-left: 2px; + margin-right: 2px; +} + +/******************************************************************************* +Section +*******************************************************************************/ +doc-install, doc-configure, doc-intro +{ + display:block; + margin-top: 8px; +} + +doc-install-header, doc-configure-header +{ + display:block; + background-color: #dddddd; + font-size: 14pt; + padding-left: 4px; + margin-bottom: 4px; +} + +/******************************************************************************* +SubSection +*******************************************************************************/ +doc-configure-section +{ + display:block; + margin-top: 8px; +} + +doc-configure-section-header +{ + display:block; + border-bottom: 2px #cccccc solid; + font-size: large; + font-weight: 500; + margin-bottom: 4px; +} + +/******************************************************************************* +SubSection2 +*******************************************************************************/ +doc-configure-key +{ + display:block; + margin-top: 8px; + margin-left: 2em; + margin-right: 2em; +} + +doc-configure-key-header +{ + display:block; + font-size: medium; + font-weight: 500; + border-bottom: 1px #dddddd solid; + margin-bottom: 4px; 
+} + +/******************************************************************************* +Code & Detail +*******************************************************************************/ +doc-code, doc-code-block, doc-id, doc-file, doc-function, doc-detail, +doc-setting +{ + font-family: "Lucida Console", Monaco, monospace; + font-size: smaller; +} + +doc-id, doc-file, doc-function +{ + white-space: pre; +} + +doc-code, doc-code-block, doc-detail-block +{ + background-color: #eeeeee; +} + +doc-setting, doc-file +{ + background-color: #e0e0e0; +} + +doc-code-block, doc-detail-block +{ + margin: 8px; + padding: 8px; + display:block; +} + +doc-detail +{ + display: block; +} + +doc-detail-value +{ + margin-left: .5em; +} diff --git a/lib/BackRest/Backup.pm b/lib/BackRest/Backup.pm index 9b681b8b2..5604c2073 100644 --- a/lib/BackRest/Backup.pm +++ b/lib/BackRest/Backup.pm @@ -5,8 +5,8 @@ package BackRest::Backup; use threads; use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); use File::Basename; use File::Path qw(remove_tree); @@ -15,25 +15,28 @@ use Thread::Queue; use lib dirname($0); use BackRest::Utility; +use BackRest::Exception; +use BackRest::Config; +use BackRest::Manifest; use BackRest::File; use BackRest::Db; use Exporter qw(import); -our @EXPORT = qw(backup_init backup_thread_kill archive_push archive_xfer archive_get archive_compress +our @EXPORT = qw(backup_init backup_cleanup backup_thread_kill archive_push archive_xfer archive_get archive_compress backup backup_expire archive_list_get); my $oDb; my $oFile; -my $strType = 'incremental'; # Type of backup: full, differential (diff), incremental (incr) +my $strType; # Type of backup: full, differential (diff), incremental (incr) my $bCompress; my $bHardLink; -my $bNoChecksum; my $iThreadMax; my $iThreadLocalMax; -my $iThreadThreshold = 10; +#my $iThreadThreshold = 10; my $iSmallFileThreshold = 65536; -my $bArchiveRequired; +my $bNoStartStop; +my $bForce; my $iThreadTimeout; # Thread variables @@ -52,20 +55,20 @@ sub backup_init my $strTypeParam = shift; my $bCompressParam = shift; my $bHardLinkParam = shift; - my $bNoChecksumParam = shift; my $iThreadMaxParam = shift; - my $bArchiveRequiredParam = shift; my $iThreadTimeoutParam = shift; + my $bNoStartStopParam = shift; + my $bForceParam = shift; $oDb = $oDbParam; $oFile = $oFileParam; $strType = $strTypeParam; $bCompress = $bCompressParam; $bHardLink = $bHardLinkParam; - $bNoChecksum = $bNoChecksumParam; $iThreadMax = $iThreadMaxParam; - $bArchiveRequired = $bArchiveRequiredParam; $iThreadTimeout = $iThreadTimeoutParam; + $bNoStartStop = $bNoStartStopParam; + $bForce = $bForceParam; if (!defined($iThreadMax)) { @@ -78,6 +81,14 @@ sub backup_init } } +#################################################################################################################################### +# BACKUP_CLEANUP +#################################################################################################################################### +sub backup_cleanup +{ + undef($oFile); +} + #################################################################################################################################### # THREAD_INIT #################################################################################################################################### @@ -163,8 +174,6 @@ sub backup_thread_complete # Rejoin the threads while ($iThreadComplete < $iThreadLocalMax) { - sleep(1); - # If a timeout has been defined, make sure we have not been running 
longer than that if (defined($iTimeout)) { @@ -206,6 +215,9 @@ sub backup_thread_complete } } } + + # Sleep before trying again + hsleep(.1); } &log(DEBUG, 'all threads exited'); @@ -278,6 +290,7 @@ sub archive_push { my $strDbClusterPath = shift; my $strSourceFile = shift; + my $bAsync = shift; # If the source file path is not absolute then it is relative to the data path if (index($strSourceFile, '/',) != 0) @@ -296,12 +309,6 @@ sub archive_push # Determine if this is an archive file (don't want to do compression or checksum on .backup files) my $bArchiveFile = basename($strSourceFile) =~ /^[0-F]{24}$/ ? true : false; - # Append the checksum (if requested) - if ($bArchiveFile && !$bNoChecksum) - { - $strDestinationFile .= '-' . $oFile->hash(PATH_DB_ABSOLUTE, $strSourceFile); - } - # Append compression extension if ($bArchiveFile && $bCompress) { @@ -309,12 +316,15 @@ sub archive_push } # Copy the archive file - $oFile->copy(PATH_DB_ABSOLUTE, $strSourceFile, # Source file - PATH_BACKUP_ARCHIVE, $strDestinationFile, # Destination file - false, # Source is not compressed - $bArchiveFile && $bCompress, # Destination compress is configurable - undef, undef, undef, # Unused params - true); # Create path if it does not exist + $oFile->copy(PATH_DB_ABSOLUTE, $strSourceFile, # Source type/file + $bAsync ? PATH_BACKUP_ARCHIVE_OUT : PATH_BACKUP_ARCHIVE, # Destination type + $strDestinationFile, # Destination file + false, # Source is not compressed + $bArchiveFile && $bCompress, # Destination compress is configurable + undef, undef, undef, # Unused params + true, # Create path if it does not exist + undef, undef, # User and group + $bArchiveFile); # Append checksum if archive file } #################################################################################################################################### @@ -338,7 +348,7 @@ sub archive_xfer foreach my $strFile (sort(keys $oManifestHash{name})) { - if ($strFile =~ /^[0-F]{16}\/[0-F]{24}.*/) + if ($strFile =~ /^[0-F]{24}.*/ || $strFile =~ /^[0-F]{8}\.history$/) { push @stryFile, $strFile; @@ -366,34 +376,12 @@ sub archive_xfer return 0; } - $0 = "${strCommand} archive-push-async " . substr($stryFile[0], 17, 24) . '-' . substr($stryFile[scalar @stryFile - 1], 17, 24); + # Modify process name to indicate async archiving + $0 = "${strCommand} archive-push-async " . $stryFile[0] . '-' . $stryFile[scalar @stryFile - 1]; # Output files to be moved to backup &log(INFO, "archive to be copied to backup total ${lFileTotal}, size " . file_size_format($lFileSize)); - # # Init the thread variables - # $iThreadLocalMax = thread_init(int($lFileTotal / $iThreadThreshold) + 1); - # my $iThreadIdx = 0; - # - # &log(DEBUG, "actual threads ${iThreadLocalMax}/${iThreadMax}"); - # - # # Distribute files among the threads - # foreach my $strFile (sort @stryFile) - # { - # $oThreadQueue[$iThreadIdx]->enqueue($strFile); - # - # $iThreadIdx = ($iThreadIdx + 1 == $iThreadLocalMax) ? 
0 : $iThreadIdx + 1; - # } - # - # # End each thread queue and start the thread - # for ($iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++) - # { - # $oThreadQueue[$iThreadIdx]->enqueue(undef); - # $oThread[$iThreadIdx] = threads->create(\&archive_pull_copy_thread, $iThreadIdx, $strArchivePath); - # } - # - # backup_thread_complete($iThreadTimeout); - # Transfer each file foreach my $strFile (sort @stryFile) { @@ -435,143 +423,10 @@ sub archive_xfer unlink($strArchiveFile) or confess &log(ERROR, "unable to remove ${strArchiveFile}"); } - # Find the archive paths that need to be removed - my $strPathMax = substr((sort {$b cmp $a} @stryFile)[0], 0, 16); - - &log(DEBUG, "local archive path max = ${strPathMax}"); - - foreach my $strPath ($oFile->list(PATH_DB_ABSOLUTE, $strArchivePath, "^[0-F]{16}\$")) - { - if ($strPath lt $strPathMax) - { - &log(DEBUG, "removing local archive path ${strPath}"); - rmdir($strArchivePath . '/' . $strPath) or &log(WARN, "unable to remove archive path ${strPath}, is it empty?"); - } - - # If the dir is not empty check if the files are in the manifest - # If they are error - there has been some issue - # If not, they are new - continue processing without error - they'll be picked up on the next run - } - # Return number of files indicating that processing should continue return $lFileTotal; } -# sub archive_pull_copy_thread -# { -# my @args = @_; -# -# my $iThreadIdx = $args[0]; -# my $strArchivePath = $args[1]; -# -# my $oFileThread = $oFile->clone($iThreadIdx); # Thread local file object -# -# # When a KILL signal is received, immediately abort -# $SIG{'KILL'} = sub {threads->exit();}; -# -# while (my $strFile = $oThreadQueue[$iThreadIdx]->dequeue()) -# { -# &log(INFO, "thread ${iThreadIdx} backing up archive file ${strFile}"); -# -# my $strArchiveFile = "${strArchivePath}/${strFile}"; -# -# # Copy the file -# $oFileThread->file_copy(PATH_DB_ABSOLUTE, $strArchiveFile, -# PATH_BACKUP_ARCHIVE, basename($strFile), -# undef, undef, -# undef); # cannot set permissions remotely yet $oFile->{strDefaultFilePermission}); -# -# # Remove the source archive file -# unlink($strArchiveFile) or confess &log(ERROR, "unable to remove ${strArchiveFile}"); -# } -# } - -sub archive_compress -{ - my $strArchivePath = shift; - my $strCommand = shift; - my $iFileCompressMax = shift; - - # Load the archive manifest - all the files that need to be pushed - my %oManifestHash = $oFile->manifest_get(PATH_DB_ABSOLUTE, $strArchivePath); - - # Get all the files to be compressed and calculate the total size - my @stryFile; - my $lFileSize = 0; - my $lFileTotal = 0; - - foreach my $strFile (sort(keys $oManifestHash{name})) - { - if ($strFile =~ /^[0-F]{16}\/[0-F]{24}(\-[0-f]+){0,1}$/) - { - push @stryFile, $strFile; - - $lFileSize += $oManifestHash{name}{"${strFile}"}{size}; - $lFileTotal++; - - if ($lFileTotal >= $iFileCompressMax) - { - last; - } - } - } - - if ($lFileTotal == 0) - { - &log(DEBUG, 'no archive logs to be compressed'); - - return; - } - - $0 = "${strCommand} archive-compress-async " . substr($stryFile[0], 17, 24) . '-' . substr($stryFile[scalar @stryFile - 1], 17, 24); - - # Output files to be compressed - &log(INFO, "archive to be compressed total ${lFileTotal}, size " . 
file_size_format($lFileSize)); - - # Init the thread variables - $iThreadLocalMax = thread_init(int($lFileTotal / $iThreadThreshold) + 1); - my $iThreadIdx = 0; - - # Distribute files among the threads - foreach my $strFile (sort @stryFile) - { - $oThreadQueue[$iThreadIdx]->enqueue($strFile); - - $iThreadIdx = ($iThreadIdx + 1 == $iThreadLocalMax) ? 0 : $iThreadIdx + 1; - } - - # End each thread queue and start the thread - for ($iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++) - { - $oThreadQueue[$iThreadIdx]->enqueue(undef); - $oThread[$iThreadIdx] = threads->create(\&archive_pull_compress_thread, $iThreadIdx, $strArchivePath); - } - - # Complete the threads - backup_thread_complete($iThreadTimeout); -} - -sub archive_pull_compress_thread -{ - my @args = @_; - - my $iThreadIdx = $args[0]; - my $strArchivePath = $args[1]; - - my $oFileThread = $oFile->clone($iThreadIdx); # Thread local file object - - # When a KILL signal is received, immediately abort - $SIG{'KILL'} = sub {threads->exit();}; - - while (my $strFile = $oThreadQueue[$iThreadIdx]->dequeue()) - { - &log(INFO, "thread ${iThreadIdx} compressing archive file ${strFile}"); - - # Compress the file - $oFileThread->file_compress(PATH_DB_ABSOLUTE, "${strArchivePath}/${strFile}"); - } -} - #################################################################################################################################### # BACKUP_REGEXP_GET - Generate a regexp depending on the backups that need to be found #################################################################################################################################### @@ -640,14 +495,15 @@ sub backup_type_find { my $strType = shift; my $strBackupClusterPath = shift; + my $strDirectory; - if ($strType eq 'incremental') + if ($strType eq BACKUP_TYPE_INCR) { $strDirectory = ($oFile->list(PATH_BACKUP_CLUSTER, undef, backup_regexp_get(1, 1, 1), 'reverse'))[0]; } - if (!defined($strDirectory) && $strType ne 'full') + if (!defined($strDirectory) && $strType ne BACKUP_TYPE_FULL) { $strDirectory = ($oFile->list(PATH_BACKUP_CLUSTER, undef, backup_regexp_get(1, 0, 0), 'reverse'))[0]; } @@ -661,7 +517,8 @@ sub backup_type_find sub backup_file_not_in_manifest { my $strPathType = shift; - my $oManifestRef = shift; + my $oManifest = shift; + my $oAbortedManifest = shift; my %oFileHash; $oFile->manifest($strPathType, undef, \%oFileHash); @@ -683,9 +540,9 @@ sub backup_file_not_in_manifest if ($strBasePath eq $strName) { - my $strSection = $strBasePath eq 'tablespace' ? 'base:tablespace' : "${strBasePath}:path"; + my $strSection = $strBasePath eq 'tablespace' ? 
'backup:tablespace' : "${strBasePath}:path"; - if (defined(${$oManifestRef}{"${strSection}"})) + if ($oManifest->test($strSection)) { next; } @@ -705,7 +562,7 @@ sub backup_file_not_in_manifest if ($strTablespace eq $strPath) { - if (defined(${$oManifestRef}{"${strSection}:path"})) + if ($oManifest->test("${strSection}:path")) { next; } @@ -718,21 +575,28 @@ sub backup_file_not_in_manifest if ($cType eq 'd') { - if (defined(${$oManifestRef}{"${strSection}:path"}{"${strPath}"})) + if ($oManifest->test("${strSection}:path", "${strPath}")) { next; } } else { - if (defined(${$oManifestRef}{"${strSection}:file"}{"${strPath}"})) + if ($oManifest->test("${strSection}:file", "${strPath}")) { - if (${$oManifestRef}{"${strSection}:file"}{"${strPath}"}{size} == + if ($oManifest->get("${strSection}:file", $strPath, MANIFEST_SUBKEY_SIZE) == $oFileHash{name}{"${strName}"}{size} && - ${$oManifestRef}{"${strSection}:file"}{"${strPath}"}{modification_time} == + $oManifest->get("${strSection}:file", $strPath, MANIFEST_SUBKEY_MODIFICATION_TIME) == $oFileHash{name}{"${strName}"}{modification_time}) { - ${$oManifestRef}{"${strSection}:file"}{"${strPath}"}{exists} = true; + my $strChecksum = $oAbortedManifest->get("${strSection}:file", $strPath, MANIFEST_SUBKEY_CHECKSUM, false); + + if (defined($strChecksum)) + { + $oManifest->set("${strSection}:file", $strPath, MANIFEST_SUBKEY_CHECKSUM, $strChecksum); + } + + $oManifest->set("${strSection}:file", $strPath, MANIFEST_SUBKEY_EXISTS, true); next; } } @@ -753,7 +617,8 @@ sub backup_file_not_in_manifest #################################################################################################################################### sub backup_tmp_clean { - my $oManifestRef = shift; + my $oManifest = shift; + my $oAbortedManifest = shift; &log(INFO, 'cleaning backup tmp path'); @@ -770,7 +635,7 @@ sub backup_tmp_clean } # Get the list of files that should be deleted from temp - my @stryFile = backup_file_not_in_manifest(PATH_BACKUP_TMP, $oManifestRef); + my @stryFile = backup_file_not_in_manifest(PATH_BACKUP_TMP, $oManifest, $oAbortedManifest); foreach my $strFile (sort {$b cmp $a} @stryFile) { @@ -791,126 +656,6 @@ sub backup_tmp_clean } } -#################################################################################################################################### -# BACKUP_MANIFEST_BUILD - Create the backup manifest -#################################################################################################################################### -sub backup_manifest_build -{ - my $strDbClusterPath = shift; - my $oBackupManifestRef = shift; - my $oLastManifestRef = shift; - my $oTablespaceMapRef = shift; - my $strLevel = shift; - - if (!defined($strLevel)) - { - $strLevel = 'base'; - } - - my %oManifestHash; - - $oFile->manifest(PATH_DB_ABSOLUTE, $strDbClusterPath, \%oManifestHash); - - foreach my $strName (sort(keys $oManifestHash{name})) - { - # Skip certain files during backup - if ($strName =~ /^pg\_xlog\/.*/ || # pg_xlog/ - this will be reconstructed - $strName =~ /^postmaster\.pid$/) # postmaster.pid - to avoid confusing postgres when restoring - { - next; - } - - my $cType = $oManifestHash{name}{"${strName}"}{type}; - my $strLinkDestination = $oManifestHash{name}{"${strName}"}{link_destination}; - my $strSection = "${strLevel}:path"; - - if ($cType eq 'f') - { - $strSection = "${strLevel}:file"; - } - elsif ($cType eq 'l') - { - $strSection = "${strLevel}:link"; - } - elsif ($cType ne 'd') - { - confess &log(ASSERT, "unrecognized file type $cType 
for file $strName"); - } - - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{user} = $oManifestHash{name}{"${strName}"}{user}; - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{group} = $oManifestHash{name}{"${strName}"}{group}; - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{permission} = $oManifestHash{name}{"${strName}"}{permission}; - - if ($cType eq 'f') - { - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{size} = $oManifestHash{name}{"${strName}"}{size}; - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{modification_time} = $oManifestHash{name}{"${strName}"}{modification_time}; - } - - if ($cType eq 'f') - { - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{inode} = $oManifestHash{name}{"${strName}"}{inode}; - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{size} = $oManifestHash{name}{"${strName}"}{size}; - - if (defined(${$oLastManifestRef}{"${strSection}"}{"${strName}"}{size}) && - defined(${$oLastManifestRef}{"${strSection}"}{"${strName}"}{inode}) && - defined(${$oLastManifestRef}{"${strSection}"}{"${strName}"}{modification_time})) - { - if (${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{size} == - ${$oLastManifestRef}{"${strSection}"}{"${strName}"}{size} && - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{inode} == - ${$oLastManifestRef}{"${strSection}"}{"${strName}"}{inode} && - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{modification_time} == - ${$oLastManifestRef}{"${strSection}"}{"${strName}"}{modification_time}) - { - if (defined(${$oLastManifestRef}{"${strSection}"}{"${strName}"}{reference})) - { - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{reference} = - ${$oLastManifestRef}{"${strSection}"}{"${strName}"}{reference}; - } - else - { - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{reference} = - ${$oLastManifestRef}{backup}{label}; - } - - my $strReference = ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{reference}; - - if (!defined(${$oBackupManifestRef}{backup}{reference})) - { - ${$oBackupManifestRef}{backup}{reference} = $strReference; - } - else - { - if (${$oBackupManifestRef}{backup}{reference} !~ /$strReference/) - { - ${$oBackupManifestRef}{backup}{reference} .= ",${strReference}"; - } - } - } - } - } - - if ($cType eq 'l') - { - ${$oBackupManifestRef}{"${strSection}"}{"${strName}"}{link_destination} = - $oManifestHash{name}{"${strName}"}{link_destination}; - - if (index($strName, 'pg_tblspc/') == 0 && $strLevel eq 'base') - { - my $strTablespaceOid = basename($strName); - my $strTablespaceName = ${$oTablespaceMapRef}{oid}{"${strTablespaceOid}"}{name}; - - ${$oBackupManifestRef}{"${strLevel}:tablespace"}{"${strTablespaceName}"}{oid} = $strTablespaceOid; - ${$oBackupManifestRef}{"${strLevel}:tablespace"}{"${strTablespaceName}"}{path} = $strLinkDestination; - - backup_manifest_build($strLinkDestination, $oBackupManifestRef, $oLastManifestRef, - $oTablespaceMapRef, "tablespace:${strTablespaceName}"); - } - } - } -} - #################################################################################################################################### # BACKUP_FILE - Performs the file level backup # @@ -919,8 +664,8 @@ sub backup_manifest_build #################################################################################################################################### sub backup_file { - my $strDbClusterPath = shift; # Database base data path - my $oBackupManifestRef = shift; # Manifest for the current backup + my $strDbClusterPath = shift; # Database base data path + my 
$oBackupManifest = shift; # Manifest for the current backup # Variables used for parallel copy my $lTablespaceIdx = 0; @@ -931,15 +676,13 @@ sub backup_file my $lFileSmallTotal = 0; # Decide if all the paths will be created in advance - my $bPathCreate = $bHardLink || $strType eq 'full'; + my $bPathCreate = $bHardLink || $strType eq BACKUP_TYPE_FULL; # Iterate through the path sections of the manifest to backup - my $strSectionPath; - - foreach $strSectionPath (sort(keys $oBackupManifestRef)) + foreach my $strSectionPath ($oBackupManifest->keys()) { # Skip non-path sections - if ($strSectionPath !~ /\:path$/) + if ($strSectionPath !~ /\:path$/ || $strSectionPath =~ /^backup\:path$/) { next; } @@ -965,7 +708,8 @@ sub backup_file { $lTablespaceIdx++; my $strTablespaceName = (split(':', $strSectionPath))[1]; - $strBackupSourcePath = ${$oBackupManifestRef}{'base:tablespace'}{"${strTablespaceName}"}{path}; + $strBackupSourcePath = $oBackupManifest->get(MANIFEST_SECTION_BACKUP_TABLESPACE, $strTablespaceName, + MANIFEST_SUBKEY_PATH); $strBackupDestinationPath = "tablespace/${strTablespaceName}"; $strSectionFile = "tablespace:${strTablespaceName}:file"; @@ -976,7 +720,8 @@ sub backup_file $oFile->link_create(PATH_BACKUP_TMP, ${strBackupDestinationPath}, PATH_BACKUP_TMP, - 'base/pg_tblspc/' . ${$oBackupManifestRef}{'base:tablespace'}{"${strTablespaceName}"}{oid}, + 'base/pg_tblspc/' . $oBackupManifest->get(MANIFEST_SECTION_BACKUP_TABLESPACE, $strTablespaceName, + MANIFEST_SUBKEY_LINK), false, true); } } @@ -988,46 +733,39 @@ sub backup_file # Create all the sub paths if this is a full backup or hardlinks are requested if ($bPathCreate) { - my $strPath; - - foreach $strPath (sort(keys ${$oBackupManifestRef}{"${strSectionPath}"})) + foreach my $strPath ($oBackupManifest->keys($strSectionPath)) { - if (defined(${$oBackupManifestRef}{"${strSectionPath}"}{"${strPath}"}{exists})) - { - &log(TRACE, "path ${strPath} already exists from previous backup attempt"); - ${$oBackupManifestRef}{"${strSectionPath}"}{"${strPath}"}{exists} = undef; - } - else - { - $oFile->path_create(PATH_BACKUP_TMP, "${strBackupDestinationPath}/${strPath}", - ${$oBackupManifestRef}{"${strSectionPath}"}{"${strPath}"}{permission}); - } + $oFile->path_create(PATH_BACKUP_TMP, "${strBackupDestinationPath}/${strPath}", undef, true); } } # Possible for the path section to exist with no files (i.e. empty tablespace) - if (!defined(${$oBackupManifestRef}{"${strSectionFile}"})) + if (!$oBackupManifest->test($strSectionFile)) { next; } # Iterate through the files for each backup source path - my $strFile; - - foreach $strFile (sort(keys ${$oBackupManifestRef}{"${strSectionFile}"})) + foreach my $strFile ($oBackupManifest->keys($strSectionFile)) { my $strBackupSourceFile = "${strBackupSourcePath}/${strFile}"; - if (defined(${$oBackupManifestRef}{"${strSectionFile}"}{"${strFile}"}{exists})) + my $bProcess = false; + my $bProcessChecksumOnly = false; + + if ($oBackupManifest->test($strSectionFile, $strFile, MANIFEST_SUBKEY_EXISTS, true)) { &log(TRACE, "file ${strFile} already exists from previous backup attempt"); - ${$oBackupManifestRef}{"${strSectionPath}"}{"${strFile}"}{exists} = undef; + $oBackupManifest->remove($strSectionFile, $strFile, MANIFEST_SUBKEY_EXISTS); + + $bProcess = !$oBackupManifest->test($strSectionFile, $strFile, MANIFEST_SUBKEY_CHECKSUM); + $bProcessChecksumOnly = $bProcess; } else { # If the file has a reference it does not need to be copied since it can be retrieved from the referenced backup. 
# However, if hard-linking is turned on the link will need to be created - my $strReference = ${$oBackupManifestRef}{"${strSectionFile}"}{"${strFile}"}{reference}; + my $strReference = $oBackupManifest->get($strSectionFile, $strFile, MANIFEST_SUBKEY_REFERENCE, false); if (defined($strReference)) { @@ -1043,33 +781,41 @@ sub backup_file # Else copy/compress the file and generate a checksum else { - my $lFileSize = ${$oBackupManifestRef}{"${strSectionFile}"}{"${strFile}"}{size}; - - # Setup variables needed for threaded copy - $lFileTotal++; - $lFileLargeSize += $lFileSize > $iSmallFileThreshold ? $lFileSize : 0; - $lFileLargeTotal += $lFileSize > $iSmallFileThreshold ? 1 : 0; - $lFileSmallSize += $lFileSize <= $iSmallFileThreshold ? $lFileSize : 0; - $lFileSmallTotal += $lFileSize <= $iSmallFileThreshold ? 1 : 0; - - # Load the hash used by threaded copy - my $strKey = sprintf('ts%012x-fs%012x-fn%012x', $lTablespaceIdx, - $lFileSize, $lFileTotal); - - $oFileCopyMap{"${strKey}"}{db_file} = $strBackupSourceFile; - $oFileCopyMap{"${strKey}"}{file_section} = $strSectionFile; - $oFileCopyMap{"${strKey}"}{file} = ${strFile}; - $oFileCopyMap{"${strKey}"}{backup_file} = "${strBackupDestinationPath}/${strFile}"; - $oFileCopyMap{"${strKey}"}{size} = $lFileSize; - $oFileCopyMap{"${strKey}"}{modification_time} = - ${$oBackupManifestRef}{"${strSectionFile}"}{"${strFile}"}{modification_time}; + $bProcess = true; } } + + if ($bProcess) + { + my $lFileSize = $oBackupManifest->get($strSectionFile, $strFile, MANIFEST_SUBKEY_SIZE); + + # Setup variables needed for threaded copy + $lFileTotal++; + $lFileLargeSize += $lFileSize > $iSmallFileThreshold ? $lFileSize : 0; + $lFileLargeTotal += $lFileSize > $iSmallFileThreshold ? 1 : 0; + $lFileSmallSize += $lFileSize <= $iSmallFileThreshold ? $lFileSize : 0; + $lFileSmallTotal += $lFileSize <= $iSmallFileThreshold ? 1 : 0; + + # Load the hash used by threaded copy + my $strKey = sprintf('ts%012x-fs%012x-fn%012x', $lTablespaceIdx, + $lFileSize, $lFileTotal); + + $oFileCopyMap{"${strKey}"}{db_file} = $strBackupSourceFile; + $oFileCopyMap{"${strKey}"}{file_section} = $strSectionFile; + $oFileCopyMap{"${strKey}"}{file} = ${strFile}; + $oFileCopyMap{"${strKey}"}{backup_file} = "${strBackupDestinationPath}/${strFile}"; + $oFileCopyMap{"${strKey}"}{size} = $lFileSize; + $oFileCopyMap{"${strKey}"}{modification_time} = + $oBackupManifest->get($strSectionFile, $strFile, MANIFEST_SUBKEY_MODIFICATION_TIME); + $oFileCopyMap{"${strKey}"}{checksum_only} = $bProcessChecksumOnly; + $oFileCopyMap{"${strKey}"}{checksum} = + $oBackupManifest->get($strSectionFile, $strFile, MANIFEST_SUBKEY_CHECKSUM, false); + } } } # Build the thread queues - $iThreadLocalMax = thread_init(int($lFileTotal / $iThreadThreshold) + 1); + $iThreadLocalMax = thread_init($iThreadMax); &log(DEBUG, "actual threads ${iThreadLocalMax}/${iThreadMax}"); # Initialize the thread size array @@ -1132,139 +878,201 @@ sub backup_file } } - # End each thread queue and start the backu_file threads - for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++) + if ($iThreadLocalMax > 1) { - # Output info about how much work each thread is going to do - &log(DEBUG, "thread ${iThreadIdx} large total $oyThreadData[$iThreadIdx]{large_total}, " . - "size $oyThreadData[$iThreadIdx]{large_size}"); - &log(DEBUG, "thread ${iThreadIdx} small total $oyThreadData[$iThreadIdx]{small_total}, " . 
- "size $oyThreadData[$iThreadIdx]{small_size}"); - - # End each queue - $oThreadQueue[$iThreadIdx]->enqueue(undef); - - # Start the thread - $oThread[$iThreadIdx] = threads->create(\&backup_file_thread, $iThreadIdx, !$bNoChecksum, !$bPathCreate, - $oyThreadData[$iThreadIdx]{size}); - } - - # Wait for the threads to complete - backup_thread_complete($iThreadTimeout); - - # Read the messages that we passed back from the threads. These should be two types: - # 1) remove - files that were skipped because they were removed from the database during backup - # 2) checksum - file checksums calculated by the threads - for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++) - { - while (my $strMessage = $oMasterQueue[$iThreadIdx]->dequeue_nb()) + # End each thread queue and start the backup_file threads + for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++) { - &log (DEBUG, "message received in master queue: ${strMessage}"); + # Output info about how much work each thread is going to do + &log(DEBUG, "thread ${iThreadIdx} large total $oyThreadData[$iThreadIdx]{large_total}, " . + "size $oyThreadData[$iThreadIdx]{large_size}"); + &log(DEBUG, "thread ${iThreadIdx} small total $oyThreadData[$iThreadIdx]{small_total}, " . + "size $oyThreadData[$iThreadIdx]{small_size}"); - # Split the message. Currently using | as the split character. Not ideal, but it will do for now. - my @strSplit = split(/\|/, $strMessage); + # Start the thread + $oThread[$iThreadIdx] = threads->create(\&backup_file_thread, true, $iThreadIdx, !$bPathCreate, + $oyThreadData[$iThreadIdx]{size}, $oBackupManifest); + } - my $strCommand = $strSplit[0]; # Command to execute on a file - my $strFileSection = $strSplit[1]; # File section where the file is located - my $strFile = $strSplit[2]; # The file to act on + # Wait for the threads to complete + backup_thread_complete($iThreadTimeout); - # These three parts are required - if (!defined($strCommand) || !defined($strFileSection) || !defined($strFile)) + # Read the messages that we passed back from the threads. These should be two types: + # 1) remove - files that were skipped because they were removed from the database during backup + # 2) checksum - file checksums calculated by the threads + for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++) + { + while (my $strMessage = $oMasterQueue[$iThreadIdx]->dequeue_nb()) { - confess &log(ASSERT, 'thread messages must have strCommand, strFileSection and strFile defined'); - } + &log (DEBUG, "message received in master queue: ${strMessage}"); - &log (DEBUG, "command = ${strCommand}, file_section = ${strFileSection}, file = ${strFile}"); + # Split the message. Currently using | as the split character. Not ideal, but it will do for now. 
+ my @strSplit = split(/\|/, $strMessage); - # If command is 'remove' then mark the skipped file in the manifest - if ($strCommand eq 'remove') - { - delete ${$oBackupManifestRef}{"${strFileSection}"}{"${strFile}"}; + my $strCommand = $strSplit[0]; # Command to execute on a file + my $strFileSection = $strSplit[1]; # File section where the file is located + my $strFile = $strSplit[2]; # The file to act on - &log (INFO, "removed file ${strFileSection}:${strFile} from the manifest (it was removed by db during backup)"); - } - # If command is 'checksum' then record the checksum in the manifest - elsif ($strCommand eq 'checksum') - { - my $strChecksum = $strSplit[3]; # File checksum calculated by the thread - - # Checksum must be defined - if (!defined($strChecksum)) + # These three parts are required + if (!defined($strCommand) || !defined($strFileSection) || !defined($strFile)) { - confess &log(ASSERT, 'thread checksum messages must have strChecksum defined'); + confess &log(ASSERT, 'thread messages must have strCommand, strFileSection and strFile defined'); } - ${$oBackupManifestRef}{"${strFileSection}"}{"${strFile}"}{checksum} = $strChecksum; + &log (DEBUG, "command = ${strCommand}, file_section = ${strFileSection}, file = ${strFile}"); - # Log the checksum - &log (DEBUG, "write checksum ${strFileSection}:${strFile} into manifest: ${strChecksum}"); + # If command is 'remove' then mark the skipped file in the manifest + if ($strCommand eq 'remove') + { + $oBackupManifest->remove($strFileSection, $strFile); + + &log (INFO, "removed file ${strFileSection}:${strFile} from the manifest (it was removed by db during backup)"); + } + # If command is 'checksum' then record the checksum in the manifest + elsif ($strCommand eq 'checksum') + { + my $strChecksum = $strSplit[3]; # File checksum calculated by the thread + my $lFileSize = $strSplit[4]; # File size calculated by the thread + + # Checksum must be defined + if (!defined($strChecksum)) + { + confess &log(ASSERT, 'thread checksum messages must have strChecksum defined'); + } + + # Checksum must be defined + if (!defined($lFileSize)) + { + confess &log(ASSERT, 'thread checksum messages must have lFileSize defined'); + } + + $oBackupManifest->set($strFileSection, $strFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksum); + $oBackupManifest->set($strFileSection, $strFile, MANIFEST_SUBKEY_SIZE, $lFileSize + 0); + + # Log the checksum + &log (DEBUG, "write checksum ${strFileSection}:${strFile} into manifest: ${strChecksum} (${lFileSize})"); + } } } } + else + { + &log(DEBUG, "starting backup in main process"); + backup_file_thread(false, 0, !$bPathCreate, $oyThreadData[0]{size}, $oBackupManifest); + } } sub backup_file_thread { - my @args = @_; + my $bMulti = shift; # Is this thread one of many? + my $iThreadIdx = shift; # Defines the index of this thread + my $bPathCreate = shift; # Should paths be created automatically? + my $lSizeTotal = shift; # Total size of the files to be copied by this thread + my $oBackupManifest = shift; # Backup manifest object (only used when single-threaded) - my $iThreadIdx = $args[0]; # Defines the index of this thread - my $bChecksum = $args[1]; # Should checksums be generated on files after they have been backed up? - my $bPathCreate = $args[2]; # Should paths be created automatically? 
- my $lSizeTotal = $args[3]; # Total size of the files to be copied by this thread + my $lSize = 0; # Size of files currently copied by this thread + my $strLog; # Store the log message + my $strLogProgress; # Part of the log message that shows progress + my $oFileThread; # Thread local file object + my $bCopyResult; # Copy result + my $strCopyChecksum; # Copy checksum + my $lCopySize; # Copy Size - my $lSize = 0; # Size of files currently copied by this thread - my $strLog; # Store the log message - my $oFileThread = $oFile->clone($iThreadIdx); # Thread local file object + # If multi-threaded, then clone the file object + if ($bMulti) + { + $oFileThread = $oFile->clone($iThreadIdx); + } + else + { + $oFileThread = $oFile; + } # When a KILL signal is received, immediately abort $SIG{'KILL'} = sub {threads->exit();}; # Iterate through all the files in this thread's queue to be copied from the database to the backup - while (my $strFile = $oThreadQueue[$iThreadIdx]->dequeue()) + while (my $strFile = $oThreadQueue[$iThreadIdx]->dequeue_nb()) { # Add the size of the current file to keep track of percent complete $lSize += $oFileCopyMap{$strFile}{size}; - # Output information about the file to be copied - $strLog = "thread ${iThreadIdx} backed up file $oFileCopyMap{$strFile}{db_file} (" . - file_size_format($oFileCopyMap{$strFile}{size}) . - ($lSizeTotal > 0 ? ', ' . int($lSize * 100 / $lSizeTotal) . '%' : '') . ')'; - - # Copy the file from the database to the backup (will return false if the source file is missing) - unless($oFileThread->copy(PATH_DB_ABSOLUTE, $oFileCopyMap{$strFile}{db_file}, - PATH_BACKUP_TMP, $oFileCopyMap{$strFile}{backup_file} . - ($bCompress ? '.' . $oFile->{strCompressExtension} : ''), - false, # Source is not compressed since it is the db directory - $bCompress, # Destination should be compressed based on backup settings - true, # Ignore missing files - undef, undef, # Do not set permissions or modification time - true)) # Create the destiation directory if it does not exist + if (!$oFileCopyMap{$strFile}{checksum_only}) { - # If file is missing assume the database removed it (else corruption and nothing we can do!) - &log(INFO, "thread ${iThreadIdx} skipped file removed by database: " . $oFileCopyMap{$strFile}{db_file}); + # Output information about the file to be copied + $strLog = "thread ${iThreadIdx} backing up file"; - # Write a message into the master queue to have the file removed from the manifest - $oMasterQueue[$iThreadIdx]->enqueue("remove|$oFileCopyMap{$strFile}{file_section}|$oFileCopyMap{$strFile}{file}"); + # Copy the file from the database to the backup (will return false if the source file is missing) + ($bCopyResult, $strCopyChecksum, $lCopySize) = + $oFileThread->copy(PATH_DB_ABSOLUTE, $oFileCopyMap{$strFile}{db_file}, + PATH_BACKUP_TMP, $oFileCopyMap{$strFile}{backup_file} . + ($bCompress ? '.' . $oFile->{strCompressExtension} : ''), + false, # Source is not compressed since it is the db directory + $bCompress, # Destination should be compressed based on backup settings + true, # Ignore missing files + $oFileCopyMap{$strFile}{modification_time}, # Set modification time + undef, # Do not set original mode + true); # Create the destination directory if it does not exist - # Move on to the next file - next; + if (!$bCopyResult) + { + # If file is missing assume the database removed it (else corruption and nothing we can do!) + &log(INFO, "thread ${iThreadIdx} skipped file removed by database: " . 
$oFileCopyMap{$strFile}{db_file}); + + # Remove file from the manifest + if ($bMulti) + { + # Write a message into the master queue to have the file removed from the manifest + $oMasterQueue[$iThreadIdx]->enqueue("remove|$oFileCopyMap{$strFile}{file_section}|". + "$oFileCopyMap{$strFile}{file}"); + } + else + { + # remove it directly + $oBackupManifest->remove($oFileCopyMap{$strFile}{file_section}, $oFileCopyMap{$strFile}{file}); + } + + # Move on to the next file + next; + } } + $strLogProgress = "$oFileCopyMap{$strFile}{db_file} (" . file_size_format($lCopySize) . + ($lSizeTotal > 0 ? ', ' . int($lSize * 100 / $lSizeTotal) . '%' : '') . ')'; + # Generate checksum for file if configured - # if ($bChecksum && $lSize != 0) - # { - # # Generate the checksum - # my $strChecksum = $oFileThread->file_hash_get(PATH_BACKUP_TMP, $oFileCopyMap{$strFile}{backup_file}); - # - # # Write the checksum message into the master queue - # $oMasterQueue[$iThreadIdx]->enqueue("checksum|$oFileCopyMap{$strFile}{file_section}|$oFileCopyMap{$strFile}{file}|${strChecksum}"); - # - # &log(INFO, $strLog . " checksum ${strChecksum}"); - # } - # else - # { - &log(INFO, $strLog); - # } + if ($lCopySize != 0) + { + # Store checksum in the manifest + if ($bMulti) + { + # Write the checksum message into the master queue + $oMasterQueue[$iThreadIdx]->enqueue("checksum|$oFileCopyMap{$strFile}{file_section}|" . + "$oFileCopyMap{$strFile}{file}|${strCopyChecksum}|${lCopySize}"); + } + else + { + # Write it directly + $oBackupManifest->set($oFileCopyMap{$strFile}{file_section}, $oFileCopyMap{$strFile}{file}, + MANIFEST_SUBKEY_CHECKSUM, $strCopyChecksum); + $oBackupManifest->set($oFileCopyMap{$strFile}{file_section}, $oFileCopyMap{$strFile}{file}, + MANIFEST_SUBKEY_SIZE, $lCopySize + 0); + } + + # Output information about the file to be checksummed + if (!defined($strLog)) + { + $strLog = "thread ${iThreadIdx} checksum-only ${strLogProgress}"; + } + + &log(INFO, $strLog . " checksum ${strCopyChecksum}"); + } + else + { + &log(INFO, $strLog . ' ' . $strLogProgress); + } + + &log(TRACE, "thread waiting for new file from queue"); } &log(DEBUG, "thread ${iThreadIdx} exiting"); @@ -1299,54 +1107,168 @@ sub backup # Create the cluster backup path $oFile->path_create(PATH_BACKUP_CLUSTER, undef, undef, true); - # Declare the backup manifest - my %oBackupManifest; - - # Find the previous backup based on the type - my $strBackupLastPath = backup_type_find($strType, $oFile->path_get(PATH_BACKUP_CLUSTER)); - - my %oLastManifest; - - if (defined($strBackupLastPath)) - { - config_load($oFile->path_get(PATH_BACKUP_CLUSTER) . "/${strBackupLastPath}/backup.manifest", \%oLastManifest); - - if (!defined($oLastManifest{backup}{label})) - { - confess &log(ERROR, "unable to find label in backup ${strBackupLastPath}"); - } - - &log(INFO, "last backup label: $oLastManifest{backup}{label}, version $oLastManifest{backup}{version}"); - ${oBackupManifest}{backup}{prior} = $oLastManifest{backup}{label}; - } - # Build backup tmp and config my $strBackupTmpPath = $oFile->path_get(PATH_BACKUP_TMP); my $strBackupConfFile = $oFile->path_get(PATH_BACKUP_TMP, 'backup.manifest'); - # Start backup - ${oBackupManifest}{backup}{timestamp_start} = $strTimestampStart; + # Declare the backup manifest + my $oBackupManifest = new BackRest::Manifest($strBackupConfFile, false); - my $strArchiveStart = $oDb->backup_start('pg_backrest backup started ' . 
$strTimestampStart, $bStartFast); - ${oBackupManifest}{backup}{'archive-start'} = $strArchiveStart; - ${oBackupManifest}{backup}{version} = version_get(); + # Find the previous backup based on the type + my $oLastManifest = undef; - &log(INFO, 'archive start: ' . ${oBackupManifest}{backup}{'archive-start'}); + my $strBackupLastPath = backup_type_find($strType, $oFile->path_get(PATH_BACKUP_CLUSTER)); + + if (defined($strBackupLastPath)) + { + $oLastManifest = new BackRest::Manifest($oFile->path_get(PATH_BACKUP_CLUSTER) . "/${strBackupLastPath}/backup.manifest"); + + &log(INFO, 'last backup label: ' . $oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL) . + ', version ' . $oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_VERSION)); + } + else + { + if ($strType eq BACKUP_TYPE_DIFF) + { + &log(WARN, 'No full backup exists, differential backup has been changed to full'); + } + elsif ($strType eq BACKUP_TYPE_INCR) + { + &log(WARN, 'No prior backup exists, incremental backup has been changed to full'); + } + + $strType = BACKUP_TYPE_FULL; + } + + # Backup settings + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE, undef, $strType); + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_START, undef, $strTimestampStart); + $oBackupManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, $bCompress ? 'y' : 'n'); + $oBackupManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, $bHardLink ? 'y' : 'n'); + + # Start backup (unless no-start-stop is set) + my $strArchiveStart; + + if ($bNoStartStop) + { + if ($oFile->exists(PATH_DB_ABSOLUTE, $strDbClusterPath . '/' . FILE_POSTMASTER_PID)) + { + if ($bForce) + { + &log(WARN, '--no-start-stop passed and ' . FILE_POSTMASTER_PID . ' exists but --force was passed so backup will ' . + 'continue though it looks like the postmaster is running and the backup will probably not be ' . + 'consistent'); + } + else + { + &log(ERROR, '--no-start-stop passed but ' . FILE_POSTMASTER_PID . ' exists - looks like the postmaster is ' . + 'running. Shutdown the postmaster and try again, or use --force.'); + exit 1; + } + } + } + else + { + my $strTimestampDbStart; + + ($strArchiveStart, $strTimestampDbStart) = + $oDb->backup_start('pg_backrest backup started ' . 
$strTimestampStart, $bStartFast); + + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START, undef, $strArchiveStart); + &log(INFO, "archive start: ${strArchiveStart}"); + } + + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_VERSION, undef, version_get()); # Build the backup manifest my %oTablespaceMap; - $oDb->tablespace_map_get(\%oTablespaceMap); - backup_manifest_build($strDbClusterPath, \%oBackupManifest, \%oLastManifest, \%oTablespaceMap); + if (!$bNoStartStop) + { + $oDb->tablespace_map_get(\%oTablespaceMap); + } + + $oBackupManifest->build($oFile, $strDbClusterPath, $oLastManifest, $bNoStartStop, \%oTablespaceMap); &log(TEST, TEST_MANIFEST_BUILD); - # If the backup tmp path already exists, remove invalid files + # Check if an aborted backup exists for this stanza if (-e $strBackupTmpPath) { - &log(WARN, 'aborted backup already exists, will be cleaned to remove invalid files and resumed'); + my $bUsable = false; - # Clean the old backup tmp path - backup_tmp_clean(\%oBackupManifest); + my $strType = $oBackupManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE); + my $strPrior = $oBackupManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, false, ''); + my $strVersion = $oBackupManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_VERSION); + + my $strAbortedType = ''; + my $strAbortedPrior = ''; + my $strAbortedVersion = ''; + my $oAbortedManifest; + + # Attempt to read the manifest file in the aborted backup to see if the backup type and prior backup are the same as the + # new backup that is being started. If any error at all occurs then the backup will be considered unusable and a resume + # will not be attempted. + eval + { + # Load the aborted manifest + $oAbortedManifest = new BackRest::Manifest("${strBackupTmpPath}/backup.manifest"); + + # Default values if they are not set + $strAbortedType = $oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE); + $strAbortedPrior = $oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, false, ''); + $strAbortedVersion = $oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_VERSION); + + # The backup is usable if between the current backup and the aborted backup: + # 1) The version matches + # 2) The type of both is full or the types match and prior matches + if ($strAbortedVersion eq $strVersion) + { + if ($strAbortedType eq BACKUP_TYPE_FULL + && $oBackupManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE) eq BACKUP_TYPE_FULL) + { + $bUsable = true; + } + elsif ($strAbortedType eq $oBackupManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE) && + $strAbortedPrior eq $oBackupManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR)) + { + $bUsable = true; + } + } + }; + + # If the aborted backup is usable then clean it + if ($bUsable) + { + &log(WARN, 'aborted backup of same type exists, will be cleaned to remove invalid files and resumed'); + &log(TEST, TEST_BACKUP_RESUME); + + # Clean the old backup tmp path + backup_tmp_clean($oBackupManifest, $oAbortedManifest); + } + # Else remove it + else + { + my $strReason = "new version '${strVersion}' does not match aborted version '${strVersion}'"; + + if ($strVersion eq $strAbortedVersion) + { + if ($strType ne $strAbortedType) + { + $strReason = "new type '${strType}' does not match aborted type '${strAbortedType}'"; + } + else + { + $strReason = "new prior '${strPrior}' does not match aborted prior '${strAbortedPrior}'"; + } + } + + &log(WARN, "aborted backup exists, but cannot be resumed 
(${strReason}) - will be dropped and recreated"); + &log(TEST, TEST_BACKUP_NORESUME); + + remove_tree($oFile->path_get(PATH_BACKUP_TMP)) + or confess &log(ERROR, "unable to delete tmp path: ${strBackupTmpPath}"); + $oFile->path_create(PATH_BACKUP_TMP); + } } # Else create the backup tmp path else @@ -1355,31 +1277,35 @@ sub backup $oFile->path_create(PATH_BACKUP_TMP); } - # Write the VERSION file - my $hVersionFile; - open($hVersionFile, '>', "${strBackupTmpPath}/version") or confess 'unable to open version file'; - print $hVersionFile version_get(); - close($hVersionFile); - - # Save the backup conf file first time - so we can see what is happening in the backup - config_save($strBackupConfFile, \%oBackupManifest); + # Save the backup manifest + $oBackupManifest->save(); # Perform the backup - backup_file($strDbClusterPath, \%oBackupManifest); + backup_file($strDbClusterPath, $oBackupManifest); - # Stop backup - my $strArchiveStop = $oDb->backup_stop(); + # Stop backup (unless no-start-stop is set) + my $strArchiveStop; - ${oBackupManifest}{backup}{'archive-stop'} = $strArchiveStop; - &log(INFO, 'archive stop: ' . ${oBackupManifest}{backup}{'archive-stop'}); + if (!$bNoStartStop) + { + my $strTimestampDbStop; + ($strArchiveStop, $strTimestampDbStop) = $oDb->backup_stop(); + + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP, undef, $strArchiveStop); + + &log(INFO, 'archive stop: ' . $strArchiveStop); + } # If archive logs are required to complete the backup, then fetch them. This is the default, but can be overridden if the # archive logs are going to a different server. Be careful here because there is no way to verify that the backup will be # consistent - at least not in this routine. - if ($bArchiveRequired) + if (!optionGet(OPTION_NO_START_STOP) && optionGet(OPTION_BACKUP_ARCHIVE_CHECK)) { - # Save the backup conf file second time - before getting archive logs in case that fails - config_save($strBackupConfFile, \%oBackupManifest); + # Save the backup manifest a second time - before getting archive logs in case that fails + $oBackupManifest->save(); + + # Create the modification time for the archive logs + my $lModificationTime = time(); # After the backup has been stopped, need to make a copy of the archive logs need to make the db consistent &log(DEBUG, "retrieving archive logs ${strArchiveStart}:${strArchiveStop}"); @@ -1399,22 +1325,52 @@ sub backup confess &log(ERROR, "Zero or more than one file found for glob: ${strArchivePath}"); } - &log(DEBUG, "archiving: ${strArchive} (${stryArchiveFile[0]})"); + if (optionGet(OPTION_BACKUP_ARCHIVE_COPY)) + { + &log(DEBUG, "archiving: ${strArchive} (${stryArchiveFile[0]})"); - $oFile->copy(PATH_BACKUP_ARCHIVE, $stryArchiveFile[0], - PATH_BACKUP_TMP, "base/pg_xlog/${strArchive}" . ($bCompress ? ".$oFile->{strCompressExtension}" : ''), - $stryArchiveFile[0] =~ "^.*\.$oFile->{strCompressExtension}\$", - $bCompress); + # Copy the log file from the archive repo to the backup + my $strDestinationFile = "base/pg_xlog/${strArchive}" . ($bCompress ? 
".$oFile->{strCompressExtension}" : ''); + + my ($bCopyResult, $strCopyChecksum, $lCopySize) = + $oFile->copy(PATH_BACKUP_ARCHIVE, $stryArchiveFile[0], + PATH_BACKUP_TMP, $strDestinationFile, + $stryArchiveFile[0] =~ "^.*\.$oFile->{strCompressExtension}\$", + $bCompress, undef, $lModificationTime); + + # Add the archive file to the manifest so it can be part of the restore and checked in validation + my $strPathSection = 'base:path'; + my $strPathLog = 'pg_xlog'; + my $strFileSection = 'base:file'; + my $strFileLog = "pg_xlog/${strArchive}"; + + # Compare the checksum against the one already in the archive log name + if ($stryArchiveFile[0] !~ "^${strArchive}-${strCopyChecksum}(\\.$oFile->{strCompressExtension}){0,1}\$") + { + confess &log(ERROR, "error copying log '$stryArchiveFile[0]' to backup - checksum recorded with file does " . + "not match actual checksum of '${strCopyChecksum}'", ERROR_CHECKSUM); + } + + # Set manifest values + $oBackupManifest->set($strFileSection, $strFileLog, MANIFEST_SUBKEY_USER, + $oBackupManifest->get($strPathSection, $strPathLog, MANIFEST_SUBKEY_USER)); + $oBackupManifest->set($strFileSection, $strFileLog, MANIFEST_SUBKEY_GROUP, + $oBackupManifest->get($strPathSection, $strPathLog, MANIFEST_SUBKEY_GROUP)); + $oBackupManifest->set($strFileSection, $strFileLog, MANIFEST_SUBKEY_MODE, '0700'); + $oBackupManifest->set($strFileSection, $strFileLog, MANIFEST_SUBKEY_MODIFICATION_TIME, $lModificationTime); + $oBackupManifest->set($strFileSection, $strFileLog, MANIFEST_SUBKEY_SIZE, $lCopySize); + $oBackupManifest->set($strFileSection, $strFileLog, MANIFEST_SUBKEY_CHECKSUM, $strCopyChecksum); + } } } # Create the path for the new backup my $strBackupPath; - if ($strType eq 'full' || !defined($strBackupLastPath)) + if ($strType eq BACKUP_TYPE_FULL || !defined($strBackupLastPath)) { $strBackupPath = timestamp_file_string_get() . 'F'; - $strType = 'full'; + $strType = BACKUP_TYPE_FULL; } else { @@ -1422,7 +1378,7 @@ sub backup $strBackupPath .= '_' . timestamp_file_string_get(); - if ($strType eq 'differential') + if ($strType eq BACKUP_TYPE_DIFF) { $strBackupPath .= 'D'; } @@ -1433,17 +1389,21 @@ sub backup } # Record timestamp stop in the config - ${oBackupManifest}{backup}{timestamp_stop} = timestamp_string_get(); - ${oBackupManifest}{backup}{label} = $strBackupPath; + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_STOP, undef, timestamp_string_get()); + $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL, undef, $strBackupPath); - # Save the backup conf file final time - config_save($strBackupConfFile, \%oBackupManifest); + # Save the backup manifest final time + $oBackupManifest->save(); &log(INFO, "new backup label: ${strBackupPath}"); # Rename the backup tmp path to complete the backup &log(DEBUG, "moving ${strBackupTmpPath} to " . $oFile->path_get(PATH_BACKUP_CLUSTER, $strBackupPath)); $oFile->move(PATH_BACKUP_TMP, undef, PATH_BACKUP_CLUSTER, $strBackupPath); + + # Create a link to the most recent backup + $oFile->remove(PATH_BACKUP_CLUSTER, "latest"); + $oFile->link_create(PATH_BACKUP_CLUSTER, $strBackupPath, PATH_BACKUP_CLUSTER, "latest", undef, true); } #################################################################################################################################### @@ -1549,7 +1509,8 @@ sub backup_expire # be consistent if the process dies foreach $strPath ($oFile->list(PATH_BACKUP_CLUSTER, undef, '^' . $stryPath[$iIndex] . 
'.*', 'reverse')) { - system("rm -rf ${strBackupClusterPath}/${strPath}") == 0 or confess &log(ERROR, "unable to delete backup ${strPath}"); + system("rm -rf ${strBackupClusterPath}/${strPath}") == 0 + or confess &log(ERROR, "unable to delete backup ${strPath}"); } &log(INFO, 'removed expired full backup: ' . $stryPath[$iIndex]); @@ -1581,7 +1542,8 @@ sub backup_expire # Remove all differential and incremental backups before the oldest valid differential if ($strPath lt $stryPath[$iDifferentialRetention - 1]) { - system("rm -rf ${strBackupClusterPath}/${strPath}") == 0 or confess &log(ERROR, "unable to delete backup ${strPath}"); + system("rm -rf ${strBackupClusterPath}/${strPath}") == 0 + or confess &log(ERROR, "unable to delete backup ${strPath}"); &log(INFO, "removed expired diff/incr backup ${strPath}"); } } @@ -1596,7 +1558,7 @@ sub backup_expire } # Determine which backup type to use for archive retention (full, differential, incremental) - if ($strArchiveRetentionType eq 'full') + if ($strArchiveRetentionType eq BACKUP_TYPE_FULL) { if (!defined($iArchiveRetention)) { @@ -1605,7 +1567,7 @@ sub backup_expire @stryPath = $oFile->list(PATH_BACKUP_CLUSTER, undef, backup_regexp_get(1, 0, 0), 'reverse'); } - elsif ($strArchiveRetentionType eq 'differential' || $strArchiveRetentionType eq 'diff') + elsif ($strArchiveRetentionType eq BACKUP_TYPE_DIFF) { if (!defined($iArchiveRetention)) { @@ -1614,7 +1576,7 @@ sub backup_expire @stryPath = $oFile->list(PATH_BACKUP_CLUSTER, undef, backup_regexp_get(1, 1, 0), 'reverse'); } - elsif ($strArchiveRetentionType eq 'incremental' || $strArchiveRetentionType eq 'incr') + elsif ($strArchiveRetentionType eq BACKUP_TYPE_INCR) { @stryPath = $oFile->list(PATH_BACKUP_CLUSTER, undef, backup_regexp_get(1, 1, 1), 'reverse'); } @@ -1648,7 +1610,7 @@ sub backup_expire if (!defined($strArchiveRetentionBackup)) { - if ($strArchiveRetentionType eq 'full' && scalar @stryPath > 0) + if ($strArchiveRetentionType eq BACKUP_TYPE_FULL && scalar @stryPath > 0) { &log(INFO, 'fewer than required backups for retention, but since archive_retention_type = full using oldest full backup'); $strArchiveRetentionBackup = $stryPath[scalar @stryPath - 1]; @@ -1664,9 +1626,8 @@ sub backup_expire # even though they are also in the pg_xlog directory (since they have been copied more than once). &log(INFO, 'archive retention based on backup ' . $strArchiveRetentionBackup); - my %oManifest; - config_load($oFile->path_get(PATH_BACKUP_CLUSTER) . "/${strArchiveRetentionBackup}/backup.manifest", \%oManifest); - my $strArchiveLast = ${oManifest}{backup}{'archive-start'}; + my $oManifest = new BackRest::Manifest($oFile->path_get(PATH_BACKUP_CLUSTER) . "/${strArchiveRetentionBackup}/backup.manifest"); + my $strArchiveLast = $oManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START); if (!defined($strArchiveLast)) { @@ -1700,7 +1661,8 @@ sub backup_expire # Delete if the first 24 characters less than the current archive file if ($strSubPath lt substr($strArchiveLast, 0, 24)) { - unlink($oFile->path_get(PATH_BACKUP_ARCHIVE, $strSubPath)) or confess &log(ERROR, 'unable to remove ' . $strSubPath); + unlink($oFile->path_get(PATH_BACKUP_ARCHIVE, $strSubPath)) + or confess &log(ERROR, 'unable to remove ' . $strSubPath); &log(DEBUG, 'removed expired archive file ' . 
$strSubPath); } } diff --git a/lib/BackRest/Config.pm b/lib/BackRest/Config.pm new file mode 100644 index 000000000..6cfe7da0f --- /dev/null +++ b/lib/BackRest/Config.pm @@ -0,0 +1,1534 @@ +#################################################################################################################################### +# CONFIG MODULE +#################################################################################################################################### +package BackRest::Config; + +use strict; +use warnings FATAL => qw(all); +use Carp qw(confess); + +use File::Basename qw(dirname); +use Cwd qw(abs_path); +use Exporter qw(import); + +use lib dirname($0) . '/../lib'; +use BackRest::Exception; +use BackRest::Utility; + +#################################################################################################################################### +# Export functions +#################################################################################################################################### +our @EXPORT = qw(configLoad optionGet optionTest optionRuleGet optionRequired optionDefault operationGet operationTest + operationSet); + +#################################################################################################################################### +# Operation constants - basic operations that are allowed in backrest +#################################################################################################################################### +use constant +{ + OP_ARCHIVE_GET => 'archive-get', + OP_ARCHIVE_PUSH => 'archive-push', + OP_BACKUP => 'backup', + OP_RESTORE => 'restore', + OP_EXPIRE => 'expire' +}; + +push @EXPORT, qw(OP_ARCHIVE_GET OP_ARCHIVE_PUSH OP_BACKUP OP_RESTORE OP_EXPIRE); + +#################################################################################################################################### +# BACKUP Type Constants +#################################################################################################################################### +use constant +{ + BACKUP_TYPE_FULL => 'full', + BACKUP_TYPE_DIFF => 'diff', + BACKUP_TYPE_INCR => 'incr' +}; + +push @EXPORT, qw(BACKUP_TYPE_FULL BACKUP_TYPE_DIFF BACKUP_TYPE_INCR); + +#################################################################################################################################### +# RECOVERY Type Constants +#################################################################################################################################### +use constant +{ + RECOVERY_TYPE_NAME => 'name', + RECOVERY_TYPE_TIME => 'time', + RECOVERY_TYPE_XID => 'xid', + RECOVERY_TYPE_PRESERVE => 'preserve', + RECOVERY_TYPE_NONE => 'none', + RECOVERY_TYPE_DEFAULT => 'default' +}; + +push @EXPORT, qw(RECOVERY_TYPE_NAME RECOVERY_TYPE_TIME RECOVERY_TYPE_XID RECOVERY_TYPE_PRESERVE RECOVERY_TYPE_NONE + RECOVERY_TYPE_DEFAULT); + +#################################################################################################################################### +# Configuration section constants +#################################################################################################################################### +use constant +{ + CONFIG_GLOBAL => 'global', + + CONFIG_SECTION_ARCHIVE => 'archive', + CONFIG_SECTION_BACKUP => 'backup', + CONFIG_SECTION_COMMAND => 'command', + CONFIG_SECTION_GENERAL => 'general', + CONFIG_SECTION_LOG => 'log', + CONFIG_SECTION_RESTORE_RECOVERY_SETTING => 'restore:recovery-setting', + 
CONFIG_SECTION_RESTORE_TABLESPACE_MAP => 'restore:tablespace-map', + CONFIG_SECTION_EXPIRE => 'expire', + CONFIG_SECTION_STANZA => 'stanza' +}; + +push @EXPORT, qw(CONFIG_GLOBAL + + CONFIG_SECTION_ARCHIVE CONFIG_SECTION_BACKUP CONFIG_SECTION_COMMAND + CONFIG_SECTION_GENERAL CONFIG_SECTION_LOG CONFIG_SECTION_RESTORE_RECOVERY_SETTING + CONFIG_SECTION_EXPIRE CONFIG_SECTION_STANZA CONFIG_SECTION_RESTORE_TABLESPACE_MAP); + +#################################################################################################################################### +# Option constants +#################################################################################################################################### +use constant +{ + # Command-line-only options + OPTION_CONFIG => 'config', + OPTION_DELTA => 'delta', + OPTION_FORCE => 'force', + OPTION_NO_START_STOP => 'no-start-stop', + OPTION_SET => 'set', + OPTION_STANZA => 'stanza', + OPTION_TARGET => 'target', + OPTION_TARGET_EXCLUSIVE => 'target-exclusive', + OPTION_TARGET_RESUME => 'target-resume', + OPTION_TARGET_TIMELINE => 'target-timeline', + OPTION_TYPE => 'type', + + # Command-line/conf file options + # GENERAL Section + OPTION_BUFFER_SIZE => 'buffer-size', + OPTION_COMPRESS => 'compress', + OPTION_COMPRESS_LEVEL => 'compress-level', + OPTION_COMPRESS_LEVEL_NETWORK => 'compress-level-network', + OPTION_REPO_PATH => 'repo-path', + OPTION_REPO_REMOTE_PATH => 'repo-remote-path', + OPTION_THREAD_MAX => 'thread-max', + OPTION_THREAD_TIMEOUT => 'thread-timeout', + + # ARCHIVE Section + OPTION_ARCHIVE_MAX_MB => 'archive-max-mb', + OPTION_ARCHIVE_ASYNC => 'archive-async', + + # BACKUP Section + OPTION_BACKUP_ARCHIVE_CHECK => 'archive-check', + OPTION_BACKUP_ARCHIVE_COPY => 'archive-copy', + OPTION_HARDLINK => 'hardlink', + OPTION_BACKUP_HOST => 'backup-host', + OPTION_BACKUP_USER => 'backup-user', + OPTION_START_FAST => 'start-fast', + + # COMMAND Section + OPTION_COMMAND_REMOTE => 'cmd-remote', + OPTION_COMMAND_PSQL => 'cmd-psql', + OPTION_COMMAND_PSQL_OPTION => 'cmd-psql-option', + + # LOG Section + OPTION_LOG_LEVEL_CONSOLE => 'log-level-console', + OPTION_LOG_LEVEL_FILE => 'log-level-file', + + # EXPIRE Section + OPTION_RETENTION_ARCHIVE => 'retention-archive', + OPTION_RETENTION_ARCHIVE_TYPE => 'retention-archive-type', + OPTION_RETENTION_DIFF => 'retention-' . BACKUP_TYPE_DIFF, + OPTION_RETENTION_FULL => 'retention-' . 
BACKUP_TYPE_FULL, + + # RESTORE Section + OPTION_RESTORE_TABLESPACE_MAP => 'tablespace-map', + OPTION_RESTORE_RECOVERY_SETTING => 'recovery-setting', + + # STANZA Section + OPTION_DB_HOST => 'db-host', + OPTION_DB_PATH => 'db-path', + OPTION_DB_USER => 'db-user', + + # Command-line-only help/version options + OPTION_HELP => 'help', + OPTION_VERSION => 'version', + + # Command-line-only test options + OPTION_TEST => 'test', + OPTION_TEST_DELAY => 'test-delay', + OPTION_TEST_NO_FORK => 'no-fork' +}; + +push @EXPORT, qw(OPTION_CONFIG OPTION_DELTA OPTION_FORCE OPTION_NO_START_STOP OPTION_SET OPTION_STANZA OPTION_TARGET + OPTION_TARGET_EXCLUSIVE OPTION_TARGET_RESUME OPTION_TARGET_TIMELINE OPTION_TYPE + + OPTION_DB_HOST OPTION_BACKUP_HOST OPTION_ARCHIVE_MAX_MB OPTION_BACKUP_ARCHIVE_CHECK OPTION_BACKUP_ARCHIVE_COPY + OPTION_ARCHIVE_ASYNC + OPTION_BUFFER_SIZE OPTION_COMPRESS OPTION_COMPRESS_LEVEL OPTION_COMPRESS_LEVEL_NETWORK OPTION_HARDLINK + OPTION_PATH_ARCHIVE OPTION_REPO_PATH OPTION_REPO_REMOTE_PATH OPTION_DB_PATH OPTION_LOG_LEVEL_CONSOLE + OPTION_LOG_LEVEL_FILE + OPTION_RESTORE_RECOVERY_SETTING OPTION_RETENTION_ARCHIVE OPTION_RETENTION_ARCHIVE_TYPE OPTION_RETENTION_FULL + OPTION_RETENTION_DIFF OPTION_START_FAST OPTION_THREAD_MAX OPTION_THREAD_TIMEOUT + OPTION_DB_USER OPTION_BACKUP_USER OPTION_COMMAND_PSQL OPTION_COMMAND_PSQL_OPTION OPTION_COMMAND_REMOTE + OPTION_RESTORE_TABLESPACE_MAP + + OPTION_TEST OPTION_TEST_DELAY OPTION_TEST_NO_FORK); + +#################################################################################################################################### +# Option Defaults +#################################################################################################################################### +use constant +{ + OPTION_DEFAULT_BUFFER_SIZE => 1048576, + OPTION_DEFAULT_BUFFER_SIZE_MIN => 4096, + OPTION_DEFAULT_BUFFER_SIZE_MAX => 8388608, + + OPTION_DEFAULT_COMPRESS => true, + OPTION_DEFAULT_COMPRESS_LEVEL => 6, + OPTION_DEFAULT_COMPRESS_LEVEL_MIN => 0, + OPTION_DEFAULT_COMPRESS_LEVEL_MAX => 9, + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK => 3, + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK_MIN => 0, + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK_MAX => 9, + + OPTION_DEFAULT_CONFIG => '/etc/pg_backrest.conf', + OPTION_DEFAULT_LOG_LEVEL_CONSOLE => lc(WARN), + OPTION_DEFAULT_LOG_LEVEL_FILE => lc(INFO), + OPTION_DEFAULT_THREAD_MAX => 1, + + OPTION_DEFAULT_ARCHIVE_ASYNC => false, + + OPTION_DEFAULT_COMMAND_PSQL => '/usr/bin/psql -X', + OPTION_DEFAULT_COMMAND_REMOTE => dirname(abs_path($0)) . 
'/pg_backrest_remote.pl', + + OPTION_DEFAULT_BACKUP_ARCHIVE_CHECK => true, + OPTION_DEFAULT_BACKUP_ARCHIVE_COPY => false, + OPTION_DEFAULT_BACKUP_FORCE => false, + OPTION_DEFAULT_BACKUP_HARDLINK => false, + OPTION_DEFAULT_BACKUP_NO_START_STOP => false, + OPTION_DEFAULT_BACKUP_START_FAST => false, + OPTION_DEFAULT_BACKUP_TYPE => BACKUP_TYPE_INCR, + + OPTION_DEFAULT_REPO_PATH => '/var/lib/backup', + + OPTION_DEFAULT_RESTORE_DELTA => false, + OPTION_DEFAULT_RESTORE_FORCE => false, + OPTION_DEFAULT_RESTORE_SET => 'latest', + OPTION_DEFAULT_RESTORE_TYPE => RECOVERY_TYPE_DEFAULT, + OPTION_DEFAULT_RESTORE_TARGET_EXCLUSIVE => false, + OPTION_DEFAULT_RESTORE_TARGET_RESUME => false, + + OPTION_DEFAULT_RETENTION_ARCHIVE_TYPE => BACKUP_TYPE_FULL, + OPTION_DEFAULT_RETENTION_MIN => 1, + OPTION_DEFAULT_RETENTION_MAX => 999999999, + + OPTION_DEFAULT_TEST => false, + OPTION_DEFAULT_TEST_DELAY => 5, + OPTION_DEFAULT_TEST_NO_FORK => false +}; + +push @EXPORT, qw(OPTION_DEFAULT_BUFFER_SIZE OPTION_DEFAULT_COMPRESS OPTION_DEFAULT_CONFIG OPTION_LEVEL_CONSOLE OPTION_LEVEL_FILE + OPTION_DEFAULT_THREAD_MAX + + OPTION_DEFAULT_COMPRESS OPTION_DEFAULT_COMPRESS_LEVEL OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK + OPTION_DEFAULT_COMMAND_REMOTE + + OPTION_DEFAULT_BACKUP_FORCE OPTION_DEFAULT_BACKUP_NO_START_STOP OPTION_DEFAULT_BACKUP_TYPE + + OPTION_DEFAULT_RESTORE_DELTA OPTION_DEFAULT_RESTORE_FORCE OPTION_DEFAULT_RESTORE_SET OPTION_DEFAULT_RESTORE_TYPE + OPTION_DEFAULT_RESTORE_TARGET_EXCLUSIVE OPTION_DEFAULT_RESTORE_TARGET_RESUME + + OPTION_DEFAULT_TEST OPTION_DEFAULT_TEST_DELAY OPTION_DEFAULT_TEST_NO_FORK); + +#################################################################################################################################### +# Option Rules +#################################################################################################################################### +use constant +{ + OPTION_RULE_ALLOW_LIST => 'allow-list', + OPTION_RULE_ALLOW_RANGE => 'allow-range', + OPTION_RULE_DEFAULT => 'default', + OPTION_RULE_DEPEND => 'depend', + OPTION_RULE_DEPEND_OPTION => 'depend-option', + OPTION_RULE_DEPEND_LIST => 'depend-list', + OPTION_RULE_DEPEND_VALUE => 'depend-value', + OPTION_RULE_NEGATE => 'negate', + OPTION_RULE_OPERATION => 'operation', + OPTION_RULE_REQUIRED => 'required', + OPTION_RULE_SECTION => 'section', + OPTION_RULE_SECTION_INHERIT => 'section-inherit', + OPTION_RULE_TYPE => 'type' +}; + +push @EXPORT, qw(OPTION_RULE_ALLOW_LIST OPTION_RULE_ALLOW_RANGE OPTION_RULE_DEFAULT OPTION_RULE_DEPEND OPTION_RULE_DEPEND_OPTION + OPTION_RULE_DEPEND_LIST OPTION_RULE_DEPEND_VALUE OPTION_RULE_NEGATE OPTION_RULE_OPERATION OPTION_RULE_REQUIRED + OPTION_RULE_SECTION OPTION_RULE_SECTION_INHERIT OPTION_RULE_TYPE); + +#################################################################################################################################### +# Option Types +#################################################################################################################################### +use constant +{ + OPTION_TYPE_BOOLEAN => 'boolean', + OPTION_TYPE_FLOAT => 'float', + OPTION_TYPE_HASH => 'hash', + OPTION_TYPE_INTEGER => 'integer', + OPTION_TYPE_STRING => 'string' +}; + +push @EXPORT, qw(OPTION_TYPE_BOOLEAN OPTION_TYPE_FLOAT OPTION_TYPE_INTEGER OPTION_TYPE_STRING); + +#################################################################################################################################### +# Option Rule Hash 
+#################################################################################################################################### +my %oOptionRule = +( + # Command-line-only option rule + #------------------------------------------------------------------------------------------------------------------------------- + &OPTION_CONFIG => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_CONFIG, + &OPTION_RULE_NEGATE => true + }, + + &OPTION_DELTA => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RESTORE_DELTA, + } + } + }, + + &OPTION_FORCE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RESTORE_FORCE, + }, + + &OP_BACKUP => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_FORCE, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_NO_START_STOP, + &OPTION_RULE_DEPEND_VALUE => true + } + } + } + }, + + &OPTION_NO_START_STOP => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_NO_START_STOP + } + } + }, + + &OPTION_SET => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RESTORE_TYPE, + } + } + }, + + &OPTION_STANZA => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING + }, + + &OPTION_TARGET => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TYPE, + &OPTION_RULE_DEPEND_LIST => + { + &RECOVERY_TYPE_NAME => true, + &RECOVERY_TYPE_TIME => true, + &RECOVERY_TYPE_XID => true + } + } + } + } + }, + + &OPTION_TARGET_EXCLUSIVE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RESTORE_TARGET_EXCLUSIVE, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TYPE, + &OPTION_RULE_DEPEND_LIST => + { + &RECOVERY_TYPE_TIME => true, + &RECOVERY_TYPE_XID => true + } + } + } + } + }, + + &OPTION_TARGET_RESUME => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RESTORE_TARGET_RESUME, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TYPE, + &OPTION_RULE_DEPEND_LIST => + { + &RECOVERY_TYPE_NAME => true, + &RECOVERY_TYPE_TIME => true, + &RECOVERY_TYPE_XID => true + } + } + } + } + }, + + &OPTION_TARGET_TIMELINE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => + { + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TYPE, + &OPTION_RULE_DEPEND_LIST => + { + &RECOVERY_TYPE_DEFAULT => true, + &RECOVERY_TYPE_NAME => true, + &RECOVERY_TYPE_TIME => true, + &RECOVERY_TYPE_XID => true + } + } + } + } + }, + + &OPTION_TYPE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_TYPE, + &OPTION_RULE_ALLOW_LIST => + { + &BACKUP_TYPE_FULL => true, + &BACKUP_TYPE_DIFF => true, + &BACKUP_TYPE_INCR => true, + } + }, + + &OP_RESTORE => + { + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RESTORE_TYPE, + &OPTION_RULE_ALLOW_LIST => + { + &RECOVERY_TYPE_NAME => true, + &RECOVERY_TYPE_TIME 
=> true, + &RECOVERY_TYPE_XID => true, + &RECOVERY_TYPE_PRESERVE => true, + &RECOVERY_TYPE_NONE => true, + &RECOVERY_TYPE_DEFAULT => true + } + } + } + }, + + # Command-line/conf option rules + #------------------------------------------------------------------------------------------------------------------------------- + &OPTION_COMMAND_REMOTE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_COMMAND_REMOTE, + &OPTION_RULE_SECTION => CONFIG_SECTION_COMMAND, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_BACKUP => true, + &OP_RESTORE => true + } + }, + + &OPTION_COMMAND_PSQL => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_COMMAND_PSQL, + &OPTION_RULE_SECTION => CONFIG_SECTION_COMMAND, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + } + }, + + &OPTION_COMMAND_PSQL_OPTION => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_SECTION => CONFIG_SECTION_COMMAND, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + }, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_COMMAND_PSQL + } + }, + + &OPTION_ARCHIVE_ASYNC => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_ARCHIVE_ASYNC, + &OPTION_RULE_SECTION => CONFIG_SECTION_ARCHIVE, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_PUSH => true + } + }, + + &OPTION_DB_HOST => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_STANZA, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + } + }, + + &OPTION_DB_USER => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_SECTION => CONFIG_SECTION_STANZA, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + }, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_DB_HOST + } + }, + + &OPTION_BACKUP_HOST => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_BACKUP, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_RESTORE => true + }, + }, + + &OPTION_BACKUP_USER => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_SECTION => CONFIG_SECTION_BACKUP, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_RESTORE => true + }, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_BACKUP_HOST + } + }, + + &OPTION_REPO_PATH => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_REPO_PATH, + &OPTION_RULE_SECTION => CONFIG_SECTION_GENERAL, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_BACKUP => true, + &OP_RESTORE => true, + &OP_EXPIRE => true + }, + }, + + &OPTION_REPO_REMOTE_PATH => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_GENERAL, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_RESTORE => true + }, + }, + + &OPTION_DB_PATH => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_SECTION => CONFIG_SECTION_STANZA, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => + { + &OPTION_RULE_REQUIRED => false + }, + &OP_ARCHIVE_PUSH => + { + &OPTION_RULE_REQUIRED => false + }, + &OP_BACKUP => true + }, + }, + + &OPTION_BUFFER_SIZE => 
+ { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BUFFER_SIZE, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_SECTION_INHERIT => CONFIG_SECTION_GENERAL, + &OPTION_RULE_ALLOW_RANGE => [OPTION_DEFAULT_BUFFER_SIZE_MIN, OPTION_DEFAULT_BUFFER_SIZE_MAX] + }, + + &OPTION_ARCHIVE_MAX_MB => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_ARCHIVE, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_PUSH => true + } + }, + + &OPTION_BACKUP_ARCHIVE_CHECK => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_ARCHIVE_CHECK, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + } + }, + + &OPTION_BACKUP_ARCHIVE_COPY => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_ARCHIVE_COPY, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => + { + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_BACKUP_ARCHIVE_CHECK, + &OPTION_RULE_DEPEND_VALUE => true + } + } + } + }, + + &OPTION_COMPRESS => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_COMPRESS, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_SECTION_INHERIT => CONFIG_SECTION_GENERAL, + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_BACKUP => true, + &OP_RESTORE => true + } + }, + + &OPTION_COMPRESS_LEVEL => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_COMPRESS_LEVEL, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_SECTION_INHERIT => CONFIG_SECTION_GENERAL, + &OPTION_RULE_ALLOW_RANGE => [OPTION_DEFAULT_COMPRESS_LEVEL_MIN, OPTION_DEFAULT_COMPRESS_LEVEL_MAX], + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_BACKUP => true, + &OP_RESTORE => true + } + }, + + &OPTION_COMPRESS_LEVEL_NETWORK => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_SECTION_INHERIT => CONFIG_SECTION_GENERAL, + &OPTION_RULE_ALLOW_RANGE => [OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK_MIN, OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK_MAX], + &OPTION_RULE_OPERATION => + { + &OP_ARCHIVE_GET => true, + &OP_ARCHIVE_PUSH => true, + &OP_BACKUP => true, + &OP_RESTORE => true + } + }, + + &OPTION_HARDLINK => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_HARDLINK, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + } + }, + + &OPTION_LOG_LEVEL_CONSOLE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_LOG_LEVEL_CONSOLE, + &OPTION_RULE_SECTION => CONFIG_SECTION_LOG, + &OPTION_RULE_ALLOW_LIST => + { + lc(OFF) => true, + lc(ERROR) => true, + lc(WARN) => true, + lc(INFO) => true, + lc(DEBUG) => true, + lc(TRACE) => true + } + }, + + &OPTION_LOG_LEVEL_FILE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_LOG_LEVEL_FILE, + &OPTION_RULE_SECTION => CONFIG_SECTION_LOG, + &OPTION_RULE_ALLOW_LIST => + { + lc(OFF) => true, + lc(ERROR) => true, + lc(WARN) => true, + lc(INFO) => true, + lc(DEBUG) => true, + lc(TRACE) => true + } + }, + + &OPTION_RESTORE_TABLESPACE_MAP => + { + &OPTION_RULE_TYPE => OPTION_TYPE_HASH, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_RESTORE_TABLESPACE_MAP, + 
&OPTION_RULE_OPERATION => + { + &OP_RESTORE => 1 + }, + }, + + &OPTION_RESTORE_RECOVERY_SETTING => + { + &OPTION_RULE_TYPE => OPTION_TYPE_HASH, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_RESTORE_RECOVERY_SETTING, + &OPTION_RULE_OPERATION => + { + &OP_RESTORE => 1 + }, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TYPE, + &OPTION_RULE_DEPEND_LIST => + { + &RECOVERY_TYPE_DEFAULT => true, + &RECOVERY_TYPE_NAME => true, + &RECOVERY_TYPE_TIME => true, + &RECOVERY_TYPE_XID => true + } + } + }, + + &OPTION_RETENTION_ARCHIVE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_EXPIRE, + &OPTION_RULE_ALLOW_RANGE => [OPTION_DEFAULT_RETENTION_MIN, OPTION_DEFAULT_RETENTION_MAX], + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true, + &OP_EXPIRE => true + } + }, + + &OPTION_RETENTION_ARCHIVE_TYPE => + { + &OPTION_RULE_TYPE => OPTION_TYPE_STRING, + &OPTION_RULE_REQUIRED => true, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_RETENTION_ARCHIVE_TYPE, + &OPTION_RULE_SECTION => CONFIG_SECTION_EXPIRE, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true, + &OP_EXPIRE => true + }, + &OPTION_RULE_ALLOW_LIST => + { + &BACKUP_TYPE_FULL => 1, + &BACKUP_TYPE_DIFF => 1, + &BACKUP_TYPE_INCR => 1 + }, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_RETENTION_ARCHIVE + } + }, + + &OPTION_RETENTION_DIFF => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_EXPIRE, + &OPTION_RULE_ALLOW_RANGE => [OPTION_DEFAULT_RETENTION_MIN, OPTION_DEFAULT_RETENTION_MAX], + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true, + &OP_EXPIRE => true + } + }, + + &OPTION_RETENTION_FULL => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => CONFIG_SECTION_EXPIRE, + &OPTION_RULE_ALLOW_RANGE => [OPTION_DEFAULT_RETENTION_MIN, OPTION_DEFAULT_RETENTION_MAX], + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true, + &OP_EXPIRE => true + } + }, + + &OPTION_START_FAST => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_BACKUP_START_FAST, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true + } + }, + + &OPTION_THREAD_MAX => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_THREAD_MAX, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_SECTION_INHERIT => CONFIG_SECTION_GENERAL, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true, + &OP_RESTORE => true + } + }, + + &OPTION_THREAD_TIMEOUT => + { + &OPTION_RULE_TYPE => OPTION_TYPE_INTEGER, + &OPTION_RULE_REQUIRED => false, + &OPTION_RULE_SECTION => true, + &OPTION_RULE_SECTION_INHERIT => CONFIG_SECTION_GENERAL, + &OPTION_RULE_OPERATION => + { + &OP_BACKUP => true, + &OP_RESTORE => true + } + }, + + # Command-line-only test option rules + #------------------------------------------------------------------------------------------------------------------------------- + &OPTION_TEST => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_TEST + }, + + &OPTION_TEST_DELAY => + { + &OPTION_RULE_TYPE => OPTION_TYPE_FLOAT, + &OPTION_RULE_DEFAULT => OPTION_DEFAULT_TEST_DELAY, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TEST, + &OPTION_RULE_DEPEND_VALUE => true + } + }, + + &OPTION_TEST_NO_FORK => + { + &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN, + &OPTION_RULE_DEFAULT => 
OPTION_DEFAULT_TEST_NO_FORK, + &OPTION_RULE_DEPEND => + { + &OPTION_RULE_DEPEND_OPTION => OPTION_TEST + } + } +); + +#################################################################################################################################### +# Global variables +#################################################################################################################################### +my %oOption; # Option hash +my $strOperation; # Operation (backup, archive-get, ...) + +#################################################################################################################################### +# configLoad +# +# Load configuration. +#################################################################################################################################### +sub configLoad +{ + # Clear option in case it was loaded before + %oOption = (); + + # Build hash with all valid command-line options + my %oOptionAllow; + + foreach my $strKey (keys(%oOptionRule)) + { + my $strOption = $strKey; + + if (!defined($oOptionRule{$strKey}{&OPTION_RULE_TYPE})) + { + confess &log(ASSERT, "Option ${strKey} does not have a defined type", ERROR_ASSERT); + } + elsif ($oOptionRule{$strKey}{&OPTION_RULE_TYPE} eq OPTION_TYPE_HASH) + { + $strOption .= '=s@'; + } + elsif ($oOptionRule{$strKey}{&OPTION_RULE_TYPE} ne OPTION_TYPE_BOOLEAN) + { + $strOption .= '=s'; + } + + $oOptionAllow{$strOption} = $strOption; + + # Check if the option can be negated + if (defined($oOptionRule{$strKey}{&OPTION_RULE_NEGATE}) && $oOptionRule{$strKey}{&OPTION_RULE_NEGATE}) + { + $strOption = "no-${strKey}"; + $oOptionAllow{$strOption} = $strOption; + } + } + + # Get command-line options + use Getopt::Long qw(GetOptions); + my %oOptionTest; + + if (!GetOptions(\%oOptionTest, %oOptionAllow)) + { + print "\n"; + print 'pg_backrest ' . version_get() . "\n"; + print "\n"; + use Pod::Usage; + pod2usage(2); + }; + + # Display version and exit if requested + if (defined($oOptionTest{&OPTION_VERSION}) || defined($oOptionTest{&OPTION_HELP})) + { + print 'pg_backrest ' . version_get() . "\n"; + + if (!defined($oOptionTest{&OPTION_HELP})) + { + exit 0; + } + } + + # Display help and exit if requested + if (defined($oOptionTest{&OPTION_HELP})) + { + print "\n"; + pod2usage(); + exit 0; + } + + # Validate and store options + optionValid(\%oOptionTest); + + # Replace command psql options if set + if (optionTest(OPTION_COMMAND_PSQL) && optionTest(OPTION_COMMAND_PSQL_OPTION)) + { + $oOption{&OPTION_COMMAND_PSQL} =~ s/\%option\%/$oOption{&OPTION_COMMAND_PSQL_OPTION}/g; + } + + # Set repo-remote-path to repo-path if it is not set + if (optionTest(OPTION_REPO_PATH) && !optionTest(OPTION_REPO_REMOTE_PATH)) + { + $oOption{&OPTION_REPO_REMOTE_PATH} = optionGet(OPTION_REPO_PATH); + } +} + +#################################################################################################################################### +# optionValid +# +# Make sure the command-line options are valid based on the operation. 
+#################################################################################################################################### +sub optionValid +{ + my $oOptionTest = shift; + + # Check that the operation is present and valid + $strOperation = $ARGV[0]; + + if (!defined($strOperation)) + { + confess &log(ERROR, "operation must be specified", ERROR_OPERATION_REQUIRED); + } + + if ($strOperation ne OP_ARCHIVE_GET && + $strOperation ne OP_ARCHIVE_PUSH && + $strOperation ne OP_BACKUP && + $strOperation ne OP_RESTORE && + $strOperation ne OP_EXPIRE) + { + confess &log(ERROR, "invalid operation ${strOperation}"); + } + + # Set the operation section - because of the various archive commands this is not always the operation + my $strOperationSection; + + if (operationTest(OP_ARCHIVE_GET) || operationTest(OP_ARCHIVE_PUSH)) + { + $strOperationSection = CONFIG_SECTION_ARCHIVE; + } + else + { + $strOperationSection = $strOperation; + } + + # Hash to store contents of the config file. The file will be loaded one the config dependency is resolved unless all options + # are set on the command line or --no-config is specified. + my $oConfig; + my $bConfigExists = true; + + # Keep track of unresolved dependencies + my $bDependUnresolved = true; + my %oOptionResolved; + + # Loop through all possible options + while ($bDependUnresolved) + { + # Assume that all dependencies will be resolved in this loop + $bDependUnresolved = false; + + foreach my $strOption (sort(keys(%oOptionRule))) + { + # Skip the option if it has been resolved in a prior loop + if (defined($oOptionResolved{$strOption})) + { + next; + } + + # Store the option value since it is used a lot + my $strValue = $$oOptionTest{$strOption}; + + # Check to see if an option can be negated. Make sure that it is not set and negated at the same time. + my $bNegate = false; + + if (defined($oOptionRule{$strOption}{&OPTION_RULE_NEGATE}) && $oOptionRule{$strOption}{&OPTION_RULE_NEGATE}) + { + $bNegate = defined($$oOptionTest{'no-' . $strOption}); + + if ($bNegate && defined($strValue)) + { + confess &log(ERROR, "option '${strOption}' cannot be both set and negated", ERROR_OPTION_NEGATE); + } + } + + # If the option value is undefined and not negated, see if it can be loaded from pg_backrest.conf + if (!defined($strValue) && !$bNegate && $strOption ne OPTION_CONFIG && + $oOptionRule{$strOption}{&OPTION_RULE_SECTION}) + { + + # If the config option has not been resolved yet then continue processing + if (!defined($oOptionResolved{&OPTION_CONFIG}) || !defined($oOptionResolved{&OPTION_STANZA})) + { + $bDependUnresolved = true; + next; + } + + # If the config option is defined try to get the option from the config file + if ($bConfigExists && defined($oOption{&OPTION_CONFIG})) + { + # Attempt to load the config file if it has not been loaded + if (!defined($oConfig)) + { + my $strConfigFile = $oOption{&OPTION_CONFIG}; + $bConfigExists = -e $strConfigFile; + + if ($bConfigExists) + { + if (!-f $strConfigFile) + { + confess &log(ERROR, "'${strConfigFile}' is not a file", ERROR_FILE_INVALID); + } + + $oConfig = ini_load($strConfigFile); + } + } + + # Get the section that the value should be in + my $strSection = defined($oOptionRule{$strOption}{&OPTION_RULE_SECTION}) ? + ($oOptionRule{$strOption}{&OPTION_RULE_SECTION} eq '1' ? 
+ $strOperationSection : $oOptionRule{$strOption}{&OPTION_RULE_SECTION}) : undef; + + # Only look in the stanza section when $strSection = true + if ($strSection eq CONFIG_SECTION_STANZA) + { + $strValue = $$oConfig{optionGet(OPTION_STANZA)}{$strOption}; + } + # Else do a full search + else + { + # First check in the stanza section + $strValue = $oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_HASH ? + $$oConfig{optionGet(OPTION_STANZA) . ":${strSection}"} : + $$oConfig{optionGet(OPTION_STANZA) . ":${strSection}"}{$strOption}; + + # Else check for an inherited stanza section + if (!defined($strValue)) + { + my $strInheritedSection = undef; + + $strInheritedSection = $oOptionRule{$strOption}{&OPTION_RULE_SECTION_INHERIT}; + + if (defined($strInheritedSection)) + { + $strValue = $$oConfig{optionGet(OPTION_STANZA) . ":${strInheritedSection}"}{$strOption}; + } + + # Else check the global section + if (!defined($strValue)) + { + $strValue = $oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_HASH ? + $$oConfig{&CONFIG_GLOBAL . ":${strSection}"} : + $$oConfig{&CONFIG_GLOBAL . ":${strSection}"}{$strOption}; + + # Else check the global inherited section + if (!defined($strValue) && defined($strInheritedSection)) + { + $strValue = $$oConfig{&CONFIG_GLOBAL . ":${strInheritedSection}"}{$strOption}; + } + } + } + } + + # Fix up data types + if (defined($strValue)) + { + if ($strValue eq '') + { + $strValue = undef; + } + elsif ($oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_BOOLEAN) + { + if ($strValue eq 'y') + { + $strValue = true; + } + elsif ($strValue eq 'n') + { + $strValue = false; + } + else + { + confess &log(ERROR, "'${strValue}' is not valid for '${strOption}' option", + ERROR_OPTION_INVALID_VALUE); + } + } + } + } + } + + # If the operation has rules store them for later evaluation + my $oOperationRule = optionOperationRule($strOption, $strOperation); + + # Check dependency for the operation then for the option + my $bDependResolved = true; + my $oDepend = defined($oOperationRule) ? $$oOperationRule{&OPTION_RULE_DEPEND} : + $oOptionRule{$strOption}{&OPTION_RULE_DEPEND}; + + if (defined($oDepend)) + { + # Make sure the depend option has been resolved, otherwise skip this option for now + my $strDependOption = $$oDepend{&OPTION_RULE_DEPEND_OPTION}; + + if (!defined($oOptionResolved{$strDependOption})) + { + $bDependUnresolved = true; + next; + } + + # Check if the depend option has a value + my $strDependValue = $oOption{$strDependOption}; + my $strError = "option '${strOption}' not valid without option '${strDependOption}'"; + + $bDependResolved = defined($strDependValue) ? 
true : false; + + if (!$bDependResolved && defined($strValue)) + { + confess &log(ERROR, $strError, ERROR_OPTION_INVALID); + } + + # If a depend value exists, make sure the option value matches + if ($bDependResolved && defined($$oDepend{&OPTION_RULE_DEPEND_VALUE}) && + $$oDepend{&OPTION_RULE_DEPEND_VALUE} ne $strDependValue) + { + $bDependResolved = false; + + if (defined($strValue)) + { + if ($oOptionRule{$strDependOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_BOOLEAN) + { + if (!$$oDepend{&OPTION_RULE_DEPEND_VALUE}) + { + confess &log(ASSERT, "no error has been created for unused case where depend value = false"); + } + } + else + { + $strError .= " = '$$oDepend{&OPTION_RULE_DEPEND_VALUE}'"; + } + + confess &log(ERROR, $strError, ERROR_OPTION_INVALID); + } + } + + # If a depend list exists, make sure the value is in the list + if ($bDependResolved && defined($$oDepend{&OPTION_RULE_DEPEND_LIST}) && + !defined($$oDepend{&OPTION_RULE_DEPEND_LIST}{$strDependValue})) + { + $bDependResolved = false; + + if (defined($strValue)) + { + my @oyValue; + + foreach my $strValue (sort(keys($$oDepend{&OPTION_RULE_DEPEND_LIST}))) + { + push(@oyValue, "'${strValue}'"); + } + + $strError .= @oyValue == 1 ? " = $oyValue[0]" : " in (" . join(", ", @oyValue) . ")"; + confess &log(ERROR, $strError, ERROR_OPTION_INVALID); + } + } + } + + # Is the option defined? + if (defined($strValue)) + { + # Check that floats and integers are valid + if ($oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_INTEGER || + $oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_FLOAT) + { + # Test that the string is a valid float or integer by adding 1 to it. It's pretty hokey but it works and it + # beats requiring Scalar::Util::Numeric to do it properly. + eval + { + my $strTest = $strValue + 1; + }; + + my $bError = $@ ? true : false; + + # Check that integers are really integers + if (!$bError && $oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_INTEGER && + (int($strValue) . 'S') ne ($strValue . 'S')) + { + $bError = true; + } + + # Error if the value did not pass tests + !$bError + or confess &log(ERROR, "'${strValue}' is not valid for '${strOption}' option", ERROR_OPTION_INVALID_VALUE); + } + + # Process an allow list for the operation then for the option + my $oAllow = defined($oOperationRule) ? $$oOperationRule{&OPTION_RULE_ALLOW_LIST} : + $oOptionRule{$strOption}{&OPTION_RULE_ALLOW_LIST}; + + if (defined($oAllow) && !defined($$oAllow{$strValue})) + { + confess &log(ERROR, "'${strValue}' is not valid for '${strOption}' option", ERROR_OPTION_INVALID_VALUE); + } + + # Process an allow range for the operation then for the option + $oAllow = defined($oOperationRule) ? 
$$oOperationRule{&OPTION_RULE_ALLOW_RANGE} : + $oOptionRule{$strOption}{&OPTION_RULE_ALLOW_RANGE}; + + if (defined($oAllow) && ($strValue < $$oAllow[0] || $strValue > $$oAllow[1])) + { + confess &log(ERROR, "'${strValue}' is not valid for '${strOption}' option", ERROR_OPTION_INVALID_RANGE); + } + + # Set option value + if ($oOptionRule{$strOption}{&OPTION_RULE_TYPE} eq OPTION_TYPE_HASH && ref($strValue) eq 'ARRAY') + { + foreach my $strItem (@{$strValue}) + { + # Check for = and make sure there is a least one character on each side + my $iEqualPos = index($strItem, '='); + + if ($iEqualPos < 1 || length($strItem) <= $iEqualPos + 1) + { + confess &log(ERROR, "'${strItem}' not valid key/value for '${strOption}' option", + ERROR_OPTION_INVALID_PAIR); + } + + # Check that the key has not already been set + my $strKey = substr($strItem, 0, $iEqualPos); + + if (defined($oOption{$strOption}{$strKey})) + { + confess &log(ERROR, "'${$strItem}' already defined for '${strOption}' option", + ERROR_OPTION_DUPLICATE_KEY); + } + + $oOption{$strOption}{$strKey} = substr($strItem, $iEqualPos + 1); + } + } + else + { + $oOption{$strOption} = $strValue; + } + } + # Else try to set a default + elsif ($bDependResolved && + (!defined($oOptionRule{$strOption}{&OPTION_RULE_OPERATION}) || + defined($oOptionRule{$strOption}{&OPTION_RULE_OPERATION}{$strOperation}))) + { + # Check for default in operation then option + my $strDefault = optionDefault($strOption, $strOperation); + + # If default is defined + if (defined($strDefault)) + { + # Only set default if dependency is resolved + $oOption{$strOption} = $strDefault if !$bNegate; + } + # Else check required + elsif (optionRequired($strOption, $strOperation)) + { + confess &log(ERROR, "${strOperation} operation requires option: ${strOption}", ERROR_OPTION_REQUIRED); + } + } + + $oOptionResolved{$strOption} = true; + } + } +} + +#################################################################################################################################### +# optionOperationRule +# +# Returns the option rules based on the operation. +#################################################################################################################################### +sub optionOperationRule +{ + my $strOption = shift; + my $strOperation = shift; + + if (defined($strOperation)) + { + return defined($oOptionRule{$strOption}{&OPTION_RULE_OPERATION}) && + defined($oOptionRule{$strOption}{&OPTION_RULE_OPERATION}{$strOperation}) && + ref($oOptionRule{$strOption}{&OPTION_RULE_OPERATION}{$strOperation}) eq 'HASH' ? + $oOptionRule{$strOption}{&OPTION_RULE_OPERATION}{$strOperation} : undef; + } + + return undef; +} + +#################################################################################################################################### +# optionRequired +# +# Is the option required for this operation? +#################################################################################################################################### +sub optionRequired +{ + my $strOption = shift; + my $strOperation = shift; + + # Get the operation rule + my $oOperationRule = optionOperationRule($strOption, $strOperation); + + # Check for required in operation then option + my $bRequired = defined($oOperationRule) ? 
$$oOperationRule{&OPTION_RULE_REQUIRED} : + $oOptionRule{$strOption}{&OPTION_RULE_REQUIRED}; + + # Return required + return !defined($bRequired) || $bRequired; +} + + +#################################################################################################################################### +# optionDefault +# +# Does the option have a default for this operation? +#################################################################################################################################### +sub optionDefault +{ + my $strOption = shift; + my $strOperation = shift; + + # Get the operation rule + my $oOperationRule = optionOperationRule($strOption, $strOperation); + + # Check for default in operation + my $strDefault = defined($oOperationRule) ? $$oOperationRule{&OPTION_RULE_DEFAULT} : undef; + + # If defined return, else try to grab the global default + return defined($strDefault) ? $strDefault : $oOptionRule{$strOption}{&OPTION_RULE_DEFAULT}; +} + +#################################################################################################################################### +# operationGet +# +# Get the current operation. +#################################################################################################################################### +sub operationGet +{ + return $strOperation; +} + +#################################################################################################################################### +# operationTest +# +# Test the current operation. +#################################################################################################################################### +sub operationTest +{ + my $strOperationTest = shift; + + return $strOperationTest eq $strOperation; +} + +#################################################################################################################################### +# operationSet +# +# Set current operation (usually for triggering follow-on operations). +#################################################################################################################################### +sub operationSet +{ + my $strValue = shift; + + $strOperation = $strValue; +} + +#################################################################################################################################### +# optionGet +# +# Get option value. +#################################################################################################################################### +sub optionGet +{ + my $strOption = shift; + my $bRequired = shift; + + if (!defined($oOption{$strOption}) && (!defined($bRequired) || $bRequired)) + { + confess &log(ASSERT, "option ${strOption} is required"); + } + + return $oOption{$strOption}; +} + +#################################################################################################################################### +# optionTest +# +# Test a option value. +#################################################################################################################################### +sub optionTest +{ + my $strOption = shift; + my $strValue = shift; + + if (defined($strValue)) + { + return optionGet($strOption) eq $strValue; + } + + return defined($oOption{$strOption}); +} + +#################################################################################################################################### +# optionRuleGet +# +# Get the option rules. 
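# Illustrative sketch, not part of this patch: a hypothetical %oOptionRule entry showing the fields that optionValid(),
# optionRequired() and optionDefault() consult. The option name 'example-option' and its values are invented; the
# OPTION_RULE_* and OPTION_TYPE_* constants are the ones defined in this module.
#
#     'example-option' =>
#     {
#         &OPTION_RULE_TYPE => OPTION_TYPE_BOOLEAN,         # config values y/n are converted to true/false by optionValid()
#         &OPTION_RULE_DEFAULT => false,                    # option-level default when no operation override exists
#         &OPTION_RULE_OPERATION =>
#         {
#             &OP_BACKUP =>                                 # operation override, checked before the option-level rule
#             {
#                 &OPTION_RULE_REQUIRED => false,
#                 &OPTION_RULE_DEFAULT => true
#             }
#         },
#         &OPTION_RULE_DEPEND =>
#         {
#             &OPTION_RULE_DEPEND_OPTION => OPTION_STANZA   # only valid once --stanza has been resolved
#         }
#     }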
+#################################################################################################################################### +sub optionRuleGet +{ + use Storable qw(dclone); + return dclone(\%oOptionRule); +} + +1; diff --git a/lib/BackRest/Db.pm b/lib/BackRest/Db.pm index 6441f8083..659885c17 100644 --- a/lib/BackRest/Db.pm +++ b/lib/BackRest/Db.pm @@ -3,34 +3,43 @@ #################################################################################################################################### package BackRest::Db; -use threads; use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); -use Moose; use Net::OpenSSH; use File::Basename; use IPC::System::Simple qw(capture); +use Exporter qw(import); use lib dirname($0); use BackRest::Utility; -# Command strings -has strCommandPsql => (is => 'bare'); # PSQL command +#################################################################################################################################### +# Postmaster process Id file +#################################################################################################################################### +use constant FILE_POSTMASTER_PID => 'postmaster.pid'; -# Module variables -has strDbUser => (is => 'ro'); # Database user -has strDbHost => (is => 'ro'); # Database host -has oDbSSH => (is => 'bare'); # Database SSH object -has fVersion => (is => 'ro'); # Database version +our @EXPORT = qw(FILE_POSTMASTER_PID); #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### -sub BUILD +sub new { - my $self = shift; + my $class = shift; # Class name + my $strCommandPsql = shift; # PSQL command + my $strDbHost = shift; # Database host name + my $strDbUser = shift; # Database user name (generally postgres) + + # Create the class hash + my $self = {}; + bless $self, $class; + + # Initialize variables + $self->{strCommandPsql} = $strCommandPsql; + $self->{strDbHost} = $strDbHost; + $self->{strDbUser} = $strDbUser; # Connect SSH object if db host is defined if (defined($self->{strDbHost}) && !defined($self->{oDbSSH})) @@ -44,6 +53,8 @@ sub BUILD master_opts => [-o => $strOptionSSHRequestTTY]); $self->{oDbSSH}->error and confess &log(ERROR, "unable to connect to $self->{strDbHost}: " . $self->{oDbSSH}->error); } + + return $self; } #################################################################################################################################### @@ -132,9 +143,11 @@ sub backup_start my $strLabel = shift; my $bStartFast = shift; - return trim($self->psql_execute("set client_min_messages = 'warning';" . - "copy (select pg_xlogfile_name(xlog) from pg_start_backup('${strLabel}'" . - ($bStartFast ? ', true' : '') . ') as xlog) to stdout')); + my @stryField = split("\t", trim($self->psql_execute("set client_min_messages = 'warning';" . + "copy (select to_char(current_timestamp, 'YYYY-MM-DD HH24:MI:SS.US TZ'), pg_xlogfile_name(xlog) from pg_start_backup('${strLabel}'" . + ($bStartFast ? ', true' : '') . 
') as xlog) to stdout'))); + + return $stryField[1], $stryField[0]; } #################################################################################################################################### @@ -144,9 +157,10 @@ sub backup_stop { my $self = shift; - return trim($self->psql_execute("set client_min_messages = 'warning';" . - "copy (select pg_xlogfile_name(xlog) from pg_stop_backup() as xlog) to stdout")) + my @stryField = split("\t", trim($self->psql_execute("set client_min_messages = 'warning';" . + "copy (select to_char(clock_timestamp(), 'YYYY-MM-DD HH24:MI:SS.US TZ'), pg_xlogfile_name(xlog) from pg_stop_backup() as xlog) to stdout"))); + + return $stryField[1], $stryField[0]; } -no Moose; - __PACKAGE__->meta->make_immutable; +1; diff --git a/lib/BackRest/Exception.pm b/lib/BackRest/Exception.pm index c9815a95a..dd1bb3920 100644 --- a/lib/BackRest/Exception.pm +++ b/lib/BackRest/Exception.pm @@ -3,16 +3,58 @@ #################################################################################################################################### package BackRest::Exception; -use threads; use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); +use Exporter qw(import); -use Moose; +#################################################################################################################################### +# Exception Codes +#################################################################################################################################### +use constant +{ + ERROR_ASSERT => 100, + ERROR_CHECKSUM => 101, + ERROR_CONFIG => 102, + ERROR_FILE_INVALID => 103, + ERROR_FORMAT => 104, + ERROR_OPERATION_REQUIRED => 105, + ERROR_OPTION_INVALID => 106, + ERROR_OPTION_INVALID_VALUE => 107, + ERROR_OPTION_INVALID_RANGE => 108, + ERROR_OPTION_INVALID_PAIR => 109, + ERROR_OPTION_DUPLICATE_KEY => 110, + ERROR_OPTION_NEGATE => 111, + ERROR_OPTION_REQUIRED => 112, + ERROR_POSTMASTER_RUNNING => 113, + ERROR_PROTOCOL => 114, + ERROR_RESTORE_PATH_NOT_EMPTY => 115 +}; -# Module variables -has iCode => (is => 'bare'); # Exception code -has strMessage => (is => 'bare'); # Exception message +our @EXPORT = qw(ERROR_ASSERT ERROR_CHECKSUM ERROR_CONFIG ERROR_FILE_INVALID ERROR_FORMAT ERROR_OPERATION_REQUIRED + ERROR_OPTION_INVALID ERROR_OPTION_INVALID_VALUE ERROR_OPTION_INVALID_RANGE ERROR_OPTION_INVALID_PAIR + ERROR_OPTION_DUPLICATE_KEY ERROR_OPTION_NEGATE ERROR_OPTION_REQUIRED ERROR_POSTMASTER_RUNNING ERROR_PROTOCOL + ERROR_RESTORE_PATH_NOT_EMPTY); + +#################################################################################################################################### +# CONSTRUCTOR +#################################################################################################################################### +sub new +{ + my $class = shift; # Class name + my $iCode = shift; # Error code + my $strMessage = shift; # ErrorMessage + + # Create the class hash + my $self = {}; + bless $self, $class; + + # Initialize exception + $self->{iCode} = $iCode; + $self->{strMessage} = $strMessage; + + return $self; +} #################################################################################################################################### # CODE @@ -34,5 +76,4 @@ sub message return $self->{strMessage}; } -no Moose; -__PACKAGE__->meta->make_immutable; +1; diff --git a/lib/BackRest/File.pm b/lib/BackRest/File.pm index 010778432..5d70746c7 100644 --- a/lib/BackRest/File.pm +++ b/lib/BackRest/File.pm @@ -3,61 +3,24 @@ 
#################################################################################################################################### package BackRest::File; -use threads; use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); -use Moose; use Net::OpenSSH; -use File::Basename; +use File::Basename qw(dirname basename); use File::Copy qw(cp); use File::Path qw(make_path remove_tree); use Digest::SHA; use File::stat; use Fcntl ':mode'; -use IO::Compress::Gzip qw(gzip $GzipError); -use IO::Uncompress::Gunzip qw(gunzip $GunzipError); -use IO::String; +use Exporter qw(import); use lib dirname($0) . '/../lib'; use BackRest::Exception; use BackRest::Utility; use BackRest::Remote; -# Exports -use Exporter qw(import); -our @EXPORT = qw(PATH_ABSOLUTE PATH_DB PATH_DB_ABSOLUTE PATH_BACKUP PATH_BACKUP_ABSOLUTE - PATH_BACKUP_CLUSTER PATH_BACKUP_TMP PATH_BACKUP_ARCHIVE - - COMMAND_ERR_FILE_MISSING COMMAND_ERR_FILE_READ COMMAND_ERR_FILE_MOVE COMMAND_ERR_FILE_TYPE - COMMAND_ERR_LINK_READ COMMAND_ERR_PATH_MISSING COMMAND_ERR_PATH_CREATE COMMAND_ERR_PARAM - - PIPE_STDIN PIPE_STDOUT PIPE_STDERR - - REMOTE_DB REMOTE_BACKUP REMOTE_NONE - - OP_FILE_LIST OP_FILE_EXISTS OP_FILE_HASH OP_FILE_REMOVE OP_FILE_MANIFEST OP_FILE_COMPRESS - OP_FILE_MOVE OP_FILE_COPY OP_FILE_COPY_OUT OP_FILE_COPY_IN OP_FILE_PATH_CREATE); - -# Extension and permissions -has strCompressExtension => (is => 'ro', default => 'gz'); -has strDefaultPathPermission => (is => 'bare', default => '0750'); -has strDefaultFilePermission => (is => 'ro', default => '0640'); - -# Command strings -has strCommand => (is => 'bare'); - -# Module variables -has strRemote => (is => 'bare'); # Remote type (db or backup) -has oRemote => (is => 'bare'); # Remote object - -has strBackupPath => (is => 'bare'); # Backup base path - -# Process flags -has strStanza => (is => 'bare'); -has iThreadIdx => (is => 'bare'); - #################################################################################################################################### # COMMAND Error Constants #################################################################################################################################### @@ -73,21 +36,28 @@ use constant COMMAND_ERR_PATH_READ => 8 }; +our @EXPORT = qw(COMMAND_ERR_FILE_MISSING COMMAND_ERR_FILE_READ COMMAND_ERR_FILE_MOVE COMMAND_ERR_FILE_TYPE COMMAND_ERR_LINK_READ + COMMAND_ERR_PATH_MISSING COMMAND_ERR_PATH_CREATE COMMAND_ERR_PARAM); + #################################################################################################################################### # PATH_GET Constants #################################################################################################################################### use constant { - PATH_ABSOLUTE => 'absolute', - PATH_DB => 'db', - PATH_DB_ABSOLUTE => 'db:absolute', - PATH_BACKUP => 'backup', - PATH_BACKUP_ABSOLUTE => 'backup:absolute', - PATH_BACKUP_CLUSTER => 'backup:cluster', - PATH_BACKUP_TMP => 'backup:tmp', - PATH_BACKUP_ARCHIVE => 'backup:archive' + PATH_ABSOLUTE => 'absolute', + PATH_DB => 'db', + PATH_DB_ABSOLUTE => 'db:absolute', + PATH_BACKUP => 'backup', + PATH_BACKUP_ABSOLUTE => 'backup:absolute', + PATH_BACKUP_CLUSTER => 'backup:cluster', + PATH_BACKUP_TMP => 'backup:tmp', + PATH_BACKUP_ARCHIVE => 'backup:archive', + PATH_BACKUP_ARCHIVE_OUT => 'backup:archive:out' }; +push @EXPORT, qw(PATH_ABSOLUTE PATH_DB PATH_DB_ABSOLUTE PATH_BACKUP PATH_BACKUP_ABSOLUTE PATH_BACKUP_CLUSTER PATH_BACKUP_TMP + PATH_BACKUP_ARCHIVE PATH_BACKUP_ARCHIVE_OUT); 
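# Illustrative sketch, not part of this patch: how the new PATH_BACKUP_ARCHIVE_OUT type resolves in path_get() below. The
# stanza 'db', the repository path '/var/lib/postgresql/backup' and the WAL segment name are assumed values.
#
#     my $oFile = BackRest::File->new('db', '/var/lib/postgresql/backup', $strRemote, $oRemote);
#
#     $oFile->path_get(PATH_BACKUP_ARCHIVE_OUT, '000000010000000100000001')
#         returns '/var/lib/postgresql/backup/archive/db/out/000000010000000100000001'
#     $oFile->path_get(PATH_BACKUP_ARCHIVE_OUT, undef, true)
#         returns the temp name '/var/lib/postgresql/backup/archive/temp/db-archive.tmp' (when no thread index is set)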
+ #################################################################################################################################### # STD Pipe Constants #################################################################################################################################### @@ -98,21 +68,15 @@ use constant PIPE_STDERR => '' }; -#################################################################################################################################### -# Remote Types -#################################################################################################################################### -use constant -{ - REMOTE_DB => PATH_DB, - REMOTE_BACKUP => PATH_BACKUP, - REMOTE_NONE => 'none' -}; +push @EXPORT, qw(PIPE_STDIN PIPE_STDOUT PIPE_STDERR); #################################################################################################################################### # Operation constants #################################################################################################################################### use constant { + OP_FILE_OWNER => 'File->owner', + OP_FILE_WAIT => 'File->wait', OP_FILE_LIST => 'File->list', OP_FILE_EXISTS => 'File->exists', OP_FILE_HASH => 'File->hash', @@ -123,31 +87,63 @@ use constant OP_FILE_COPY => 'File->copy', OP_FILE_COPY_OUT => 'File->copy_out', OP_FILE_COPY_IN => 'File->copy_in', - OP_FILE_PATH_CREATE => 'File->path_create' + OP_FILE_PATH_CREATE => 'File->path_create', + OP_FILE_LINK_CREATE => 'File->link_create' }; +push @EXPORT, qw(OP_FILE_OWNER OP_FILE_WAIT OP_FILE_LIST OP_FILE_EXISTS OP_FILE_HASH OP_FILE_REMOVE OP_FILE_MANIFEST + OP_FILE_COMPRESS OP_FILE_MOVE OP_FILE_COPY OP_FILE_COPY_OUT OP_FILE_COPY_IN OP_FILE_PATH_CREATE); + #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### -sub BUILD +sub new { - my $self = shift; + my $class = shift; + my $strStanza = shift; + my $strBackupPath = shift; + my $strRemote = shift; + my $oRemote = shift; + my $strDefaultPathMode = shift; + my $strDefaultFileMode = shift; + my $iThreadIdx = shift; + + # Create the class hash + my $self = {}; + bless $self, $class; + + # Default compression extension to gz + $self->{strCompressExtension} = 'gz'; + + # Default file and path mode + $self->{strDefaultPathMode} = defined($strDefaultPathMode) ? $strDefaultPathMode : '0750'; + $self->{strDefaultFileMode} = defined($strDefaultFileMode) ? $strDefaultFileMode : '0640'; + + # Initialize other variables + $self->{strStanza} = $strStanza; + $self->{strBackupPath} = $strBackupPath; + $self->{strRemote} = $strRemote; + $self->{oRemote} = $oRemote; + $self->{iThreadIdx} = $iThreadIdx; + + # Remote object must be set + if (!defined($self->{oRemote})) + { + confess &log(ASSERT, 'oRemote must be defined'); + } # If remote is defined check parameters and open session - if (defined($self->{strRemote}) && $self->{strRemote} ne REMOTE_NONE) + if (defined($self->{strRemote}) && $self->{strRemote} ne NONE) { # Make sure remote is valid - if ($self->{strRemote} ne REMOTE_DB && $self->{strRemote} ne REMOTE_BACKUP) + if ($self->{strRemote} ne DB && $self->{strRemote} ne BACKUP) { - confess &log(ASSERT, 'strRemote must be "' . REMOTE_DB . '" or "' . REMOTE_BACKUP . 
'"'); - } - - # Remote object must be set - if (!defined($self->{oRemote})) - { - confess &log(ASSERT, 'oRemote must be defined'); + confess &log(ASSERT, 'strRemote must be "' . DB . '" or "' . BACKUP . + "\", $self->{strRemote} was passed"); } } + + return $self; } #################################################################################################################################### @@ -173,12 +169,13 @@ sub clone return BackRest::File->new ( - strCommand => $self->{strCommand}, - strRemote => $self->{strRemote}, - oRemote => defined($self->{oRemote}) ? $self->{oRemote}->clone($iThreadIdx) : undef, - strBackupPath => $self->{strBackupPath}, - strStanza => $self->{strStanza}, - iThreadIdx => $iThreadIdx + $self->{strStanza}, + $self->{strBackupPath}, + $self->{strRemote}, + defined($self->{oRemote}) ? $self->{oRemote}->clone() : undef, + $self->{strDefaultPathMode}, + $self->{strDefaultFileMode}, + $iThreadIdx ); } @@ -228,10 +225,11 @@ sub path_get confess &log(ASSERT, "absolute path ${strType}:${strFile} must start with /"); } - # Only allow temp files for PATH_BACKUP_ARCHIVE and PATH_BACKUP_TMP and any absolute path + # Only allow temp files for PATH_BACKUP_ARCHIVE, PATH_BACKUP_ARCHIVE_OUT, PATH_BACKUP_TMP and any absolute path $bTemp = defined($bTemp) ? $bTemp : false; - if ($bTemp && !($strType eq PATH_BACKUP_ARCHIVE || $strType eq PATH_BACKUP_TMP || $bAbsolute)) + if ($bTemp && !($strType eq PATH_BACKUP_ARCHIVE || $strType eq PATH_BACKUP_ARCHIVE_OUT || $strType eq PATH_BACKUP_TMP || + $bAbsolute)) { confess &log(ASSERT, 'temp file not supported on path ' . $strType); } @@ -279,28 +277,39 @@ sub path_get } # Get the backup archive path - if ($strType eq PATH_BACKUP_ARCHIVE) + if ($strType eq PATH_BACKUP_ARCHIVE_OUT || $strType eq PATH_BACKUP_ARCHIVE) { - my $strArchivePath = "$self->{strBackupPath}/archive/$self->{strStanza}"; - my $strArchive; + my $strArchivePath = "$self->{strBackupPath}/archive"; if ($bTemp) { - return "${strArchivePath}/file.tmp" . (defined($self->{iThreadIdx}) ? ".$self->{iThreadIdx}" : ''); + return "${strArchivePath}/temp/$self->{strStanza}-archive" . + (defined($self->{iThreadIdx}) ? "-$self->{iThreadIdx}" : '') . ".tmp"; } - if (defined($strFile)) + $strArchivePath .= "/$self->{strStanza}"; + + if ($strType eq PATH_BACKUP_ARCHIVE) { - $strArchive = substr(basename($strFile), 0, 24); + my $strArchive; - if ($strArchive !~ /^([0-F]){24}$/) + if (defined($strFile)) { - return "${strArchivePath}/${strFile}"; - } - } + $strArchive = substr(basename($strFile), 0, 24); - return $strArchivePath . (defined($strArchive) ? '/' . substr($strArchive, 0, 16) : '') . - (defined($strFile) ? '/' . $strFile : ''); + if ($strArchive !~ /^([0-F]){24}$/) + { + return "${strArchivePath}/${strFile}"; + } + } + + return $strArchivePath . (defined($strArchive) ? '/' . substr($strArchive, 0, 16) : '') . + (defined($strFile) ? '/' . $strFile : ''); + } + else + { + return "${strArchivePath}/out" . (defined($strFile) ? '/' . $strFile : ''); + } } if ($strType eq PATH_BACKUP_CLUSTER) @@ -348,7 +357,7 @@ sub link_create # if bPathCreate is not defined, default to true $bPathCreate = defined($bPathCreate) ? $bPathCreate : true; - # Source and destination path types must be the same (both PATH_DB or both PATH_BACKUP) + # Source and destination path types must be the same (e.g. both PATH_DB or both PATH_BACKUP, etc.) 
if ($self->path_type_get($strSourcePathType) ne $self->path_type_get($strDestinationPathType)) { confess &log(ASSERT, 'path types must be equal in link create'); @@ -358,6 +367,15 @@ sub link_create my $strSource = $self->path_get($strSourcePathType, $strSourceFile); my $strDestination = $self->path_get($strDestinationPathType, $strDestinationFile); + # Set operation and debug strings + my $strOperation = OP_FILE_LINK_CREATE; + + my $strDebug = "${strSourcePathType}" . (defined($strSource) ? ":${strSource}" : '') . + " to ${strDestinationPathType}" . (defined($strDestination) ? ":${strDestination}" : '') . + ', hard = ' . ($bHard ? 'true' : 'false') . ", relative = " . ($bRelative ? 'true' : 'false') . + ', destination_path_create = ' . ($bPathCreate ? 'true' : 'false'); + &log(DEBUG, "${strOperation}: ${strDebug}"); + # If the destination path is backup and does not exist, create it if ($bPathCreate && $self->path_type_get($strDestinationPathType) eq PATH_BACKUP) { @@ -392,22 +410,24 @@ sub link_create } } - # Create the command - my $strCommand = 'ln' . (!$bHard ? ' -s' : '') . " ${strSource} ${strDestination}"; - # Run remotely if ($self->is_remote($strSourcePathType)) { - &log(TRACE, "link_create: remote ${strSourcePathType} '${strCommand}'"); - - my $oSSH = $self->remote_get($strSourcePathType); - $oSSH->system($strCommand) or confess &log("unable to create link from ${strSource} to ${strDestination}"); + confess &log(ASSERT, "${strDebug}: remote operation not supported"); } # Run locally else { - &log(TRACE, "link_create: local '${strCommand}'"); - system($strCommand) == 0 or confess &log("unable to create link from ${strSource} to ${strDestination}"); + if ($bHard) + { + link($strSource, $strDestination) + or confess &log(ERROR, "unable to create hardlink from ${strSource} to ${strDestination}"); + } + else + { + symlink($strSource, $strDestination) + or confess &log(ERROR, "unable to create symlink from ${strSource} to ${strDestination}"); + } } } @@ -511,25 +531,8 @@ sub compress # Run locally else { - # Compress the file - if (!gzip($strPathOp => "${strPathOp}.gz")) - { - my $strError = "${strPathOp} could not be compressed:" . $!; - my $iErrorCode = COMMAND_ERR_FILE_READ; - - if (!$self->exists($strPathType, $strFile)) - { - $strError = "${strPathOp} does not exist"; - $iErrorCode = COMMAND_ERR_FILE_MISSING; - } - - if ($strPathType eq PATH_ABSOLUTE) - { - confess &log(ERROR, $strError, $iErrorCode); - } - - confess &log(ERROR, "${strDebug}: " . $strError); - } + # Use copy to compress the file + $self->copy($strPathType, $strFile, $strPathType, "${strFile}.gz", false, true); # Remove the old file unlink($strPathOp) @@ -547,7 +550,7 @@ sub path_create my $self = shift; my $strPathType = shift; my $strPath = shift; - my $strPermission = shift; + my $strMode = shift; my $bIgnoreExists = shift; # Set operation variables @@ -555,7 +558,7 @@ sub path_create # Set operation and debug strings my $strOperation = OP_FILE_PATH_CREATE; - my $strDebug = " ${strPathType}:${strPathOp}, permission " . (defined($strPermission) ? $strPermission : '[undef]'); + my $strDebug = " ${strPathType}:${strPathOp}, mode " . (defined($strMode) ? 
$strMode : '[undef]'); &log(DEBUG, "${strOperation}: ${strDebug}"); if ($self->is_remote($strPathType)) @@ -565,9 +568,9 @@ sub path_create $oParamHash{path} = ${strPathOp}; - if (defined($strPermission)) + if (defined($strMode)) { - $oParamHash{permission} = ${strPermission}; + $oParamHash{mode} = ${strMode}; } # Add remote info to debug string @@ -585,9 +588,9 @@ sub path_create # Attempt the create the directory my $stryError; - if (defined($strPermission)) + if (defined($strMode)) { - make_path($strPathOp, {mode => oct($strPermission), error => \$stryError}); + make_path($strPathOp, {mode => oct($strMode), error => \$stryError}); } else { @@ -695,7 +698,7 @@ sub remove my $bRemoved = true; # Set operation and debug strings - my $strOperation = OP_FILE_EXISTS; + my $strOperation = OP_FILE_REMOVE; my $strDebug = "${strPathType}:${strPathOp}"; &log(DEBUG, "${strOperation}: ${strDebug}"); @@ -743,15 +746,39 @@ sub hash my $self = shift; my $strPathType = shift; my $strFile = shift; + my $bCompressed = shift; my $strHashType = shift; + my ($strHash, $iSize) = $self->hash_size($strPathType, $strFile, $bCompressed, $strHashType); + + return $strHash; +} + +#################################################################################################################################### +# HASH_SIZE +#################################################################################################################################### +sub hash_size +{ + my $self = shift; + my $strPathType = shift; + my $strFile = shift; + my $bCompressed = shift; + my $strHashType = shift; + + # Set defaults + $bCompressed = defined($bCompressed) ? $bCompressed : false; + $strHashType = defined($strHashType) ? $strHashType : 'sha1'; + # Set operation variables my $strFileOp = $self->path_get($strPathType, $strFile); my $strHash; + my $iSize = 0; # Set operation and debug strings my $strOperation = OP_FILE_HASH; - my $strDebug = "${strPathType}:${strFileOp}"; + my $strDebug = "${strPathType}:${strFileOp}, " . + 'compressed = ' . ($bCompressed ? 'true' : 'false') . ', ' . + "hash_type = ${strHashType}"; &log(DEBUG, "${strOperation}: ${strDebug}"); if ($self->is_remote($strPathType)) @@ -781,16 +808,104 @@ sub hash confess &log(ERROR, "${strDebug}: " . $strError); } - my $oSHA = Digest::SHA->new(defined($strHashType) ? $strHashType : 'sha1'); + my $oSHA = Digest::SHA->new($strHashType); - $oSHA->addfile($hFile); + if ($bCompressed) + { + ($strHash, $iSize) = + $self->{oRemote}->binary_xfer($hFile, undef, 'in', true, false, false); + } + else + { + my $iBlockSize; + my $tBuffer; + + do + { + # Read a block from the file + $iBlockSize = sysread($hFile, $tBuffer, 4194304); + + if (!defined($iBlockSize)) + { + confess &log(ERROR, "${strFileOp} could not be read: " . 
$!); + } + + $iSize += $iBlockSize; + $oSHA->add($tBuffer); + } + while ($iBlockSize > 0); + + $strHash = $oSHA->hexdigest(); + } close($hFile); - - $strHash = $oSHA->hexdigest(); } - return $strHash; + return $strHash, $iSize; +} + +#################################################################################################################################### +# OWNER +#################################################################################################################################### +sub owner +{ + my $self = shift; + my $strPathType = shift; + my $strFile = shift; + my $strUser = shift; + my $strGroup = shift; + + # Set operation variables + my $strFileOp = $self->path_get($strPathType, $strFile); + + # Set operation and debug strings + my $strOperation = OP_FILE_OWNER; + my $strDebug = "${strPathType}:${strFileOp}, " . + 'user = ' . (defined($strUser) ? $strUser : '[undef]') . + 'group = ' . (defined($strGroup) ? $strGroup : '[undef]'); + &log(DEBUG, "${strOperation}: ${strDebug}"); + + if ($self->is_remote($strPathType)) + { + confess &log(ASSERT, "${strDebug}: remote operation not supported"); + } + else + { + my $iUserId; + my $iGroupId; + my $oStat; + + if (!defined($strUser) || !defined($strGroup)) + { + $oStat = stat($strFileOp); + + if (!defined($oStat)) + { + confess &log(ERROR, 'unable to stat ${strFileOp}'); + } + } + + if (defined($strUser)) + { + $iUserId = getpwnam($strUser); + } + else + { + $iUserId = $oStat->uid; + } + + if (defined($strGroup)) + { + $iGroupId = getgrnam($strGroup); + } + else + { + $iGroupId = $oStat->gid; + } + + chown($iUserId, $iGroupId, $strFileOp) + or confess &log(ERROR, "unable to set ownership for ${strFileOp}"); + } } #################################################################################################################################### @@ -903,6 +1018,47 @@ sub list return @stryFileList; } +#################################################################################################################################### +# WAIT +# +# Wait until the next second. This is done in the file object because it must be performed on whichever side the db is on, local or +# remote. This function is used to make sure that no files are copied in the same second as the manifest is created. The reason is +# that the db might modify they file again in the same second as the copy and that change will not be visible to a subsequent +# incremental backup using timestamp/size to determine deltas. 
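# Illustrative sketch, not part of this patch: on the local side the method simply calls wait_remainder() from
# BackRest::Utility, which sleeps out the rest of the current second. The path type below is only an example:
#
#     my $lTimeBegin = $oFile->wait(PATH_DB_ABSOLUTE);   # second in which the wait began
#
# The caller can use $lTimeBegin as the copy-start timestamp, since no file copy begins within that same second.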
+#################################################################################################################################### +sub wait +{ + my $self = shift; + my $strPathType = shift; + + # Set operation and debug strings + my $strOperation = OP_FILE_WAIT; + my $strDebug = "${strPathType}"; + &log(DEBUG, "${strOperation}: ${strDebug}"); + + # Second when the function was called + my $lTimeBegin; + + # Run remotely + if ($self->is_remote($strPathType)) + { + # Add remote info to debug string + $strDebug = "${strOperation}: remote: ${strDebug}"; + &log(TRACE, "${strOperation}: remote"); + + # Execute the command + $lTimeBegin = $self->{oRemote}->command_execute($strOperation, undef, true, $strDebug); + } + # Run locally + else + { + # Wait the remainder of the current second + $lTimeBegin = wait_remainder(); + } + + return $lTimeBegin; +} + #################################################################################################################################### # MANIFEST # @@ -1101,10 +1257,10 @@ sub manifest_recurse # Get group name ${$oManifestHashRef}{name}{"${strFile}"}{group} = getgrgid($oStat->gid); - # Get permissions + # Get mode if (${$oManifestHashRef}{name}{"${strFile}"}{type} ne 'l') { - ${$oManifestHashRef}{name}{"${strFile}"}{permission} = sprintf('%04o', S_IMODE($oStat->mode)); + ${$oManifestHashRef}{name}{"${strFile}"}{mode} = sprintf('%04o', S_IMODE($oStat->mode)); } # Recurse into directories @@ -1123,7 +1279,7 @@ sub manifest_recurse # * source and destination can be local or remote # * wire and output compression/decompression are supported # * intermediate temp files are used to prevent partial copies -# * modification time and permissions can be set on destination file +# * modification time, mode, and ownership can be set on destination file # * destination path can optionally be created #################################################################################################################################### sub copy @@ -1137,14 +1293,18 @@ sub copy my $bDestinationCompress = shift; my $bIgnoreMissingSource = shift; my $lModificationTime = shift; - my $strPermission = shift; + my $strMode = shift; my $bDestinationPathCreate = shift; + my $strUser = shift; + my $strGroup = shift; + my $bAppendChecksum = shift; # Set defaults $bSourceCompressed = defined($bSourceCompressed) ? $bSourceCompressed : false; $bDestinationCompress = defined($bDestinationCompress) ? $bDestinationCompress : false; $bIgnoreMissingSource = defined($bIgnoreMissingSource) ? $bIgnoreMissingSource : false; $bDestinationPathCreate = defined($bDestinationPathCreate) ? $bDestinationPathCreate : false; + $bAppendChecksum = defined($bAppendChecksum) ? $bAppendChecksum : false; # Set working variables my $bSourceRemote = $self->is_remote($strSourcePathType) || $strSourcePathType eq PIPE_STDIN; @@ -1156,6 +1316,11 @@ sub copy my $strDestinationTmpOp = $strDestinationPathType eq PIPE_STDOUT ? undef : $self->path_get($strDestinationPathType, $strDestinationFile, true); + # Checksum and size variables + my $strChecksum = undef; + my $iFileSize = undef; + my $bResult = true; + # Set debug string and log my $strDebug = ($bSourceRemote ? ' remote' : ' local') . " ${strSourcePathType}" . (defined($strSourceFile) ? ":${strSourceOp}" : '') . @@ -1164,7 +1329,11 @@ sub copy ', source_compressed = ' . ($bSourceCompressed ? 'true' : 'false') . ', destination_compress = ' . ($bDestinationCompress ? 'true' : 'false') . ', ignore_missing_source = ' . ($bIgnoreMissingSource ? 
'true' : 'false') . - ', destination_path_create = ' . ($bDestinationPathCreate ? 'true' : 'false'); + ', destination_path_create = ' . ($bDestinationPathCreate ? 'true' : 'false') . + ', modification_time = ' . (defined($lModificationTime) ? $lModificationTime : '[undef]') . + ', mode = ' . (defined($strMode) ? $strMode : '[undef]') . + ', user = ' . (defined($strUser) ? $strUser : '[undef]') . + ', group = ' . (defined($strGroup) ? $strGroup : '[undef]'); &log(DEBUG, OP_FILE_COPY . ": ${strDebug}"); # Open the source and destination files (if needed) @@ -1185,7 +1354,7 @@ sub copy if ($bIgnoreMissingSource && $strDestinationPathType ne PIPE_STDOUT) { - return false; + return false, undef, undef; } } @@ -1195,7 +1364,7 @@ sub copy { if ($strDestinationPathType eq PIPE_STDOUT) { - $self->{oRemote}->write_line(*STDOUT, 'block 0'); + $self->{oRemote}->write_line(*STDOUT, 'block -1'); } confess &log(ERROR, $strError, $iErrorCode); @@ -1264,6 +1433,7 @@ sub copy { $oParamHash{source_file} = $strSourceOp; $oParamHash{source_compressed} = $bSourceCompressed; + $oParamHash{destination_compress} = $bDestinationCompress; $hIn = $self->{oRemote}->{hOut}; } @@ -1282,12 +1452,28 @@ sub copy else { $oParamHash{destination_file} = $strDestinationOp; + $oParamHash{source_compressed} = $bSourceCompressed; $oParamHash{destination_compress} = $bDestinationCompress; $oParamHash{destination_path_create} = $bDestinationPathCreate; - if (defined($strPermission)) + if (defined($strMode)) { - $oParamHash{permission} = $strPermission; + $oParamHash{mode} = $strMode; + } + + if (defined($strUser)) + { + $oParamHash{user} = $strUser; + } + + if (defined($strGroup)) + { + $oParamHash{group} = $strGroup; + } + + if ($bAppendChecksum) + { + $oParamHash{append_checksum} = true; } $hOut = $self->{oRemote}->{hIn}; @@ -1304,15 +1490,30 @@ sub copy $oParamHash{destination_compress} = $bDestinationCompress; $oParamHash{destination_path_create} = $bDestinationPathCreate; - if (defined($strPermission)) + if (defined($strMode)) { - $oParamHash{permission} = $strPermission; + $oParamHash{mode} = $strMode; + } + + if (defined($strUser)) + { + $oParamHash{user} = $strUser; + } + + if (defined($strGroup)) + { + $oParamHash{group} = $strGroup; } if ($bIgnoreMissingSource) { $oParamHash{ignore_missing_source} = $bIgnoreMissingSource; } + + if ($bAppendChecksum) + { + $oParamHash{append_checksum} = true; + } } # Build debug string @@ -1333,7 +1534,8 @@ sub copy # Transfer the file (skip this for copies where both sides are remote) if ($strOperation ne OP_FILE_COPY) { - $self->{oRemote}->binary_xfer($hIn, $hOut, $strRemote, $bSourceCompressed, $bDestinationCompress); + ($strChecksum, $iFileSize) = + $self->{oRemote}->binary_xfer($hIn, $hOut, $strRemote, $bSourceCompressed, $bDestinationCompress); } # If this is the controlling process then wait for OK from remote @@ -1344,7 +1546,47 @@ sub copy eval { - $strOutput = $self->{oRemote}->output_read($strOperation eq OP_FILE_COPY, $strDebug, true); + $strOutput = $self->{oRemote}->output_read(true, $strDebug, true); + + # Check the result of the remote call + if (substr($strOutput, 0, 1) eq 'Y') + { + # If the operation was purely remote, get checksum/size + if ($strOperation eq OP_FILE_COPY || + $strOperation eq OP_FILE_COPY_IN && $bSourceCompressed && !$bDestinationCompress) + { + # Checksum shouldn't already be set + if (defined($strChecksum) || defined($iFileSize)) + { + confess &log(ASSERT, "checksum and size are already defined, but shouldn't be"); + } + + # Parse output and check to 
make sure tokens are defined + my @stryToken = split(/ /, $strOutput); + + if (!defined($stryToken[1]) || !defined($stryToken[2]) || + $stryToken[1] eq '?' && $stryToken[2] eq '?') + { + confess &log(ERROR, "invalid return from copy" . (defined($strOutput) ? ": ${strOutput}" : '')); + } + + # Read the checksum and size + if ($stryToken[1] ne '?') + { + $strChecksum = $stryToken[1]; + } + + if ($stryToken[2] ne '?') + { + $iFileSize = $stryToken[2]; + } + } + } + # Remote called returned false + else + { + $bResult = false; + } }; # If there is an error then evaluate @@ -1360,38 +1602,38 @@ sub copy close($hDestinationFile) or confess &log(ERROR, "cannot close file ${strDestinationTmpOp}"); unlink($strDestinationTmpOp) or confess &log(ERROR, "cannot remove file ${strDestinationTmpOp}"); - return false; + return false, undef, undef; } - # Otherwise report the error confess $oMessage; } - - # If this was a remote copy, then return the result - if ($strOperation eq OP_FILE_COPY) - { - return false; #$strOutput eq 'N' ? true : false; - } } } # Else this is a local operation else { - # If the source is compressed and the destination is not then decompress - if ($bSourceCompressed && !$bDestinationCompress) + # If the source is not compressed and the destination is then compress + if (!$bSourceCompressed && $bDestinationCompress) { - gunzip($hSourceFile => $hDestinationFile) - or die confess &log(ERROR, "${strDebug}: unable to uncompress: " . $GunzipError); + ($strChecksum, $iFileSize) = + $self->{oRemote}->binary_xfer($hSourceFile, $hDestinationFile, 'out', false, true, false); } - elsif (!$bSourceCompressed && $bDestinationCompress) + # If the source is compressed and the destination is not then decompress + elsif ($bSourceCompressed && !$bDestinationCompress) { - gzip($hSourceFile => $hDestinationFile) - or die confess &log(ERROR, "${strDebug}: unable to compress: " . $GzipError); + ($strChecksum, $iFileSize) = + $self->{oRemote}->binary_xfer($hSourceFile, $hDestinationFile, 'in', true, false, false); + } + # Else both side are compressed, so copy capturing checksum + elsif ($bSourceCompressed) + { + ($strChecksum, $iFileSize) = + $self->{oRemote}->binary_xfer($hSourceFile, $hDestinationFile, 'out', true, true, false); } else { - cp($hSourceFile, $hDestinationFile) - or die confess &log(ERROR, "${strDebug}: unable to copy: " . 
$!); + ($strChecksum, $iFileSize) = + $self->{oRemote}->binary_xfer($hSourceFile, $hDestinationFile, 'in', false, true, false); } } @@ -1407,14 +1649,22 @@ sub copy close($hDestinationFile) or confess &log(ERROR, "cannot close file ${strDestinationTmpOp}"); } - # Where the destination is local, set permissions, modification time, and perform move to final location - if (!$bDestinationRemote) + # Checksum and file size should be set if the destination is not remote + if ($bResult && + !(!$bSourceRemote && $bDestinationRemote && $bSourceCompressed) && + (!defined($strChecksum) || !defined($iFileSize))) { - # Set the file permission if required - if (defined($strPermission)) + confess &log(ASSERT, "${strDebug}: checksum or file size not set"); + } + + # Where the destination is local, set mode, modification time, and perform move to final location + if ($bResult && !$bDestinationRemote) + { + # Set the file Mode if required + if (defined($strMode)) { - chmod(oct($strPermission), $strDestinationTmpOp) - or confess &log(ERROR, "unable to set permissions for local ${strDestinationTmpOp}"); + chmod(oct($strMode), $strDestinationTmpOp) + or confess &log(ERROR, "unable to set mode for local ${strDestinationTmpOp}"); } # Set the file modification time if required @@ -1424,12 +1674,33 @@ sub copy or confess &log(ERROR, "unable to set time for local ${strDestinationTmpOp}"); } + # set user and/or group if required + if (defined($strUser) || defined($strGroup)) + { + $self->owner(PATH_ABSOLUTE, $strDestinationTmpOp, $strUser, $strGroup); + } + + # Replace checksum in destination filename (if exists) + if ($bAppendChecksum) + { + # Replace destination filename + if ($bDestinationCompress) + { + $strDestinationOp = + substr($strDestinationOp, 0, length($strDestinationOp) - length($self->{strCompressExtension}) - 1) . + '-' . $strChecksum . '.' . $self->{strCompressExtension}; + } + else + { + $strDestinationOp .= '-' . 
$strChecksum; + } + } + # Move the file from tmp to final destination $self->move(PATH_ABSOLUTE, $strDestinationTmpOp, PATH_ABSOLUTE, $strDestinationOp, true); } - return true; + return $bResult, $strChecksum, $iFileSize; } -no Moose; -__PACKAGE__->meta->make_immutable; +1; diff --git a/lib/BackRest/Manifest.pm b/lib/BackRest/Manifest.pm new file mode 100644 index 000000000..a8edf3265 --- /dev/null +++ b/lib/BackRest/Manifest.pm @@ -0,0 +1,751 @@ +#################################################################################################################################### +# MANIFEST MODULE +#################################################################################################################################### +package BackRest::Manifest; + +use strict; +use warnings FATAL => qw(all); +use Carp qw(confess); + +use File::Basename qw(dirname basename); +use Time::Local qw(timelocal); +use Digest::SHA; + +use lib dirname($0); +use BackRest::Exception qw(ERROR_CHECKSUM ERROR_FORMAT); +use BackRest::Utility; +use BackRest::File; + +# Exports +use Exporter qw(import); +our @EXPORT = qw(MANIFEST_PATH MANIFEST_FILE MANIFEST_LINK + + MANIFEST_SECTION_BACKUP MANIFEST_SECTION_BACKUP_OPTION MANIFEST_SECTION_BACKUP_PATH + MANIFEST_SECTION_BACKUP_TABLESPACE + + MANIFEST_KEY_ARCHIVE_START MANIFEST_KEY_ARCHIVE_STOP MANIFEST_KEY_BASE MANIFEST_KEY_CHECKSUM MANIFEST_KEY_COMPRESS + MANIFEST_KEY_HARDLINK MANIFEST_KEY_LABEL MANIFEST_KEY_PRIOR MANIFEST_KEY_REFERENCE MANIFEST_KEY_TIMESTAMP_COPY_START + MANIFEST_KEY_TIMESTAMP_START MANIFEST_KEY_TIMESTAMP_STOP MANIFEST_KEY_TYPE MANIFEST_KEY_VERSION + + MANIFEST_SUBKEY_CHECKSUM MANIFEST_SUBKEY_DESTINATION MANIFEST_SUBKEY_EXISTS MANIFEST_SUBKEY_FUTURE + MANIFEST_SUBKEY_GROUP MANIFEST_SUBKEY_LINK MANIFEST_SUBKEY_MODE MANIFEST_SUBKEY_MODIFICATION_TIME + MANIFEST_SUBKEY_PATH MANIFEST_SUBKEY_REFERENCE MANIFEST_SUBKEY_SIZE MANIFEST_SUBKEY_USER); + +#################################################################################################################################### +# File/path constants +#################################################################################################################################### +use constant FILE_MANIFEST => 'backup.manifest'; + +push @EXPORT, qw(FILE_MANIFEST); + +#################################################################################################################################### +# MANIFEST Constants +#################################################################################################################################### +use constant +{ + MANIFEST_PATH => 'path', + MANIFEST_FILE => 'file', + MANIFEST_LINK => 'link', + + MANIFEST_SECTION_BACKUP => 'backup', + MANIFEST_SECTION_BACKUP_OPTION => 'backup:option', + MANIFEST_SECTION_BACKUP_PATH => 'backup:path', + MANIFEST_SECTION_BACKUP_TABLESPACE => 'backup:tablespace', + + MANIFEST_KEY_ARCHIVE_START => 'archive-start', + MANIFEST_KEY_ARCHIVE_STOP => 'archive-stop', + MANIFEST_KEY_BASE => 'base', + MANIFEST_KEY_CHECKSUM => 'checksum', + MANIFEST_KEY_COMPRESS => 'compress', + MANIFEST_KEY_FORMAT => 'format', + MANIFEST_KEY_HARDLINK => 'hardlink', + MANIFEST_KEY_LABEL => 'label', + MANIFEST_KEY_PRIOR => 'prior', + MANIFEST_KEY_REFERENCE => 'reference', + MANIFEST_KEY_TIMESTAMP_COPY_START => 'timestamp-copy-start', + MANIFEST_KEY_TIMESTAMP_START => 'timestamp-start', + MANIFEST_KEY_TIMESTAMP_STOP => 'timestamp-stop', + MANIFEST_KEY_TYPE => 'type', + MANIFEST_KEY_VERSION => 'version', + + MANIFEST_SUBKEY_CHECKSUM => 'checksum', 
+ MANIFEST_SUBKEY_DESTINATION => 'link_destination', + MANIFEST_SUBKEY_EXISTS => 'exists', + MANIFEST_SUBKEY_FUTURE => 'future', + MANIFEST_SUBKEY_GROUP => 'group', + MANIFEST_SUBKEY_LINK => 'link', + MANIFEST_SUBKEY_MODE => 'mode', + MANIFEST_SUBKEY_MODIFICATION_TIME => 'modification_time', + MANIFEST_SUBKEY_PATH => 'path', + MANIFEST_SUBKEY_REFERENCE => 'reference', + MANIFEST_SUBKEY_SIZE => 'size', + MANIFEST_SUBKEY_USER => 'user' +}; + +#################################################################################################################################### +# CONSTRUCTOR +#################################################################################################################################### +sub new +{ + my $class = shift; # Class name + my $strFileName = shift; # Manifest filename + my $bLoad = shift; # Load the manifest? + + # Create the class hash + my $self = {}; + bless $self, $class; + + # Filename must be specified + if (!defined($strFileName)) + { + confess &log(ASSERT, 'filename must be provided'); + } + + # Set variables + my $oManifest = {}; + $self->{oManifest} = $oManifest; + $self->{strFileName} = $strFileName; + + # Load the manifest if specified + if (!(defined($bLoad) && $bLoad == false)) + { + ini_load($strFileName, $oManifest); + + # Make sure the manifest is valid by testing checksum + my $strChecksum = $self->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_CHECKSUM); + my $strTestChecksum = $self->hash(); + + if ($strChecksum ne $strTestChecksum) + { + confess &log(ERROR, "backup.manifest checksum is invalid, should be ${strTestChecksum}", ERROR_CHECKSUM); + } + + # Make sure that the format is current, otherwise error + my $iFormat = $self->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_FORMAT, undef, false, 0); + + if ($iFormat != FORMAT) + { + confess &log(ERROR, "backup format of ${strFileName} is ${iFormat} but " . FORMAT . ' is required by this version of ' . + 'PgBackRest. If you are attempting an incr/diff backup you will need to take a new full backup. ' . + "If you are trying to restore, you''ll need to use a version that supports format ${iFormat}." , + ERROR_FORMAT); + } + } + else + { + $self->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_FORMAT, undef, FORMAT); + } + + return $self; +} + +#################################################################################################################################### +# SAVE +# +# Save the manifest. +#################################################################################################################################### +sub save +{ + my $self = shift; + + # Create the checksum + $self->hash(); + + # Save the config file + ini_save($self->{strFileName}, $self->{oManifest}); +} + +#################################################################################################################################### +# HASH +# +# Generate hash for the manifest. 
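# Illustrative sketch, not part of this patch: the constructor above relies on this method to validate a loaded manifest.
# The manifest path below is invented; the flow follows new() and hash():
#
#     my $oManifest = BackRest::Manifest->new('/backup/db/20150325-000000F/backup.manifest');
#
# new() recomputes the SHA-1 over every section/key/subkey value via hash() and compares it with the stored
# MANIFEST_KEY_CHECKSUM value; a mismatch raises ERROR_CHECKSUM and an unsupported format raises ERROR_FORMAT.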
+#################################################################################################################################### +sub hash +{ + my $self = shift; + + my $oManifest = $self->{oManifest}; + + # Remove the old checksum + $self->remove(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_CHECKSUM); + + my $oSHA = Digest::SHA->new('sha1'); + + # Calculate the checksum from section values + foreach my $strSection ($self->keys()) + { + $oSHA->add($strSection); + + # Calculate the checksum from key values + foreach my $strKey ($self->keys($strSection)) + { + $oSHA->add($strKey); + + my $strValue = $self->get($strSection, $strKey); + + if (!defined($strValue)) + { + confess &log(ASSERT, "section ${strSection}, key ${$strKey} has undef value"); + } + + # Calculate the checksum from subkey values + if (ref($strValue) eq "HASH") + { + foreach my $strSubKey ($self->keys($strSection, $strKey)) + { + my $strSubValue = $self->get($strSection, $strKey, $strSubKey); + + if (!defined($strSubValue)) + { + confess &log(ASSERT, "section ${strSection}, key ${strKey}, subkey ${strSubKey} has undef value"); + } + + $oSHA->add($strSubValue); + } + } + else + { + $oSHA->add($strValue); + } + } + } + + # Set the new checksum + my $strHash = $oSHA->hexdigest(); + + $self->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_CHECKSUM, undef, $strHash); + + return $strHash; +} + +#################################################################################################################################### +# GET +# +# Get a value. +#################################################################################################################################### +sub get +{ + my $self = shift; + my $strSection = shift; + my $strValue = shift; + my $strSubValue = shift; + my $bRequired = shift; + my $oDefault = shift; + + my $oManifest = $self->{oManifest}; + + # Section must always be defined + if (!defined($strSection)) + { + confess &log(ASSERT, 'section is not defined'); + } + + # Set default for required + $bRequired = defined($bRequired) ? $bRequired : true; + + # Store the result + my $oResult = undef; + + if (defined($strSubValue)) + { + if (!defined($strValue)) + { + confess &log(ASSERT, 'subvalue requested bu value is not defined'); + } + + if (defined(${$oManifest}{$strSection}{$strValue})) + { + $oResult = ${$oManifest}{$strSection}{$strValue}{$strSubValue}; + } + } + elsif (defined($strValue)) + { + if (defined(${$oManifest}{$strSection})) + { + $oResult = ${$oManifest}{$strSection}{$strValue}; + } + } + else + { + $oResult = ${$oManifest}{$strSection}; + } + + if (!defined($oResult) && $bRequired) + { + confess &log(ASSERT, "manifest section '$strSection'" . (defined($strValue) ? ", value '$strValue'" : '') . + (defined($strSubValue) ? ", subvalue '$strSubValue'" : '') . ' is required but not defined'); + } + + if (!defined($oResult) && defined($oDefault)) + { + $oResult = $oDefault; + } + + return $oResult +} + +#################################################################################################################################### +# SET +# +# Set a value. 
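# Illustrative sketch, not part of this patch: values are addressed by section, key and optional subkey, and every set() is
# vetted by valid() further down. The backup label, file name and mode below are invented:
#
#     $oManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL, undef, '20150325-000000F');
#     $oManifest->set('base:file', 'PG_VERSION', MANIFEST_SUBKEY_MODE, '0600');
#     my $strLabel = $oManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL);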
+#################################################################################################################################### +sub set +{ + my $self = shift; + my $strSection = shift; + my $strKey = shift; + my $strSubKey = shift; + my $strValue = shift; + + my $oManifest = $self->{oManifest}; + + # Make sure the keys are valid + $self->valid($strSection, $strKey, $strSubKey); + + if (defined($strSubKey)) + { + ${$oManifest}{$strSection}{$strKey}{$strSubKey} = $strValue; + } + else + { + ${$oManifest}{$strSection}{$strKey} = $strValue; + } +} + +#################################################################################################################################### +# REMOVE +# +# Remove a value. +#################################################################################################################################### +sub remove +{ + my $self = shift; + my $strSection = shift; + my $strKey = shift; + my $strSubKey = shift; + my $strValue = shift; + + my $oManifest = $self->{oManifest}; + + # Make sure the keys are valid + $self->valid($strSection, $strKey, $strSubKey, undef, true); + + if (defined($strSubKey)) + { + delete(${$oManifest}{$strSection}{$strKey}{$strSubKey}); + } + else + { + delete(${$oManifest}{$strSection}{$strKey}); + } +} + +#################################################################################################################################### +# VALID +# +# Determine if section, key, subkey combination is valid. +#################################################################################################################################### +sub valid +{ + my $self = shift; + my $strSection = shift; + my $strKey = shift; + my $strSubKey = shift; + my $strValue = shift; + my $bDelete = shift; + + # Section and key must always be defined + if (!defined($strSection) || !defined($strKey)) + { + confess &log(ASSERT, 'section or key is not defined'); + } + + # Default bDelete + $bDelete = defined($bDelete) ? 
$bDelete : false; + + if ($strSection =~ /^.*\:(file|path|link)$/ && $strSection !~ /^backup\:path$/) + { + if (!defined($strSubKey) && $bDelete) + { + return true; + } + + my $strPath = (split(':', $strSection))[0]; + my $strType = (split(':', $strSection))[1]; + + if ($strPath eq 'tablespace') + { + $strPath = (split(':', $strSection))[1]; + $strType = (split(':', $strSection))[2]; + } + + if (($strType eq 'path' || $strType eq 'file' || $strType eq 'link') && + ($strSubKey eq MANIFEST_SUBKEY_USER || + $strSubKey eq MANIFEST_SUBKEY_GROUP)) + { + return true; + } + elsif (($strType eq 'path' || $strType eq 'file') && + ($strSubKey eq MANIFEST_SUBKEY_MODE)) + { + return true; + } + elsif ($strType eq 'file' && + ($strSubKey eq MANIFEST_SUBKEY_CHECKSUM || + $strSubKey eq MANIFEST_SUBKEY_EXISTS || + $strSubKey eq MANIFEST_SUBKEY_FUTURE || + $strSubKey eq MANIFEST_SUBKEY_MODIFICATION_TIME || + $strSubKey eq MANIFEST_SUBKEY_REFERENCE || + $strSubKey eq MANIFEST_SUBKEY_SIZE)) + { + return true; + } + elsif ($strType eq 'link' && + $strSubKey eq MANIFEST_SUBKEY_DESTINATION) + { + return true; + } + } + if ($strSection eq MANIFEST_SECTION_BACKUP) + { + if ($strKey eq MANIFEST_KEY_ARCHIVE_START || + $strKey eq MANIFEST_KEY_ARCHIVE_STOP || + $strKey eq MANIFEST_KEY_CHECKSUM || + $strKey eq MANIFEST_KEY_FORMAT || + $strKey eq MANIFEST_KEY_LABEL || + $strKey eq MANIFEST_KEY_PRIOR || + $strKey eq MANIFEST_KEY_REFERENCE || + $strKey eq MANIFEST_KEY_TIMESTAMP_COPY_START || + $strKey eq MANIFEST_KEY_TIMESTAMP_START || + $strKey eq MANIFEST_KEY_TIMESTAMP_STOP || + $strKey eq MANIFEST_KEY_TYPE || + $strKey eq MANIFEST_KEY_VERSION) + { + return true; + } + } + elsif ($strSection eq MANIFEST_SECTION_BACKUP_OPTION) + { + if ($strKey eq MANIFEST_KEY_CHECKSUM || + $strKey eq MANIFEST_KEY_COMPRESS || + $strKey eq MANIFEST_KEY_HARDLINK) + { + return true; + } + } + elsif ($strSection eq MANIFEST_SECTION_BACKUP_TABLESPACE) + { + if ($strSubKey eq 'link' || + $strSubKey eq 'path') + { + return true; + } + } + elsif ($strSection eq MANIFEST_SECTION_BACKUP_PATH) + { + if ($strKey eq 'base' || $strKey =~ /^tablespace\:.*$/) + { + return true; + } + } + + confess &log(ASSERT, "manifest section '${strSection}', key '${strKey}'" . + (defined($strSubKey) ? ", subkey '$strSubKey'" : '') . ' is not valid'); +} + +#################################################################################################################################### +# epoch +# +# Retrieves a value in the format YYYY-MM-DD HH24:MI:SS and converts to epoch time. +#################################################################################################################################### +sub epoch +{ + my $self = shift; + my $strSection = shift; + my $strKey = shift; + my $strSubKey = shift; + + my $strValue = $self->get($strSection, $strKey, $strSubKey); + + my ($iYear, $iMonth, $iDay, $iHour, $iMinute, $iSecond) = split(/[\s\-\:]+/, $strValue); + + return timelocal($iSecond, $iMinute, $iHour, $iDay , $iMonth - 1, $iYear); +} + +#################################################################################################################################### +# KEYS +# +# Get a list of keys. 
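+#
+# For example, to walk every file recorded for the base path (sketch only):
+#
+#     foreach my $strName ($oManifest->keys('base:file'))
+#     {
+#         my $lSize = $oManifest->get('base:file', $strName, MANIFEST_SUBKEY_SIZE);
+#     }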
+#################################################################################################################################### +sub keys +{ + my $self = shift; + my $strSection = shift; + my $strKey = shift; + + if (defined($strSection)) + { + if ($self->test($strSection, $strKey)) + { + return sort(keys $self->get($strSection, $strKey)); + } + + return []; + } + + return sort(keys $self->{oManifest}); +} + +#################################################################################################################################### +# TEST +# +# Test a value to see if it equals the supplied test value. If no test value is given, tests that it is defined. +#################################################################################################################################### +sub test +{ + my $self = shift; + my $strSection = shift; + my $strValue = shift; + my $strSubValue = shift; + my $strTest = shift; + + my $strResult = $self->get($strSection, $strValue, $strSubValue, false); + + if (defined($strResult)) + { + if (defined($strTest)) + { + return $strResult eq $strTest ? true : false; + } + + return true; + } + + return false; +} + +#################################################################################################################################### +# BUILD +# +# Build the manifest object. +#################################################################################################################################### +sub build +{ + my $self = shift; + my $oFile = shift; + my $strDbClusterPath = shift; + my $oLastManifest = shift; + my $bNoStartStop = shift; + my $oTablespaceMapRef = shift; + my $strLevel = shift; + + &log(DEBUG, 'Manifest->build'); + + # If no level is defined then it must be base + if (!defined($strLevel)) + { + $strLevel = 'base'; + + if (defined($oLastManifest)) + { + $self->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, + $oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL)); + } + + # If bNoStartStop then build the tablespace map from pg_tblspc path + if ($bNoStartStop) + { + $oTablespaceMapRef = {}; + + my %oTablespaceManifestHash; + $oFile->manifest(PATH_DB_ABSOLUTE, $strDbClusterPath . '/pg_tblspc', \%oTablespaceManifestHash); + + foreach my $strName (sort(CORE::keys $oTablespaceManifestHash{name})) + { + if ($strName eq '.' 
or $strName eq '..') + { + next; + } + + if ($oTablespaceManifestHash{name}{$strName}{type} ne 'l') + { + confess &log(ERROR, "pg_tblspc/${strName} is not a link"); + } + + &log(DEBUG, "Found tablespace ${strName}"); + + ${$oTablespaceMapRef}{oid}{$strName}{name} = $strName; + } + } + } + + # Get the manifest for this level + my %oManifestHash; + $oFile->manifest(PATH_DB_ABSOLUTE, $strDbClusterPath, \%oManifestHash); + + $self->set(MANIFEST_SECTION_BACKUP_PATH, $strLevel, undef, $strDbClusterPath); + + # Loop though all paths/files/links in the manifest + foreach my $strName (sort(CORE::keys $oManifestHash{name})) + { + # Skip certain files during backup + if (($strName =~ /^pg\_xlog\/.*/ && !$bNoStartStop) || # pg_xlog/ - this will be reconstructed + $strName =~ /^postmaster\.pid$/ || # postmaster.pid - to avoid confusing postgres when restoring + $strName =~ /^recovery\.conf$/) # recovery.conf - doesn't make sense to backup this file + { + next; + } + + my $cType = $oManifestHash{name}{"${strName}"}{type}; + my $strLinkDestination = $oManifestHash{name}{"${strName}"}{link_destination}; + my $strSection = "${strLevel}:path"; + + if ($cType eq 'f') + { + $strSection = "${strLevel}:file"; + } + elsif ($cType eq 'l') + { + $strSection = "${strLevel}:link"; + } + elsif ($cType ne 'd') + { + confess &log(ASSERT, "unrecognized file type $cType for file $strName"); + } + + # User and group required for all types + $self->set($strSection, $strName, MANIFEST_SUBKEY_USER, $oManifestHash{name}{"${strName}"}{user}); + $self->set($strSection, $strName, MANIFEST_SUBKEY_GROUP, $oManifestHash{name}{"${strName}"}{group}); + + # Mode for required file and path type only + if ($cType eq 'f' || $cType eq 'd') + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_MODE, $oManifestHash{name}{"${strName}"}{mode}); + } + + # Modification time and size required for file type only + if ($cType eq 'f') + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME, + $oManifestHash{name}{"${strName}"}{modification_time} + 0); + $self->set($strSection, $strName, MANIFEST_SUBKEY_SIZE, $oManifestHash{name}{"${strName}"}{size} + 0); + } + + # Link destination required for link type only + if ($cType eq 'l') + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_DESTINATION, + $oManifestHash{name}{"${strName}"}{link_destination}); + + # If this is a tablespace then follow the link + if (index($strName, 'pg_tblspc/') == 0 && $strLevel eq 'base') + { + my $strTablespaceOid = basename($strName); + my $strTablespaceName = ${$oTablespaceMapRef}{oid}{$strTablespaceOid}{name}; + + $self->set(MANIFEST_SECTION_BACKUP_TABLESPACE, $strTablespaceName, + MANIFEST_SUBKEY_LINK, $strTablespaceOid); + $self->set(MANIFEST_SECTION_BACKUP_TABLESPACE, $strTablespaceName, + MANIFEST_SUBKEY_PATH, $strLinkDestination); + + $self->build($oFile, $strLinkDestination, $oLastManifest, $bNoStartStop, $oTablespaceMapRef, + "tablespace:${strTablespaceName}"); + } + } + } + + # If this is the base level then do post-processing + if ($strLevel eq 'base') + { + my $bTimeInFuture = false; + + my $lTimeBegin = $oFile->wait(PATH_DB_ABSOLUTE); + + # Loop through all backup paths (base and tablespaces) + foreach my $strPathKey ($self->keys(MANIFEST_SECTION_BACKUP_PATH)) + { + my $strSection = "${strPathKey}:file"; + + # Make sure file section exists + if ($self->test($strSection)) + { + # Loop though all files + foreach my $strName ($self->keys($strSection)) + { + # If modification time is in the future (in this backup OR the last backup) set 
warning flag and do not + # allow a reference + if ($self->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME) > $lTimeBegin || + (defined($oLastManifest) && $oLastManifest->test($strSection, $strName, MANIFEST_SUBKEY_FUTURE, 'y'))) + { + $bTimeInFuture = true; + + # Only mark as future if still in the future in the current backup + if ($self->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME) > $lTimeBegin) + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_FUTURE, 'y'); + } + } + # Else check if modification time and size are unchanged since last backup + elsif (defined($oLastManifest) && $oLastManifest->test($strSection, $strName) && + $self->get($strSection, $strName, MANIFEST_SUBKEY_SIZE) == + $oLastManifest->get($strSection, $strName, MANIFEST_SUBKEY_SIZE) && + $self->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME) == + $oLastManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME)) + { + # Copy reference from previous backup if possible + if ($oLastManifest->test($strSection, $strName, MANIFEST_SUBKEY_REFERENCE)) + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_REFERENCE, + $oLastManifest->get($strSection, $strName, MANIFEST_SUBKEY_REFERENCE)); + } + # Otherwise the reference is to the previous backup + else + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_REFERENCE, + $oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL)); + } + + # Copy the checksum from previous manifest + if ($oLastManifest->test($strSection, $strName, MANIFEST_SUBKEY_CHECKSUM)) + { + $self->set($strSection, $strName, MANIFEST_SUBKEY_CHECKSUM, + $oLastManifest->get($strSection, $strName, MANIFEST_SUBKEY_CHECKSUM)); + } + + # Build the manifest reference list - not used for processing but is useful for debugging + my $strFileReference = $self->get($strSection, $strName, MANIFEST_SUBKEY_REFERENCE); + + my $strManifestReference = $self->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_REFERENCE, + undef, false); + + if (!defined($strManifestReference)) + { + $self->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_REFERENCE, undef, $strFileReference); + } + else + { + if ($strManifestReference !~ /^$strFileReference|,$strFileReference/) + { + $self->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_REFERENCE, undef, + $strManifestReference . 
",${strFileReference}"); + } + } + } + } + } + } + + # Warn if any files in the current backup are in the future + if ($bTimeInFuture) + { + &log(WARN, "some files have timestamps in the future - they will be copied to prevent possible race conditions"); + } + + # Record the time when copying will start + $self->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_COPY_START, undef, + timestamp_string_get(undef, $lTimeBegin + 1)); + } + +} + +1; diff --git a/lib/BackRest/Remote.pm b/lib/BackRest/Remote.pm index 414425f01..9cc305329 100644 --- a/lib/BackRest/Remote.pm +++ b/lib/BackRest/Remote.pm @@ -3,84 +3,83 @@ #################################################################################################################################### package BackRest::Remote; -use threads; use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); -use Moose; -use Thread::Queue; -use Net::OpenSSH; -use File::Basename; -use IO::Handle; -use POSIX ':sys_wait_h'; -use IO::Compress::Gzip qw(gzip $GzipError); -use IO::Uncompress::Gunzip qw(gunzip $GunzipError); +use Net::OpenSSH qw(); +use File::Basename qw(dirname); +use POSIX qw(:sys_wait_h); +use Scalar::Util qw(blessed); +use Compress::Raw::Zlib qw(WANT_GZIP Z_OK Z_BUF_ERROR Z_STREAM_END); +use IO::String qw(); use lib dirname($0) . '/../lib'; -use BackRest::Exception; -use BackRest::Utility; +use BackRest::Exception qw(ERROR_PROTOCOL); +use BackRest::Utility qw(log version_get trim TRACE ERROR ASSERT true false); #################################################################################################################################### -# Remote xfer default block size constant +# Exports +#################################################################################################################################### +use Exporter qw(import); +our @EXPORT = qw(DB BACKUP NONE); + +#################################################################################################################################### +# DB/BACKUP Constants #################################################################################################################################### use constant { - DEFAULT_BLOCK_SIZE => 1048576 + DB => 'db', + BACKUP => 'backup', + NONE => 'none' }; -#################################################################################################################################### -# Module variables -#################################################################################################################################### -# Protocol strings -has strGreeting => (is => 'ro', default => 'PG_BACKREST_REMOTE'); - -# Command strings -has strCommand => (is => 'bare'); - -# Module variables -has strHost => (is => 'bare'); # Host host -has strUser => (is => 'bare'); # User user -has oSSH => (is => 'bare'); # SSH object - -# Process variables -has pId => (is => 'bare'); # Process Id -has hIn => (is => 'bare'); # Input stream -has hOut => (is => 'bare'); # Output stream -has hErr => (is => 'bare'); # Error stream - -# Thread variables -has iThreadIdx => (is => 'bare'); # Thread index -has oThread => (is => 'bare'); # Thread object -has oThreadQueue => (is => 'bare'); # Thread queue object -has oThreadResult => (is => 'bare'); # Thread result object - -# Block size -has iBlockSize => (is => 'bare', default => DEFAULT_BLOCK_SIZE); # Set block size to default - #################################################################################################################################### 
# CONSTRUCTOR #################################################################################################################################### -sub BUILD +sub new { - my $self = shift; + my $class = shift; # Class name + my $strHost = shift; # Host to connect to for remote (optional as this can also be used on the remote) + my $strUser = shift; # User to connect to for remote (must be set if strHost is set) + my $strCommand = shift; # Command to execute on remote ('remote' if this is the remote) + my $iBlockSize = shift; # Buffer size + my $iCompressLevel = shift; # Set compression level + my $iCompressLevelNetwork = shift; # Set compression level for network only compression - $self->{strGreeting} .= ' ' . version_get(); + # Create the class hash + my $self = {}; + bless $self, $class; - if (defined($self->{strHost})) + # Create the greeting that will be used to check versions with the remote + $self->{strGreeting} = 'PG_BACKREST_REMOTE ' . version_get(); + + # Set default block size + $self->{iBlockSize} = $iBlockSize; + + # Set compress levels + $self->{iCompressLevel} = $iCompressLevel; + $self->{iCompressLevelNetwork} = $iCompressLevelNetwork; + + # If host is defined then make a connnection + if (defined($strHost)) { # User must be defined - if (!defined($self->{strUser})) + if (!defined($strUser)) { confess &log(ASSERT, 'strUser must be defined'); } - # User must be defined - if (!defined($self->{strCommand})) + # Command must be defined + if (!defined($strCommand)) { confess &log(ASSERT, 'strCommand must be defined'); } + $self->{strHost} = $strHost; + $self->{strUser} = $strUser; + $self->{strCommand} = $strCommand; + # Set SSH Options my $strOptionSSHRequestTTY = 'RequestTTY=yes'; my $strOptionSSHCompression = 'Compression=no'; @@ -88,8 +87,8 @@ sub BUILD &log(TRACE, 'connecting to remote ssh host ' . $self->{strHost}); # Make SSH connection - $self->{oSSH} = Net::OpenSSH->new($self->{strHost}, timeout => 300, user => $self->{strUser}, - master_opts => [-o => $strOptionSSHCompression, -o => $strOptionSSHRequestTTY]); + $self->{oSSH} = Net::OpenSSH->new($self->{strHost}, timeout => 600, user => $self->{strUser}, + master_opts => [-o => $strOptionSSHCompression, -o => $strOptionSSHRequestTTY]); $self->{oSSH}->error and confess &log(ERROR, "unable to connect to $self->{strHost}: " . 
$self->{oSSH}->error); @@ -97,26 +96,43 @@ sub BUILD ($self->{hIn}, $self->{hOut}, $self->{hErr}, $self->{pId}) = $self->{oSSH}->open3($self->{strCommand}); $self->greeting_read(); + $self->setting_write($self->{iBlockSize}, $self->{iCompressLevel}, $self->{iCompressLevelNetwork}); + } + elsif (defined($strCommand) && $strCommand eq 'remote') + { + # Write the greeting so master process knows who we are + $self->greeting_write(); + + # Read settings from master + ($self->{iBlockSize}, $self->{iCompressLevel}, $self->{iCompressLevelNetwork}) = $self->setting_read(); } - $self->{oThreadQueue} = Thread::Queue->new(); - $self->{oThreadResult} = Thread::Queue->new(); - $self->{oThread} = threads->create(\&binary_xfer_thread, $self); + # Check block size + if (!defined($self->{iBlockSize})) + { + confess &log(ASSERT, 'iBlockSize must be set'); + } + + # Check compress levels + if (!defined($self->{iCompressLevel})) + { + confess &log(ASSERT, 'iCompressLevel must be set'); + } + + if (!defined($self->{iCompressLevelNetwork})) + { + confess &log(ASSERT, 'iCompressLevelNetwork must be set'); + } + + return $self; } #################################################################################################################################### -# thread_kill +# THREAD_KILL #################################################################################################################################### sub thread_kill { my $self = shift; - - if (defined($self->{oThread})) - { - $self->{oThreadQueue}->enqueue(undef); - $self->{oThread}->join(); - $self->{oThread} = undef; - } } #################################################################################################################################### @@ -135,15 +151,15 @@ sub DEMOLISH sub clone { my $self = shift; - my $iThreadIdx = shift; return BackRest::Remote->new ( - strCommand => $self->{strCommand}, - strHost => $self->{strHost}, - strUser => $self->{strUser}, - iBlockSize => $self->{iBlockSize}, - iThreadIdx => $iThreadIdx + $self->{strHost}, + $self->{strUser}, + $self->{strCommand}, + $self->{iBlockSize}, + $self->{iCompressLevel}, + $self->{iCompressLevelNetwork} ); } @@ -172,10 +188,50 @@ sub greeting_write { my $self = shift; - if (!syswrite(*STDOUT, "$self->{strGreeting}\n")) + $self->write_line(*STDOUT, $self->{strGreeting}); +} + +#################################################################################################################################### +# SETTING_READ +# +# Read the settings from the master process. +#################################################################################################################################### +sub setting_read +{ + my $self = shift; + + # Tokenize the settings + my @stryToken = split(/ /, $self->read_line(*STDIN)); + + # Make sure there are the correct number of tokens + if (@stryToken != 4) { - confess 'unable to write greeting'; + confess &log(ASSERT, 'settings token count is invalid', ERROR_PROTOCOL); } + + # Check for the setting token just to be sure + if ($stryToken[0] ne 'setting') + { + confess &log(ASSERT, 'settings token 0 must be \'setting\''); + } + + # Return the settings + return $stryToken[1], $stryToken[2], $stryToken[3]; +} + +#################################################################################################################################### +# SETTING_WRITE +# +# Send settings to the remote process. 
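+#
+# The settings travel as a single text line that setting_read() tokenizes on the other side, e.g. (illustrative values):
+#
+#     setting 1048576 6 3
+#
+# i.e. the literal word 'setting' followed by block size, compression level, and network compression level.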
+#################################################################################################################################### +sub setting_write +{ + my $self = shift; + my $iBlockSize = shift; # Optionally, set the block size (defaults to DEFAULT_BLOCK_SIZE) + my $iCompressLevel = shift; # Set compression level + my $iCompressLevelNetwork = shift; # Set compression level for network only compression + + $self->write_line($self->{hIn}, "setting ${iBlockSize} ${iCompressLevel} ${iCompressLevelNetwork}"); } #################################################################################################################################### @@ -310,13 +366,11 @@ sub write_line my $hOut = shift; my $strBuffer = shift; - $strBuffer = $strBuffer . "\n"; + my $iLineOut = syswrite($hOut, $strBuffer . "\n"); - my $iLineOut = syswrite($hOut, $strBuffer, length($strBuffer)); - - if (!defined($iLineOut) || $iLineOut != length($strBuffer)) + if (!defined($iLineOut) || $iLineOut != length($strBuffer) + 1) { - confess 'unable to write ' . length($strBuffer) . ' byte(s)'; + confess &log(ERROR, "unable to write ${strBuffer}: $!", ERROR_PROTOCOL); } } @@ -352,45 +406,167 @@ sub wait_pid } #################################################################################################################################### -# BINARY_XFER_THREAD +# BLOCK_READ # -# De/Compresses data on a thread. +# Read a block from the protocol layer. #################################################################################################################################### -sub binary_xfer_thread +sub block_read { my $self = shift; + my $hIn = shift; + my $strBlockRef = shift; + my $bProtocol = shift; - while (my $strMessage = $self->{oThreadQueue}->dequeue()) + my $iBlockSize; + my $strMessage; + + if ($bProtocol) { - my @stryMessage = split(':', $strMessage); - my @strHandle = split(',', $stryMessage[1]); + # Read the block header and make sure it's valid + my $strBlockHeader = $self->read_line($hIn); - my $hIn = IO::Handle->new_from_fd($strHandle[0], '<'); - my $hOut = IO::Handle->new_from_fd($strHandle[1], '>'); - - $self->{oThreadResult}->enqueue('running'); - - if ($stryMessage[0] eq 'compress') + if ($strBlockHeader !~ /^block -{0,1}[0-9]+( .*){0,1}$/) { - gzip($hIn => $hOut) - or confess &log(ERROR, 'unable to compress: ' . $GzipError); + $self->wait_pid(); + confess "unable to read block header ${strBlockHeader}"; } + + # Get block size from the header + my @stryToken = split(/ /, $strBlockHeader); + $iBlockSize = $stryToken[1]; + $strMessage = $stryToken[2]; + + # If block size is 0 or an error code then undef the buffer + if ($iBlockSize <= 0) + { + undef($$strBlockRef); + } + # Else read the block else { - gunzip($hIn => $hOut) - or die confess &log(ERROR, 'unable to uncompress: ' . $GunzipError); + my $iBlockRead = 0; + my $iBlockIn = 0; + my $iOffset = defined($$strBlockRef) ? length($$strBlockRef) : 0; + + # !!! Would be nice to modify this with a non-blocking read + # http://docstore.mik.ua/orelly/perl/cookbook/ch07_15.htm + + # Read as many chunks as it takes to get the full block + while ($iBlockRead != $iBlockSize) + { + $iBlockIn = sysread($hIn, $$strBlockRef, $iBlockSize - $iBlockRead, $iBlockRead + $iOffset); + + if (!defined($iBlockIn)) + { + my $strError = $!; + + $self->wait_pid(); + confess "only read ${iBlockRead}/${iBlockSize} block bytes from remote" . + (defined($strError) ? 
": ${strError}" : ''); + } + + $iBlockRead += $iBlockIn; + } } + } + else + { + $iBlockSize = $self->stream_read($hIn, $strBlockRef, $self->{iBlockSize}, + defined($$strBlockRef) ? length($$strBlockRef) : 0); + } - close($hOut); + # Return the block size + return $iBlockSize, $strMessage; +} - $self->{oThreadResult}->enqueue('complete'); +#################################################################################################################################### +# BLOCK_WRITE +# +# Write a block to the protocol layer. +#################################################################################################################################### +sub block_write +{ + my $self = shift; + my $hOut = shift; + my $tBlockRef = shift; + my $iBlockSize = shift; + my $bProtocol = shift; + my $strMessage = shift; + + # If block size is not defined, get it from buffer length + $iBlockSize = defined($iBlockSize) ? $iBlockSize : length($$tBlockRef); + + # Write block header to the protocol stream + if ($bProtocol) + { + $self->write_line($hOut, "block ${iBlockSize}" . (defined($strMessage) ? " ${strMessage}" : '')); + } + + # Write block if size > 0 + if ($iBlockSize > 0) + { + $self->stream_write($hOut, $tBlockRef, $iBlockSize); + } +} + +#################################################################################################################################### +# STREAM_READ +# +# Read data from a stream. +#################################################################################################################################### +sub stream_read +{ + my $self = shift; + my $hIn = shift; + my $tBlockRef = shift; + my $iBlockSize = shift; + my $bOffset = shift; + + # Read a block from the stream + my $iBlockIn = sysread($hIn, $$tBlockRef, $iBlockSize, $bOffset ? length($$tBlockRef) : false); + + if (!defined($iBlockIn)) + { + $self->wait_pid(); + confess &log(ERROR, 'unable to read'); + } + + return $iBlockIn; +} + +#################################################################################################################################### +# STREAM_WRITE +# +# Write data to a stream. +#################################################################################################################################### +sub stream_write +{ + my $self = shift; + my $hOut = shift; + my $tBlockRef = shift; + my $iBlockSize = shift; + + # If block size is not defined, get it from buffer length + $iBlockSize = defined($iBlockSize) ? $iBlockSize : length($$tBlockRef); + + # Write the block + my $iBlockOut = syswrite($hOut, $$tBlockRef, $iBlockSize); + + # Report any errors + if (!defined($iBlockOut) || $iBlockOut != $iBlockSize) + { + my $strError = $!; + + $self->wait_pid(); + confess "unable to write ${iBlockSize} bytes" . (defined($strError) ? ': ' . $strError : ''); } } #################################################################################################################################### # BINARY_XFER # -# Copies data from one file handle to another, optionally compressing or decompressing the data in stream. +# Copies data from one file handle to another, optionally compressing or decompressing the data in stream. If $strRemote != none +# then one side is a protocol stream, though this can be controlled with the bProtocol param. 
#################################################################################################################################### sub binary_xfer { @@ -400,6 +576,13 @@ sub binary_xfer my $strRemote = shift; my $bSourceCompressed = shift; my $bDestinationCompress = shift; + my $bProtocol = shift; + + # The input stream must be defined (output is optional) + if (!defined($hIn)) + { + confess &log(ASSERT, 'hIn is not defined'); + } # If no remote is defined then set to none if (!defined($strRemote)) @@ -413,186 +596,383 @@ sub binary_xfer $bDestinationCompress = defined($bDestinationCompress) ? $bDestinationCompress : false; } - # Working variables - my $iBlockSize = $self->{iBlockSize}; - my $iBlockIn; - my $iBlockInTotal = $iBlockSize; - my $iBlockOut; - my $iBlockTotal = 0; - my $strBlockHeader; - my $strBlock; - my $oGzip; - my $hPipeIn; - my $hPipeOut; - my $pId; - my $bThreadRunning = false; + # Default protocol to true + $bProtocol = defined($bProtocol) ? $bProtocol : true; + my $strMessage = undef; - # Both the in and out streams must be defined - if (!defined($hIn) || !defined($hOut)) + # Checksum and size + my $strChecksum = undef; + my $iFileSize = undef; + + # Read from the protocol stream + if ($strRemote eq 'in') { - confess &log(ASSERT, 'hIn or hOut is not defined'); - } - - # If this is output and the source is not already compressed - if ($strRemote eq 'out' && !$bSourceCompressed) - { - # Increase the blocksize since we are compressing - $iBlockSize *= 4; - - # Open the in/out pipes - pipe $hPipeOut, $hPipeIn; - - # Queue the compression job with the thread - $self->{oThreadQueue}->enqueue('compress:' . fileno($hIn) . ',' . fileno($hPipeIn)); - - # Wait for the thread to acknowledge that it has duplicated the file handles - my $strMessage = $self->{oThreadResult}->dequeue(); - - # Close input pipe so that thread has the only copy, reset hIn to hPipeOut - if ($strMessage eq 'running') + # If the destination should not be compressed then decompress + if (!$bDestinationCompress) { - close($hPipeIn); - $hIn = $hPipeOut; - } - # If any other message is returned then error - else - { - confess "unknown thread message while waiting for running: ${strMessage}"; - } + my $iBlockSize; + my $tCompressedBuffer; + my $tUncompressedBuffer; + my $iUncompressedBufferSize; - $bThreadRunning = true; - } - # Spawn a child process to do decompression - elsif ($strRemote eq 'in' && !$bDestinationCompress) - { - # Open the in/out pipes - pipe $hPipeOut, $hPipeIn; + # Initialize SHA + my $oSHA; - # Queue the decompression job with the thread - $self->{oThreadQueue}->enqueue('decompress:' . fileno($hPipeOut) . ',' . fileno($hOut)); - - # Wait for the thread to acknowledge that it has duplicated the file handles - my $strMessage = $self->{oThreadResult}->dequeue(); - - # Close output pipe so that thread has the only copy, reset hOut to hPipeIn - if ($strMessage eq 'running') - { - close($hPipeOut); - $hOut = $hPipeIn; - } - # If any other message is returned then error - else - { - confess "unknown thread message while waiting for running: ${strMessage}"; - } - - $bThreadRunning = true; - } - - while (1) - { - if ($strRemote eq 'in') - { - if ($iBlockInTotal == $iBlockSize) + if (!$bProtocol) { - $strBlockHeader = $self->read_line($hIn); + $oSHA = Digest::SHA->new('sha1'); + } - if ($strBlockHeader !~ /^block [0-9]+$/) + # Initialize inflate object and check for errors + my ($oZLib, $iZLibStatus) = + new Compress::Raw::Zlib::Inflate(WindowBits => 15 & $bSourceCompressed ? 
WANT_GZIP : 0, + Bufsize => $self->{iBlockSize}, LimitOutput => 1); + + if ($iZLibStatus != Z_OK) + { + confess &log(ERROR, "unable create a inflate object: ${iZLibStatus}"); + } + + # Read all input + do + { + # Read a block from the input stream + ($iBlockSize, $strMessage) = $self->block_read($hIn, \$tCompressedBuffer, $bProtocol); + + # Process protocol messages + if (defined($strMessage) && $strMessage eq 'nochecksum') { - $self->wait_pid(); - confess "unable to read block header ${strBlockHeader}"; + $oSHA = Digest::SHA->new('sha1'); + undef($strMessage); } - $iBlockInTotal = 0; - $iBlockTotal += 1; - } - - $iBlockSize = trim(substr($strBlockHeader, index($strBlockHeader, ' ') + 1)); - - if ($iBlockSize != 0) - { - $iBlockIn = sysread($hIn, $strBlock, $iBlockSize - $iBlockInTotal); - - if (!defined($iBlockIn)) + # If the block contains data, decompress it + if ($iBlockSize > 0) { - my $strError = $!; + # Keep looping while there is more to decompress + do + { + # Decompress data + $iZLibStatus = $oZLib->inflate($tCompressedBuffer, $tUncompressedBuffer); + $iUncompressedBufferSize = length($tUncompressedBuffer); - $self->wait_pid(); - confess "unable to read block #${iBlockTotal}/${iBlockSize} bytes from remote" . - (defined($strError) ? ": ${strError}" : ''); + # If status is ok, write the data + if ($iZLibStatus == Z_OK || $iZLibStatus == Z_BUF_ERROR || $iZLibStatus == Z_STREAM_END) + { + if ($iUncompressedBufferSize > 0) + { + # Add data to checksum + if (defined($oSHA)) + { + $oSHA->add($tUncompressedBuffer); + } + + # Write data if hOut is defined + if (defined($hOut)) + { + $self->stream_write($hOut, \$tUncompressedBuffer, $iUncompressedBufferSize); + } + } + } + # Else error, exit so it can be handled + else + { + $iBlockSize = 0; + last; + } + } + while ($iZLibStatus == Z_OK && $iUncompressedBufferSize > 0 && $iBlockSize > 0); } + } + while ($iBlockSize > 0); - $iBlockInTotal += $iBlockIn; - } - else + # Make sure the decompression succeeded (iBlockSize < 0 indicates remote error, handled later) + if ($iBlockSize == 0 && $iZLibStatus != Z_STREAM_END) { - $iBlockIn = 0; + confess &log(ERROR, "unable to inflate stream: ${iZLibStatus}"); } + + # Get checksum and total uncompressed bytes written + if (defined($oSHA)) + { + $strChecksum = $oSHA->hexdigest(); + $iFileSize = $oZLib->total_out(); + }; } + # If the destination should be compressed then just write out the already compressed stream else { - $iBlockIn = sysread($hIn, $strBlock, $iBlockSize); + my $iBlockSize; + my $tBuffer; - if (!defined($iBlockIn)) + # Initialize checksum and size + my $oSHA; + + if (!$bProtocol) { - $self->wait_pid(); - confess &log(ERROR, 'unable to read'); + $oSHA = Digest::SHA->new('sha1'); + $iFileSize = 0; } - } - if ($strRemote eq 'out') - { - $strBlockHeader = "block ${iBlockIn}\n"; - - $iBlockOut = syswrite($hOut, $strBlockHeader); - - if (!defined($iBlockOut) || $iBlockOut != length($strBlockHeader)) + do { - $self->wait_pid(); - confess 'unable to write block header'; + # Read a block from the protocol stream + ($iBlockSize, $strMessage) = $self->block_read($hIn, \$tBuffer, $bProtocol); + + # If the block contains data, write it + if ($iBlockSize > 0) + { + # Add data to checksum and size + if (!$bProtocol) + { + $oSHA->add($tBuffer); + $iFileSize += $iBlockSize; + } + + $self->stream_write($hOut, \$tBuffer, $iBlockSize); + undef($tBuffer); + } } - } + while ($iBlockSize > 0); - if ($iBlockIn > 0) - { - $iBlockOut = syswrite($hOut, $strBlock, $iBlockIn); - - if (!defined($iBlockOut) || 
$iBlockOut != $iBlockIn) + # Get checksum + if (!$bProtocol) { - $self->wait_pid(); - confess "unable to write ${iBlockIn} bytes" . (defined($!) ? ': ' . $! : ''); - } - } - else - { - last; + $strChecksum = $oSHA->hexdigest(); + }; } } - - if ($bThreadRunning) + # Read from file input stream + else { - # Make sure the de/compress pipes are closed + # If source is not already compressed then compress it if ($strRemote eq 'out' && !$bSourceCompressed) { - close($hPipeOut); - } - elsif ($strRemote eq 'in' && !$bDestinationCompress) - { - close($hPipeIn); - } + my $iBlockSize; + my $tCompressedBuffer; + my $iCompressedBufferSize; + my $tUncompressedBuffer; - # Wait for the thread to acknowledge that it has completed - my $strMessage = $self->{oThreadResult}->dequeue(); + # Initialize message to indicate that a checksum will be sent + if ($bProtocol && defined($hOut)) + { + $strMessage = 'checksum'; + } - if ($strMessage eq 'complete') - { + # Initialize checksum + my $oSHA = Digest::SHA->new('sha1'); + + # Initialize inflate object and check for errors + my ($oZLib, $iZLibStatus) = + new Compress::Raw::Zlib::Deflate(WindowBits => 15 & $bDestinationCompress ? WANT_GZIP : 0, + Level => $bDestinationCompress ? $self->{iCompressLevel} : + $self->{iCompressLevelNetwork}, + Bufsize => $self->{iBlockSize}, AppendOutput => 1); + + if ($iZLibStatus != Z_OK) + { + confess &log(ERROR, "unable create a deflate object: ${iZLibStatus}"); + } + + do + { + # Read a block from the stream + $iBlockSize = $self->stream_read($hIn, \$tUncompressedBuffer, $self->{iBlockSize}); + + # If block size > 0 then compress + if ($iBlockSize > 0) + { + # Update checksum and filesize + $oSHA->add($tUncompressedBuffer); + + # Compress the data + $iZLibStatus = $oZLib->deflate($tUncompressedBuffer, $tCompressedBuffer); + $iCompressedBufferSize = length($tCompressedBuffer); + + # If compression was successful + if ($iZLibStatus == Z_OK) + { + # The compressed data is larger than block size, then write + if ($iCompressedBufferSize > $self->{iBlockSize}) + { + $self->block_write($hOut, \$tCompressedBuffer, $iCompressedBufferSize, $bProtocol, $strMessage); + undef($tCompressedBuffer); + undef($strMessage); + } + } + # Else if error + else + { + $iBlockSize = 0; + } + } + + } + while ($iBlockSize > 0); + + # If good so far flush out the last bytes + if ($iZLibStatus == Z_OK) + { + $iZLibStatus = $oZLib->flush($tCompressedBuffer); + } + + # Make sure the compression succeeded + if ($iZLibStatus != Z_OK) + { + confess &log(ERROR, "unable to deflate stream: ${iZLibStatus}"); + } + + # Get checksum and total uncompressed bytes written + $strChecksum = $oSHA->hexdigest(); + $iFileSize = $oZLib->total_in(); + + # Write out the last block + if (defined($hOut)) + { + $iCompressedBufferSize = length($tCompressedBuffer); + + if ($iCompressedBufferSize > 0) + { + $self->block_write($hOut, \$tCompressedBuffer, $iCompressedBufferSize, $bProtocol, $strMessage); + undef($strMessage); + } + + $self->block_write($hOut, undef, 0, $bProtocol, "${strChecksum}-${iFileSize}"); + } } - # If any other message is returned then error + # If source is already compressed or transfer is not compressed then just read the stream else { - confess "unknown thread message while waiting for complete: ${strMessage}"; + my $iBlockSize; + my $tBuffer; + my $tCompressedBuffer; + my $tUncompressedBuffer; + my $iUncompressedBufferSize; + my $oSHA; + my $oZLib; + my $iZLibStatus; + + # If the destination will be compressed setup deflate + if ($bDestinationCompress) + { + 
if ($bProtocol) + { + $strMessage = 'checksum'; + } + + # Initialize checksum and size + $oSHA = Digest::SHA->new('sha1'); + $iFileSize = 0; + + # Initialize inflate object and check for errors + ($oZLib, $iZLibStatus) = + new Compress::Raw::Zlib::Inflate(WindowBits => WANT_GZIP, Bufsize => $self->{iBlockSize}, LimitOutput => 1); + + if ($iZLibStatus != Z_OK) + { + confess &log(ERROR, "unable create a inflate object: ${iZLibStatus}"); + } + } + # Initialize message to indicate that a checksum will not be sent + elsif ($bProtocol) + { + $strMessage = 'nochecksum'; + } + + # Read input + do + { + $iBlockSize = $self->stream_read($hIn, \$tBuffer, $self->{iBlockSize}); + + # Write a block if size > 0 + if ($iBlockSize > 0) + { + $self->block_write($hOut, \$tBuffer, $iBlockSize, $bProtocol, $strMessage); + undef($strMessage); + } + + # Decompress the buffer to calculate checksum/size + if ($bDestinationCompress) + { + # If the block contains data, decompress it + if ($iBlockSize > 0) + { + # Copy file buffer to compressed buffer + if (defined($tCompressedBuffer)) + { + $tCompressedBuffer .= $tBuffer; + } + else + { + $tCompressedBuffer = $tBuffer; + } + + # Keep looping while there is more to decompress + do + { + # Decompress data + $iZLibStatus = $oZLib->inflate($tCompressedBuffer, $tUncompressedBuffer); + $iUncompressedBufferSize = length($tUncompressedBuffer); + + # If status is ok, write the data + if ($iZLibStatus == Z_OK || $iZLibStatus == Z_BUF_ERROR || $iZLibStatus == Z_STREAM_END) + { + if ($iUncompressedBufferSize > 0) + { + $oSHA->add($tUncompressedBuffer); + $iFileSize += $iUncompressedBufferSize; + } + } + # Else error, exit so it can be handled + else + { + $iBlockSize = 0; + } + } + while ($iZLibStatus == Z_OK && $iUncompressedBufferSize > 0 && $iBlockSize > 0); + } + } + } + while ($iBlockSize > 0); + + # Check decompression get checksum + if ($bDestinationCompress) + { + # Make sure the decompression succeeded (iBlockSize < 0 indicates remote error, handled later) + if ($iBlockSize == 0 && $iZLibStatus != Z_STREAM_END) + { + confess &log(ERROR, "unable to inflate stream: ${iZLibStatus}"); + } + + # Get checksum + $strChecksum = $oSHA->hexdigest(); + + # Set protocol message + if ($bProtocol) + { + $strMessage = "${strChecksum}-${iFileSize}"; + } + } + + # If protocol write + if ($bProtocol) + { + # Write 0 block to indicate end of stream + $self->block_write($hOut, undef, 0, $bProtocol, $strMessage); + } } } + + # If message is defined the the checksum and size should be in it + if (defined($strMessage)) + { + my @stryToken = split(/-/, $strMessage); + $strChecksum = $stryToken[0]; + $iFileSize = $stryToken[1]; + } + + # Return the checksum and size if they are available + return $strChecksum, $iFileSize; } #################################################################################################################################### @@ -691,10 +1071,13 @@ sub command_param_string my $strParamList; - foreach my $strParam (sort(keys $oParamHashRef)) + if (defined($oParamHashRef)) { - $strParamList .= (defined($strParamList) ? ',' : '') . "${strParam}=" . - (defined(${$oParamHashRef}{"${strParam}"}) ? ${$oParamHashRef}{"${strParam}"} : '[undef]'); + foreach my $strParam (sort(keys $oParamHashRef)) + { + $strParamList .= (defined($strParamList) ? ',' : '') . "${strParam}=" . + (defined(${$oParamHashRef}{"${strParam}"}) ? 
${$oParamHashRef}{"${strParam}"} : '[undef]'); + } } return $strParamList; @@ -817,5 +1200,4 @@ sub command_execute return $self->output_read($bOutputRequired, $strErrorPrefix); } -no Moose; -__PACKAGE__->meta->make_immutable; +1; diff --git a/lib/BackRest/Restore.pm b/lib/BackRest/Restore.pm new file mode 100644 index 000000000..09be1d423 --- /dev/null +++ b/lib/BackRest/Restore.pm @@ -0,0 +1,765 @@ +#################################################################################################################################### +# RESTORE MODULE +#################################################################################################################################### +package BackRest::Restore; + +use threads; +use threads::shared; +use Thread::Queue; +use strict; +use warnings FATAL => qw(all); +use Carp qw(confess); + +use File::Basename qw(dirname); +use File::stat qw(lstat); + +use lib dirname($0); +use BackRest::Exception; +use BackRest::Utility; +use BackRest::ThreadGroup; +use BackRest::Config; +use BackRest::Manifest; +use BackRest::File; +use BackRest::Db; + +#################################################################################################################################### +# Recovery.conf file +#################################################################################################################################### +use constant FILE_RECOVERY_CONF => 'recovery.conf'; + +#################################################################################################################################### +# CONSTRUCTOR +#################################################################################################################################### +sub new +{ + my $class = shift; # Class name + my $strDbClusterPath = shift; # Database cluster path + my $strBackupPath = shift; # Backup to restore + my $oRemapRef = shift; # Tablespace remaps + my $oFile = shift; # Default file object + my $iThreadTotal = shift; # Total threads to run for restore + my $bDelta = shift; # perform delta restore + my $bForce = shift; # force a restore + my $strType = shift; # Recovery type + my $strTarget = shift; # Recovery target + my $bTargetExclusive = shift; # Target exlusive option + my $bTargetResume = shift; # Target resume option + my $strTargetTimeline = shift; # Target timeline option + my $oRecoveryRef = shift; # Other recovery options + my $strStanza = shift; # Restore stanza + my $strBackRestBin = shift; # Absolute backrest filename + my $strConfigFile = shift; # Absolute config filename (optional) + + # Create the class hash + my $self = {}; + bless $self, $class; + + # Initialize variables + $self->{strDbClusterPath} = $strDbClusterPath; + $self->{strBackupPath} = $strBackupPath; + $self->{oRemapRef} = $oRemapRef; + $self->{oFile} = $oFile; + $self->{iThreadTotal} = defined($iThreadTotal) ? 
$iThreadTotal : 1; + $self->{bDelta} = $bDelta; + $self->{bForce} = $bForce; + $self->{strType} = $strType; + $self->{strTarget} = $strTarget; + $self->{bTargetExclusive} = $bTargetExclusive; + $self->{bTargetResume} = $bTargetResume; + $self->{strTargetTimeline} = $strTargetTimeline; + $self->{oRecoveryRef} = $oRecoveryRef; + $self->{strStanza} = $strStanza; + $self->{strBackRestBin} = $strBackRestBin; + $self->{strConfigFile} = $strConfigFile; + + return $self; +} + +#################################################################################################################################### +# MANIFEST_OWNERSHIP_CHECK +# +# Checks the users and groups that exist in the manifest and emits warnings for ownership that cannot be set properly, either +# because the current user does not have permissions or because the user/group does not exist. +#################################################################################################################################### +sub manifest_ownership_check +{ + my $self = shift; # Class hash + my $oManifest = shift; # Backup manifest + + # Create hashes to track valid/invalid users/groups + my %oOwnerHash = (); + + # Create hash for each type and owner to be checked + my $strDefaultUser = getpwuid($<); + my $strDefaultGroup = getgrgid($(); + + my %oFileTypeHash = (&MANIFEST_PATH => true, &MANIFEST_LINK => true, &MANIFEST_FILE => true); + my %oOwnerTypeHash = (&MANIFEST_SUBKEY_USER => $strDefaultUser, &MANIFEST_SUBKEY_GROUP => $strDefaultGroup); + + # Loop through owner types (user, group) + foreach my $strOwnerType (sort (keys %oOwnerTypeHash)) + { + # Loop through all backup paths (base and tablespaces) + foreach my $strPathKey ($oManifest->keys(MANIFEST_SECTION_BACKUP_PATH)) + { + # Loop through types (path, link, file) + foreach my $strFileType (sort (keys %oFileTypeHash)) + { + my $strSection = "${strPathKey}:${strFileType}"; + + # Get users and groups for paths + if ($oManifest->test($strSection)) + { + foreach my $strName ($oManifest->keys($strSection)) + { + my $strOwner = $oManifest->get($strSection, $strName, $strOwnerType); + + # If root then test to see if the user/group is valid + if ($< == 0) + { + # If the owner has not been tested yet then test it + if (!defined($oOwnerHash{$strOwnerType}{$strOwner})) + { + my $strOwnerId; + + if ($strOwnerType eq 'user') + { + $strOwnerId = getpwnam($strOwner); + } + else + { + $strOwnerId = getgrnam($strOwner); + } + + $oOwnerHash{$strOwnerType}{$strOwner} = defined($strOwnerId) ? true : false; + } + + if (!$oOwnerHash{$strOwnerType}{$strOwner}) + { + $oManifest->set($strSection, $strName, $strOwnerType, $oOwnerTypeHash{$strOwnerType}); + } + } + # Else set user/group to current user/group + else + { + if ($strOwner ne $oOwnerTypeHash{$strOwnerType}) + { + $oOwnerHash{$strOwnerType}{$strOwner} = false; + $oManifest->set($strSection, $strName, $strOwnerType, $oOwnerTypeHash{$strOwnerType}); + } + } + } + } + } + } + + # Output warning for any invalid owners + if (defined($oOwnerHash{$strOwnerType})) + { + foreach my $strOwner (sort (keys $oOwnerHash{$strOwnerType})) + { + if (!$oOwnerHash{$strOwnerType}{$strOwner}) + { + &log(WARN, "${strOwnerType} ${strOwner} " . ($< == 0 ? "does not exist" : "cannot be set") . 
+ ", changed to $oOwnerTypeHash{$strOwnerType}"); + } + } + } + } +} + +#################################################################################################################################### +# MANIFEST_LOAD +# +# Loads the backup manifest and performs requested tablespace remaps. +#################################################################################################################################### +sub manifest_load +{ + my $self = shift; # Class hash + + if ($self->{oFile}->exists(PATH_BACKUP_CLUSTER, $self->{strBackupPath})) + { + # Copy the backup manifest to the db cluster path + $self->{oFile}->copy(PATH_BACKUP_CLUSTER, $self->{strBackupPath} . '/' . FILE_MANIFEST, + PATH_DB_ABSOLUTE, $self->{strDbClusterPath} . '/' . FILE_MANIFEST); + + # Load the manifest into a hash + my $oManifest = new BackRest::Manifest($self->{oFile}->path_get(PATH_DB_ABSOLUTE, + $self->{strDbClusterPath} . '/' . FILE_MANIFEST)); + + # Remove the manifest now that it is in memory + $self->{oFile}->remove(PATH_DB_ABSOLUTE, $self->{strDbClusterPath} . '/' . FILE_MANIFEST); + + # If backup is latest then set it equal to backup label, else verify that requested backup and label match + my $strBackupLabel = $oManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL); + + if ($self->{strBackupPath} eq OPTION_DEFAULT_RESTORE_SET) + { + $self->{strBackupPath} = $strBackupLabel; + } + elsif ($self->{strBackupPath} ne $strBackupLabel) + { + confess &log(ASSERT, "request backup $self->{strBackupPath} and label ${strBackupLabel} do not match " . + ' - this indicates some sort of corruption (at the very least paths have been renamed.'); + } + + if ($self->{strDbClusterPath} ne $oManifest->get(MANIFEST_SECTION_BACKUP_PATH, MANIFEST_KEY_BASE)) + { + &log(INFO, 'base path remapped to ' . $self->{strDbClusterPath}); + $oManifest->set(MANIFEST_SECTION_BACKUP_PATH, MANIFEST_KEY_BASE, undef, $self->{strDbClusterPath}); + } + + # If tablespaces have been remapped, update the manifest + if (defined($self->{oRemapRef})) + { + foreach my $strPathKey (sort(keys $self->{oRemapRef})) + { + my $strRemapPath = ${$self->{oRemapRef}}{$strPathKey}; + + # Make sure that the tablespace exists in the manifest + if (!$oManifest->test(MANIFEST_SECTION_BACKUP_TABLESPACE, $strPathKey)) + { + confess &log(ERROR, "cannot remap invalid tablespace ${strPathKey} to ${strRemapPath}"); + } + + # Remap the tablespace in the manifest + &log(INFO, "remapping tablespace ${strPathKey} to ${strRemapPath}"); + + my $strTablespaceLink = $oManifest->get(MANIFEST_SECTION_BACKUP_TABLESPACE, $strPathKey, MANIFEST_SUBKEY_LINK); + + $oManifest->set(MANIFEST_SECTION_BACKUP_PATH, "tablespace:${strPathKey}", undef, $strRemapPath); + $oManifest->set(MANIFEST_SECTION_BACKUP_TABLESPACE, $strPathKey, MANIFEST_SUBKEY_PATH, $strRemapPath); + $oManifest->set('base:link', "pg_tblspc/${strTablespaceLink}", MANIFEST_SUBKEY_DESTINATION, $strRemapPath); + } + } + + $self->manifest_ownership_check($oManifest); + + return $oManifest; + } + + confess &log(ERROR, 'backup ' . $self->{strBackupPath} . ' does not exist'); +} + +#################################################################################################################################### +# CLEAN +# +# Checks that the restore paths are empty, or if --force was used then it cleans files/paths/links from the restore directories that +# are not present in the manifest. 
+####################################################################################################################################
+sub clean
+{
+    my $self = shift;          # Class hash
+    my $oManifest = shift;     # Backup manifest
+
+    # Track if files/links/paths were removed
+    my %oRemoveHash = (&MANIFEST_FILE => 0, &MANIFEST_PATH => 0, &MANIFEST_LINK => 0);
+
+    # Check each restore directory in the manifest and make sure that it exists and is empty.
+    # The --force option can be used to override the empty requirement.
+    foreach my $strPathKey ($oManifest->keys(MANIFEST_SECTION_BACKUP_PATH))
+    {
+        my $strPath = $oManifest->get(MANIFEST_SECTION_BACKUP_PATH, $strPathKey);
+
+        &log(INFO, "checking/cleaning db path ${strPath}");
+
+        if (!$self->{oFile}->exists(PATH_DB_ABSOLUTE, $strPath))
+        {
+            confess &log(ERROR, "required db path '${strPath}' does not exist");
+        }
+
+        # Load path manifest so it can be compared to deleted files/paths/links that are not in the backup
+        my %oPathManifest;
+        $self->{oFile}->manifest(PATH_DB_ABSOLUTE, $strPath, \%oPathManifest);
+
+        foreach my $strName (sort {$b cmp $a} (keys $oPathManifest{name}))
+        {
+            # Skip the root path
+            if ($strName eq '.')
+            {
+                next;
+            }
+
+            # If force was not specified then error if any file is found
+            if (!$self->{bForce} && !$self->{bDelta})
+            {
+                confess &log(ERROR, "cannot restore to path '${strPath}' that contains files - " .
+                                    'try using --delta if this is what you intended', ERROR_RESTORE_PATH_NOT_EMPTY);
+            }
+
+            my $strFile = "${strPath}/${strName}";
+
+            # Determine the file/path/link type
+            my $strType = MANIFEST_FILE;
+
+            if ($oPathManifest{name}{$strName}{type} eq 'd')
+            {
+                $strType = MANIFEST_PATH;
+            }
+            elsif ($oPathManifest{name}{$strName}{type} eq 'l')
+            {
+                $strType = MANIFEST_LINK;
+            }
+
+            # Build the section name
+            my $strSection = "${strPathKey}:${strType}";
+
+            # Check to see if the file/path/link exists in the manifest
+            if ($oManifest->test($strSection, $strName))
+            {
+                my $strUser = $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_USER);
+                my $strGroup = $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_GROUP);
+
+                # If ownership does not match, fix it
+                if ($strUser ne $oPathManifest{name}{$strName}{user} ||
+                    $strGroup ne $oPathManifest{name}{$strName}{group})
+                {
+                    &log(DEBUG, "setting ${strFile} ownership to ${strUser}:${strGroup}");
+
+                    $self->{oFile}->owner(PATH_DB_ABSOLUTE, $strFile, $strUser, $strGroup);
+                }
+
+                # If a link does not have the same destination, then delete it (it will be recreated later)
+                if ($strType eq MANIFEST_LINK)
+                {
+                    if ($strType eq MANIFEST_LINK && $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_DESTINATION) ne
+                        $oPathManifest{name}{$strName}{link_destination})
+                    {
+                        &log(DEBUG, "removing link ${strFile} - destination changed");
+                        unlink($strFile) or confess &log(ERROR, "unable to delete file ${strFile}");
+                    }
+                }
+                # Else if file/path mode does not match, fix it
+                else
+                {
+                    my $strMode = $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODE);
+
+                    if ($strType ne MANIFEST_LINK && $strMode ne $oPathManifest{name}{$strName}{mode})
+                    {
+                        &log(DEBUG, "setting ${strFile} mode to ${strMode}");
+
+                        chmod(oct($strMode), $strFile)
+                            or confess "unable to set mode ${strMode} for ${strFile}";
+                    }
+                }
+            }
+            # If it does not then remove it
+            else
+            {
+                # If a path then remove it; all the files should have already been deleted since we are going in reverse order
+                if ($strType eq MANIFEST_PATH)
+                {
+                    &log(DEBUG, "removing path ${strFile}");
+                    rmdir($strFile) or confess
&log(ERROR, "unable to delete path ${strFile}, is it empty?"); + } + # Else delete a file/link + else + { + # Delete only if this is not the recovery.conf file. This is in case the use wants the recovery.conf file + # preserved. It will be written/deleted/preserved as needed in recovery(). + if (!($strName eq FILE_RECOVERY_CONF && $strType eq MANIFEST_FILE)) + { + &log(DEBUG, "removing file/link ${strFile}"); + unlink($strFile) or confess &log(ERROR, "unable to delete file/link ${strFile}"); + } + } + + $oRemoveHash{$strType} += 1; + } + } + } + + # Loop through types (path, link, file) and emit info if any were removed + foreach my $strFileType (sort (keys %oRemoveHash)) + { + if ($oRemoveHash{$strFileType} > 0) + { + &log(INFO, "$oRemoveHash{$strFileType} ${strFileType}(s) removed during cleanup"); + } + } +} + +#################################################################################################################################### +# BUILD +# +# Creates missing paths and links and corrects ownership/mode on existing paths and links. +#################################################################################################################################### +sub build +{ + my $self = shift; # Class hash + my $oManifest = shift; # Backup manifest + + # Build paths/links in each restore path + foreach my $strSectionPathKey ($oManifest->keys(MANIFEST_SECTION_BACKUP_PATH)) + { + my $strSectionPath = $oManifest->get(MANIFEST_SECTION_BACKUP_PATH, $strSectionPathKey); + + # Create all paths in the manifest that do not already exist + my $strSection = "${strSectionPathKey}:path"; + + foreach my $strName ($oManifest->keys($strSection)) + { + # Skip the root path + if ($strName eq '.') + { + next; + } + + # Create the Path + my $strPath = "${strSectionPath}/${strName}"; + + if (!$self->{oFile}->exists(PATH_DB_ABSOLUTE, $strPath)) + { + $self->{oFile}->path_create(PATH_DB_ABSOLUTE, $strPath, + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODE)); + } + } + + # Create all links in the manifest that do not already exist + $strSection = "${strSectionPathKey}:link"; + + if ($oManifest->test($strSection)) + { + foreach my $strName ($oManifest->keys($strSection)) + { + my $strLink = "${strSectionPath}/${strName}"; + + if (!$self->{oFile}->exists(PATH_DB_ABSOLUTE, $strLink)) + { + $self->{oFile}->link_create(PATH_DB_ABSOLUTE, + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_DESTINATION), + PATH_DB_ABSOLUTE, $strLink); + } + } + } + } +} + +#################################################################################################################################### +# RECOVERY +# +# Creates the recovery.conf file. +#################################################################################################################################### +sub recovery +{ + my $self = shift; # Class hash + + # Create recovery.conf path/file + my $strRecoveryConf = $self->{strDbClusterPath} . '/' . 
FILE_RECOVERY_CONF; + + # See if recovery.conf already exists + my $bRecoveryConfExists = $self->{oFile}->exists(PATH_DB_ABSOLUTE, $strRecoveryConf); + + # If RECOVERY_TYPE_PRESERVE then make sure recovery.conf exists and return + if ($self->{strType} eq RECOVERY_TYPE_PRESERVE) + { + if (!$bRecoveryConfExists) + { + confess &log(ERROR, "recovery type is $self->{strType} but recovery file does not exist at ${strRecoveryConf}"); + } + + return; + } + + # In all other cases the old recovery.conf should be removed if it exists + if ($bRecoveryConfExists) + { + $self->{oFile}->remove(PATH_DB_ABSOLUTE, $strRecoveryConf); + } + + # If RECOVERY_TYPE_NONE then return + if ($self->{strType} eq RECOVERY_TYPE_NONE) + { + return; + } + + # Write the recovery options from pg_backrest.conf + my $strRecovery = ''; + my $bRestoreCommandOverride = false; + + if (defined($self->{oRecoveryRef})) + { + foreach my $strKey (sort(keys $self->{oRecoveryRef})) + { + my $strPgKey = $strKey; + $strPgKey =~ s/\-/\_/g; + + if ($strPgKey eq 'restore_command') + { + $bRestoreCommandOverride = true; + } + + $strRecovery .= "$strPgKey = '${$self->{oRecoveryRef}}{$strKey}'\n"; + } + } + + # Write the restore command + if (!$bRestoreCommandOverride) + { + $strRecovery .= "restore_command = '$self->{strBackRestBin} --stanza=$self->{strStanza}" . + (defined($self->{strConfigFile}) ? " --config=$self->{strConfigFile}" : '') . + " archive-get %f \"%p\"'\n"; + } + + # If RECOVERY_TYPE_DEFAULT do not write target options + if ($self->{strType} ne RECOVERY_TYPE_DEFAULT) + { + # Write the recovery target + $strRecovery .= "recovery_target_$self->{strType} = '$self->{strTarget}'\n"; + + # Write recovery_target_inclusive + if ($self->{bTargetExclusive}) + { + $strRecovery .= "recovery_target_inclusive = false\n"; + } + } + + # Write pause_at_recovery_target + if ($self->{bTargetResume}) + { + $strRecovery .= "pause_at_recovery_target = false\n"; + } + + # Write recovery_target_timeline + if (defined($self->{strTargetTimeline})) + { + $strRecovery .= "recovery_target_timeline = $self->{strTargetTimeline}\n"; + } + + # Write recovery.conf + my $hFile; + + open($hFile, '>', $strRecoveryConf) + or confess &log(ERROR, "unable to open ${strRecoveryConf}: $!"); + + syswrite($hFile, $strRecovery) + or confess "unable to write section ${strRecoveryConf}: $!"; + + close($hFile) + or confess "unable to close ${strRecoveryConf}: $!"; +} + +#################################################################################################################################### +# RESTORE +# +# Takes a backup and restores it back to the original or a remapped location. +#################################################################################################################################### +sub restore +{ + my $self = shift; # Class hash + + # Make sure that Postgres is not running + if ($self->{oFile}->exists(PATH_DB_ABSOLUTE, $self->{strDbClusterPath} . '/' . FILE_POSTMASTER_PID)) + { + confess &log(ERROR, 'unable to restore while Postgres is running', ERROR_POSTMASTER_RUNNING); + } + + # Log the backup set to restore + &log(INFO, "Restoring backup set " . 
$self->{strBackupPath}); + + # Make sure the backup path is valid and load the manifest + my $oManifest = $self->manifest_load(); + + # Clean the restore paths + $self->clean($oManifest); + + # Build paths/links in the restore paths + $self->build($oManifest); + + # Create thread queues + my @oyRestoreQueue; + + foreach my $strPathKey ($oManifest->keys(MANIFEST_SECTION_BACKUP_PATH)) + { + my $strSection = "${strPathKey}:file"; + + if ($oManifest->test($strSection)) + { + $oyRestoreQueue[@oyRestoreQueue] = Thread::Queue->new(); + + foreach my $strName ($oManifest->keys($strSection)) + { + $oyRestoreQueue[@oyRestoreQueue - 1]->enqueue("${strPathKey}|${strName}"); + } + } + } + + # If multi-threaded then create threads to copy files + if ($self->{iThreadTotal} > 1) + { + # Create threads to process the thread queues + my $oThreadGroup = thread_group_create(); + + for (my $iThreadIdx = 0; $iThreadIdx < $self->{iThreadTotal}; $iThreadIdx++) + { + &log(DEBUG, "starting restore thread ${iThreadIdx}"); + thread_group_add($oThreadGroup, threads->create(\&restore_thread, $self, true, + $iThreadIdx, \@oyRestoreQueue, $oManifest)); + } + + # Complete thread queues + thread_group_complete($oThreadGroup); + } + # Else copy in the main process + else + { + &log(DEBUG, "starting restore in main process"); + $self->restore_thread(false, 0, \@oyRestoreQueue, $oManifest); + } + + # Create recovery.conf file + $self->recovery(); +} + +#################################################################################################################################### +# RESTORE_THREAD +# +# Worker threads for the restore process. +#################################################################################################################################### +sub restore_thread +{ + my $self = shift; # Class hash + my $bMulti = shift; # Is this thread one of many? + my $iThreadIdx = shift; # Defines the index of this thread + my $oyRestoreQueueRef = shift; # Restore queues + my $oManifest = shift; # Backup manifest + + my $iDirection = $iThreadIdx % 2 == 0 ? 1 : -1; # Size of files currently copied by this thread + my $oFileThread; # Thread local file object + + # If multi-threaded, then clone the file object + if ($bMulti) + { + $oFileThread = $self->{oFile}->clone($iThreadIdx); + } + # Else use the master file object + else + { + $oFileThread = $self->{oFile}; + } + + # Initialize the starting and current queue index based in the total number of threads in relation to this thread + my $iQueueStartIdx = int((@{$oyRestoreQueueRef} / $self->{iThreadTotal}) * $iThreadIdx); + my $iQueueIdx = $iQueueStartIdx; + + # Time when the backup copying began - used for size/timestamp deltas + my $lCopyTimeBegin = $oManifest->epoch(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_COPY_START); + + # Set source compression + my $bSourceCompression = $oManifest->get(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS) eq 'y' ? 
true : false; + + # When a KILL signal is received, immediately abort + $SIG{'KILL'} = sub {threads->exit();}; + + # Get the current user and group to compare with stored mode + my $strCurrentUser = getpwuid($<); + my $strCurrentGroup = getgrgid($(); + + # Loop through all the queues to restore files (exit when the original queue is reached + do + { + while (my $strMessage = ${$oyRestoreQueueRef}[$iQueueIdx]->dequeue_nb()) + { + my $strSourcePath = (split(/\|/, $strMessage))[0]; # Source path from backup + my $strSection = "${strSourcePath}:file"; # Backup section with file info + my $strDestinationPath = $oManifest->get(MANIFEST_SECTION_BACKUP_PATH, # Destination path stored in manifest + $strSourcePath); + $strSourcePath =~ s/\:/\//g; # Replace : with / in source path + my $strName = (split(/\|/, $strMessage))[1]; # Name of file to be restored + + # If the file is a reference to a previous backup and hardlinks are off, then fetch it from that backup + my $strReference = $oManifest->test(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, 'y') ? undef : + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_REFERENCE, false); + + # Generate destination file name + my $strDestinationFile = $oFileThread->path_get(PATH_DB_ABSOLUTE, "${strDestinationPath}/${strName}"); + + if ($oFileThread->exists(PATH_DB_ABSOLUTE, $strDestinationFile)) + { + # Perform delta if requested + if ($self->{bDelta}) + { + # If force then use size/timestamp delta + if ($self->{bForce}) + { + my $oStat = lstat($strDestinationFile); + + # Make sure that timestamp/size are equal and that timestamp is before the copy start time of the backup + if (defined($oStat) && + $oStat->size == $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_SIZE) && + $oStat->mtime == $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME) && + $oStat->mtime < $lCopyTimeBegin) + { + &log(DEBUG, "${strDestinationFile} exists and matches size " . $oStat->size . + " and modification time " . $oStat->mtime); + next; + } + } + else + { + my ($strChecksum, $lSize) = $oFileThread->hash_size(PATH_DB_ABSOLUTE, $strDestinationFile); + + if (($lSize == $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_SIZE) && $lSize == 0) || + ($strChecksum eq $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_CHECKSUM))) + { + &log(DEBUG, "${strDestinationFile} exists and is zero size or matches backup checksum"); + + # Even if hash is the same set the time back to backup time. This helps with unit testing, but also + # presents a pristine version of the database. + utime($oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME), + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME), + $strDestinationFile) + or confess &log(ERROR, "unable to set time for ${strDestinationFile}"); + + next; + } + } + } + + $oFileThread->remove(PATH_DB_ABSOLUTE, $strDestinationFile); + } + + # Set user and group if running as root (otherwise current user and group will be used for restore) + # Copy the file from the backup to the database + my ($bCopyResult, $strCopyChecksum, $lCopySize) = + $oFileThread->copy(PATH_BACKUP_CLUSTER, (defined($strReference) ? $strReference : $self->{strBackupPath}) . + "/${strSourcePath}/${strName}" . + ($bSourceCompression ? '.' . 
$oFileThread->{strCompressExtension} : ''), + PATH_DB_ABSOLUTE, $strDestinationFile, + $bSourceCompression, # Source is compressed based on backup settings + undef, undef, + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODIFICATION_TIME), + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_MODE), + undef, + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_USER), + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_GROUP)); + + if ($lCopySize != 0 && $strCopyChecksum ne $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_CHECKSUM)) + { + confess &log(ERROR, "error restoring ${strDestinationFile}: actual checksum ${strCopyChecksum} " . + "does not match expected checksum " . + $oManifest->get($strSection, $strName, MANIFEST_SUBKEY_CHECKSUM), ERROR_CHECKSUM); + } + } + + # Even number threads move up when they have finished a queue, odd numbered threads move down + $iQueueIdx += $iDirection; + + # Reset the queue index when it goes over or under the number of queues + if ($iQueueIdx < 0) + { + $iQueueIdx = @{$oyRestoreQueueRef} - 1; + } + elsif ($iQueueIdx >= @{$oyRestoreQueueRef}) + { + $iQueueIdx = 0; + } + + &log(TRACE, "thread waiting for new file from queue: queue ${iQueueIdx}, start queue ${iQueueStartIdx}"); + } + while ($iQueueIdx != $iQueueStartIdx); + + &log(DEBUG, "thread ${iThreadIdx} exiting"); +} + +1; diff --git a/lib/BackRest/ThreadGroup.pm b/lib/BackRest/ThreadGroup.pm new file mode 100644 index 000000000..4e8cd85d1 --- /dev/null +++ b/lib/BackRest/ThreadGroup.pm @@ -0,0 +1,165 @@ +#################################################################################################################################### +# THREADGROUP MODULE +#################################################################################################################################### +package BackRest::ThreadGroup; + +use threads; +use strict; +use warnings FATAL => qw(all); +use Carp qw(confess); + +use File::Basename; + +use lib dirname($0) . '/../lib'; +use BackRest::Utility; + +#################################################################################################################################### +# MODULE EXPORTS +#################################################################################################################################### +use Exporter qw(import); + +our @EXPORT = qw(thread_group_create thread_group_add thread_group_complete); + +#################################################################################################################################### +# CONSTRUCTOR +#################################################################################################################################### +sub thread_group_create +{ + # Create the class hash + my $self = {}; + + # Initialize variables + $self->{iThreadTotal} = 0; + + return $self; +} + +#################################################################################################################################### +# ADD +# +# Add a thread to the group. Once a thread is added, it can be tracked as part of the group. 
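
For context on the threading model just above: restore() builds one work queue per backup path and restore_thread() walks those queues starting at an offset derived from its thread index, with even-numbered threads moving up and odd-numbered threads moving down and wrapping at either end, so threads tend to drain different paths before contending for the same queue. A minimal standalone sketch of just that index arithmetic, with the helper name queue_order invented for illustration (it is not part of the patch):

```
# Editorial sketch: the queue-cycling arithmetic used by restore_thread,
# shown in isolation.
use strict;
use warnings;

sub queue_order
{
    my ($iThreadIdx, $iThreadTotal, $iQueueTotal) = @_;

    my $iDirection = $iThreadIdx % 2 == 0 ? 1 : -1;                         # even threads move up, odd threads move down
    my $iQueueStartIdx = int(($iQueueTotal / $iThreadTotal) * $iThreadIdx); # spread starting points across the queues
    my $iQueueIdx = $iQueueStartIdx;
    my @iyOrder;

    do
    {
        push(@iyOrder, $iQueueIdx);
        $iQueueIdx += $iDirection;

        # Wrap around when the index runs off either end
        $iQueueIdx = $iQueueTotal - 1 if $iQueueIdx < 0;
        $iQueueIdx = 0 if $iQueueIdx >= $iQueueTotal;
    }
    while ($iQueueIdx != $iQueueStartIdx);

    return @iyOrder;
}

print join(',', queue_order(0, 2, 4)), "\n";   # 0,1,2,3
print join(',', queue_order(1, 2, 4)), "\n";   # 2,1,0,3
```

With two threads and four queues this yields visit orders 0,1,2,3 and 2,1,0,3, so the threads begin on different paths and only overlap near the end of the restore.
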
+#################################################################################################################################### +sub thread_group_add +{ + my $self = shift; + my $oThread = shift; + + $self->{oyThread}[$self->{iThreadTotal}] = $oThread; + $self->{iThreadTotal}++; + + return $self->{iThreadTotal} - 1; +} + +#################################################################################################################################### +# COMPLETE +# +# Wait for threads to complete. +#################################################################################################################################### +sub thread_group_complete +{ + my $self = shift; + my $iTimeout = shift; + my $bConfessOnError = shift; + + # Set defaults + $bConfessOnError = defined($bConfessOnError) ? $bConfessOnError : true; + + # Wait for all threads to complete and handle errors + my $iThreadComplete = 0; + my $lTimeBegin = time(); + + # Rejoin the threads + while ($iThreadComplete < $self->{iThreadTotal}) + { + hsleep(.1); + + # If a timeout has been defined, make sure we have not been running longer than that + if (defined($iTimeout)) + { + if (time() - $lTimeBegin >= $iTimeout) + { + confess &log(ERROR, "threads have been running more than ${iTimeout} seconds, exiting..."); + + #backup_thread_kill(); + + #confess &log(WARN, "all threads have exited, aborting..."); + } + } + + for (my $iThreadIdx = 0; $iThreadIdx < $self->{iThreadTotal}; $iThreadIdx++) + { + if (defined($self->{oyThread}[$iThreadIdx])) + { + if (defined($self->{oyThread}[$iThreadIdx]->error())) + { + $self->kill(); + + if ($bConfessOnError) + { + confess &log(ERROR, 'error in thread ' . (${iThreadIdx} + 1) . ': check log for details'); + } + else + { + return false; + } + } + + if ($self->{oyThread}[$iThreadIdx]->is_joinable()) + { + &log(DEBUG, "thread ${iThreadIdx} exited"); + $self->{oyThread}[$iThreadIdx]->join(); + &log(TRACE, "thread ${iThreadIdx} object undef"); + undef($self->{oyThread}[$iThreadIdx]); + $iThreadComplete++; + } + } + } + } + + &log(DEBUG, 'all threads exited'); + + return true; +} + +#################################################################################################################################### +# KILL +#################################################################################################################################### +sub thread_group_destroy +{ + my $self = shift; + + # Total number of threads killed + my $iTotal = 0; + + for (my $iThreadIdx = 0; $iThreadIdx < $self->{iThreadTotal}; $iThreadIdx++) + { + if (defined($self->{oyThread}[$iThreadIdx])) + { + if ($self->{oyThread}[$iThreadIdx]->is_running()) + { + $self->{oyThread}[$iThreadIdx]->kill('KILL')->join(); + } + elsif ($self->{oyThread}[$iThreadIdx]->is_joinable()) + { + $self->{oyThread}[$iThreadIdx]->join(); + } + + undef($self->{oyThread}[$iThreadIdx]); + $iTotal++; + } + } + + return($iTotal); +} + +#################################################################################################################################### +# DESTRUCTOR +#################################################################################################################################### +# sub thread_group_destroy +# { +# my $self = shift; +# +# $self->kill(); +# } + +1; diff --git a/lib/BackRest/Utility.pm b/lib/BackRest/Utility.pm index 1c1097ca9..0db4c202c 100644 --- a/lib/BackRest/Utility.pm +++ b/lib/BackRest/Utility.pm @@ -5,11 +5,13 @@ package BackRest::Utility; use threads; use strict; -use 
warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess longmess); use Fcntl qw(:DEFAULT :flock); use File::Path qw(remove_tree); +use Time::HiRes qw(gettimeofday usleep); +use POSIX qw(ceil); use File::Basename; use JSON; @@ -20,11 +22,11 @@ use Exporter qw(import); our @EXPORT = qw(version_get data_hash_build trim common_prefix wait_for_file file_size_format execute - log log_file_set log_level_set test_set test_check - lock_file_create lock_file_remove - config_save config_load timestamp_string_get timestamp_file_string_get + log log_file_set log_level_set test_set test_get test_check + lock_file_create lock_file_remove hsleep wait_remainder + ini_save ini_load timestamp_string_get timestamp_file_string_get TRACE DEBUG ERROR ASSERT WARN INFO OFF true false - TEST TEST_ENCLOSE TEST_MANIFEST_BUILD); + TEST TEST_ENCLOSE TEST_MANIFEST_BUILD TEST_BACKUP_RESUME TEST_BACKUP_NORESUME FORMAT); # Global constants use constant @@ -60,19 +62,29 @@ $oLogLevelRank{ERROR}{rank} = 2; $oLogLevelRank{ASSERT}{rank} = 1; $oLogLevelRank{OFF}{rank} = 0; +#################################################################################################################################### +# FORMAT Constant +# +# Identified the format of the manifest and file structure. The format is used to determine compatability between versions. +#################################################################################################################################### +use constant FORMAT => 3; + #################################################################################################################################### # TEST Constants and Variables #################################################################################################################################### use constant { - TEST => 'TEST', - TEST_ENCLOSE => 'PgBaCkReStTeSt', - TEST_MANIFEST_BUILD => 'MANIFEST_BUILD' + TEST => 'TEST', + TEST_ENCLOSE => 'PgBaCkReStTeSt', + + TEST_MANIFEST_BUILD => 'MANIFEST_BUILD', + TEST_BACKUP_RESUME => 'BACKUP_RESUME', + TEST_BACKUP_NORESUME => 'BACKUP_NORESUME', }; # Test global variables my $bTest = false; -my $iTestDelay; +my $fTestDelay; #################################################################################################################################### # VERSION_GET @@ -155,6 +167,21 @@ sub lock_file_remove } } +#################################################################################################################################### +# WAIT_REMAINDER - Wait the remainder of the current second +#################################################################################################################################### +sub wait_remainder +{ + my $lTimeBegin = gettimeofday(); + my $lSleepMs = ceil(((int($lTimeBegin) + 1) - $lTimeBegin) * 1000); + + usleep($lSleepMs * 1000); + + &log(TRACE, "WAIT_REMAINDER: slept ${lSleepMs}ms: begin ${lTimeBegin}, end " . 
gettimeofday()); + + return int($lTimeBegin); +} + #################################################################################################################################### # DATA_HASH_BUILD - Hash a delimited file with header #################################################################################################################################### @@ -209,6 +236,16 @@ sub trim return $strBuffer; } +#################################################################################################################################### +# hsleep - wrapper for usleep that takes seconds in fractions and returns time slept in ms +#################################################################################################################################### +sub hsleep +{ + my $fSecond = shift; + + return usleep($fSecond * 1000000); +} + #################################################################################################################################### # WAIT_FOR_FILE #################################################################################################################################### @@ -223,18 +260,18 @@ sub wait_for_file while ($lTime > time() - $iSeconds) { - opendir $hDir, $strDir - or confess &log(ERROR, "Could not open path ${strDir}: $!\n"); - - my @stryFile = grep(/$strRegEx/i, readdir $hDir); - close $hDir; - - if (scalar @stryFile == 1) + if (opendir($hDir, $strDir)) { - return; + my @stryFile = grep(/$strRegEx/i, readdir $hDir); + close $hDir; + + if (scalar @stryFile == 1) + { + return; + } } - sleep(1); + hsleep(.1); } confess &log(ERROR, "could not find $strDir/$strRegEx after ${iSeconds} second(s)"); @@ -295,13 +332,19 @@ sub file_size_format sub timestamp_string_get { my $strFormat = shift; + my $lTime = shift; if (!defined($strFormat)) { $strFormat = '%4d-%02d-%02d %02d:%02d:%02d'; } - my ($iSecond, $iMinute, $iHour, $iMonthDay, $iMonth, $iYear, $iWeekDay, $iYearDay, $bIsDst) = localtime(time); + if (!defined($lTime)) + { + $lTime = time(); + } + + my ($iSecond, $iMinute, $iHour, $iMonthDay, $iMonth, $iYear, $iWeekDay, $iYearDay, $bIsDst) = localtime($lTime); return sprintf($strFormat, $iYear + 1900, $iMonth + 1, $iMonthDay, $iHour, $iMinute, $iSecond); } @@ -350,25 +393,33 @@ sub log_file_set sub test_set { my $bTestParam = shift; - my $iTestDelayParam = shift; + my $fTestDelayParam = shift; # Set defaults $bTest = defined($bTestParam) ? $bTestParam : false; - $iTestDelay = defined($bTestParam) ? $iTestDelayParam : $iTestDelay; + $fTestDelay = defined($bTestParam) ? $fTestDelayParam : $fTestDelay; # Make sure that a delay is specified in test mode - if ($bTest && !defined($iTestDelay)) + if ($bTest && !defined($fTestDelay)) { confess &log(ASSERT, 'iTestDelay must be provided when bTest is true'); } # Test delay should be between 1 and 600 seconds - if (!($iTestDelay >= 1 && $iTestDelay <= 600)) + if (!($fTestDelay >= 0 && $fTestDelay <= 600)) { confess &log(ERROR, 'test-delay must be between 1 and 600 seconds'); } } +#################################################################################################################################### +# TEST_GET - are we in test mode? 
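
wait_remainder() above exists to push the next action into a fresh whole second, which is useful when tests depend on one-second timestamp granularity (such as the size/timestamp comparisons in the delta restore earlier in this patch). A small usage sketch follows; the body of wait_remainder mirrors the patch, while the surrounding script and its assumptions are illustrative only:

```
# Editorial illustration: wait out the rest of the current second so the
# next event is guaranteed to land in a later whole second.
use strict;
use warnings;
use Time::HiRes qw(gettimeofday usleep);
use POSIX qw(ceil);

sub wait_remainder
{
    my $lTimeBegin = gettimeofday();
    my $lSleepMs = ceil(((int($lTimeBegin) + 1) - $lTimeBegin) * 1000);

    usleep($lSleepMs * 1000);

    return int($lTimeBegin);
}

my $lFirst = wait_remainder();   # the second that was just waited out
my $lSecond = time();            # should now be at least $lFirst + 1
print "waited out second ${lFirst}, now in second ${lSecond}\n";
```
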
+#################################################################################################################################### +sub test_get +{ + return $bTest; +} + #################################################################################################################################### # LOG_LEVEL_SET - set the log level for file and console #################################################################################################################################### @@ -379,22 +430,22 @@ sub log_level_set if (defined($strLevelFileParam)) { - if (!defined($oLogLevelRank{"${strLevelFileParam}"}{rank})) + if (!defined($oLogLevelRank{uc($strLevelFileParam)}{rank})) { confess &log(ERROR, "file log level ${strLevelFileParam} does not exist"); } - $strLogLevelFile = $strLevelFileParam; + $strLogLevelFile = uc($strLevelFileParam); } if (defined($strLevelConsoleParam)) { - if (!defined($oLogLevelRank{"${strLevelConsoleParam}"}{rank})) + if (!defined($oLogLevelRank{uc($strLevelConsoleParam)}{rank})) { confess &log(ERROR, "console log level ${strLevelConsoleParam} does not exist"); } - $strLogLevelConsole = $strLevelConsoleParam; + $strLogLevelConsole = uc($strLevelConsoleParam); } } @@ -444,6 +495,8 @@ sub log $strMessageFormat = '(undefined)'; } + $strMessageFormat = (defined($iCode) ? "[${iCode}] " : '') . $strMessageFormat; + # Indent subsequent lines of the message if it has more than one line - makes the log more readable if ($strLevel eq TRACE || $strLevel eq TEST) { @@ -464,8 +517,7 @@ sub log my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime(time); $strMessageFormat = timestamp_string_get() . sprintf(' T%02d', threads->tid()) . - (' ' x (7 - length($strLevel))) . "${strLevel}: ${strMessageFormat}" . - (defined($iCode) ? " (code ${iCode})" : '') . "\n"; + (' ' x (7 - length($strLevel))) . "${strLevel}: ${strMessageFormat}\n"; # Output to console depending on log level and test flag if ($iLogLevelRank <= $oLogLevelRank{"${strLogLevelConsole}"}{rank} || @@ -479,7 +531,11 @@ sub log if ($bTest && $strLevel eq TEST) { *STDOUT->flush(); - sleep($iTestDelay); + + if ($fTestDelay > 0) + { + hsleep($fTestDelay); + } } } @@ -491,6 +547,14 @@ sub log if (!$bSuppressLog) { print $hLogFile $strMessageFormat; + + if ($strLevel eq ERROR || $strLevel eq ASSERT) + { + my $strStackTrace = longmess() . "\n"; + $strStackTrace =~ s/\n/\n /g; + + print $hLogFile $strStackTrace; + } } } } @@ -498,7 +562,7 @@ sub log # Throw a typed exception if code is defined if (defined($iCode)) { - return BackRest::Exception->new(iCode => $iCode, strMessage => $strMessage); + return new BackRest::Exception($iCode, $strMessage); } # Return the message test so it can be used in a confess @@ -506,16 +570,16 @@ sub log } #################################################################################################################################### -# CONFIG_LOAD +# INI_LOAD # -# Load configuration file from standard INI format to a hash. +# Load file from standard INI format to a hash. 
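
Two of the logging changes above are easy to miss: error codes are now folded into the message as a leading [code] prefix rather than a trailing "(code N)", and ERROR/ASSERT messages written to the log file gain a stack trace captured with Carp::longmess. A hedged sketch of that capture; the function name and the code value are invented for illustration and do not appear in the patch:

```
# Editorial sketch: prefix the error code and append an indented backtrace,
# in the spirit of the patched log() routine.
use strict;
use warnings;
use Carp qw(longmess);

sub log_error_with_trace
{
    my $strMessage = shift;
    my $iCode = shift;       # arbitrary example value below

    # Fold the error code into the message as a bracketed prefix
    my $strLine = (defined($iCode) ? "[${iCode}] " : '') . $strMessage . "\n";

    # Capture a stack trace without dying and indent continuation lines
    # so multi-line entries stay readable in the log file
    my $strStackTrace = longmess() . "\n";
    $strStackTrace =~ s/\n/\n    /g;

    return $strLine . $strStackTrace;
}

print log_error_with_trace('unable to delete file /tmp/example', 122);
```
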
#################################################################################################################################### -sub config_load +sub ini_load { - my $strFile = shift; # Full path to config file to load from - my $oConfig = shift; # Reference to the hash where config data will be stored + my $strFile = shift; # Full path to ini file to load from + my $oConfig = shift; # Reference to the hash where ini data will be stored - # Open the config file for reading + # Open the ini file for reading my $hFile; my $strSection; @@ -562,19 +626,21 @@ sub config_load } close($hFile); + + return($oConfig); } #################################################################################################################################### -# CONFIG_SAVE +# INI_SAVE # -# Save configuration file from a hash to standard INI format. +# Save from a hash to standard INI format. #################################################################################################################################### -sub config_save +sub ini_save { - my $strFile = shift; # Full path to config file to save to - my $oConfig = shift; # Reference to the hash where config data is stored + my $strFile = shift; # Full path to ini file to save to + my $oConfig = shift; # Reference to the hash where ini data is stored - # Open the config file for writing + # Open the ini file for writing my $hFile; my $bFirst = true; @@ -600,7 +666,7 @@ sub config_save { if (ref($strValue) eq "HASH") { - syswrite($hFile, "${strKey}=" . encode_json($strValue) . "\n") + syswrite($hFile, "${strKey}=" . to_json($strValue, {canonical => true}) . "\n") or confess "unable to write key ${strKey}: $!"; } else diff --git a/test/data/test.archive1.bin b/test/data/test.archive1.bin new file mode 100644 index 0000000000000000000000000000000000000000..7b9db308f3e6d9d2fc45519eebf310a73b6bf4ec GIT binary patch literal 16777216 zcmeFvJ!_Lu6ae6xG?jh`)-8k3I20mes31i|A&Nr>2ayg1heou;LKKm9wxgq~Q~d!t zIfx*VrMrJX(8&*qBF>8E2^7JXLIY;?r`U8^mC?;R`!?T{hjT5n^DYrbEb}|y1%tM97geT^m3+-;OjT1 z|D8Xn_af)lI39TrcHQ8~*3)b2XESfSIk9o-y2uDq5tvsxKTe2 zdh_4o++2D7-AR2l-;4El^4{T%D`U?3-n^+^`O27RP}Oq(KF+H9nXguV`@fWN&Nyb` zc|F!>a?k67x%`*N-YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM 
z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 
zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK 
zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ 
z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r 
z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj 
zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA 
zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r R3>YwAz<>b*1`HVZ3 qw(all); +use Carp qw(confess); use File::Basename; use File::Copy 'cp'; +use File::stat; +use Fcntl ':mode'; +use Time::HiRes qw(gettimeofday); use DBI; use lib dirname($0) . '/../lib'; +use BackRest::Exception; use BackRest::Utility; +use BackRest::Config; +use BackRest::Manifest; use BackRest::File; use BackRest::Remote; @@ -27,6 +33,8 @@ our @EXPORT = qw(BackRestTestBackup_Test); my $strTestPath; my $strHost; +my $strUser; +my $strGroup; my $strUserBackRest; my $hDb; @@ -43,11 +51,11 @@ sub BackRestTestBackup_PgConnect ';host=' . BackRestTestCommon_DbPathGet(), BackRestTestCommon_UserGet(), undef, - {AutoCommit => 1, RaiseError => 1}); + {AutoCommit => 0, RaiseError => 1}); } #################################################################################################################################### -# BackRestTestBackup_Disconnect +# BackRestTestBackup_PgDisconnect #################################################################################################################################### sub BackRestTestBackup_PgDisconnect { @@ -66,6 +74,10 @@ sub BackRestTestBackup_PgExecute { my $strSql = shift; my $bCheckpoint = shift; + my $bCommit = shift; + + # Set defaults + $bCommit = defined($bCommit) ? 
$bCommit : true; # Log and execute the statement &log(DEBUG, "SQL: ${strSql}"); @@ -76,6 +88,11 @@ sub BackRestTestBackup_PgExecute $hStatement->finish(); + if ($bCommit) + { + BackRestTestBackup_PgExecute('commit', false, false); + } + # Perform a checkpoint if requested if (defined($bCheckpoint) && $bCheckpoint) { @@ -83,21 +100,138 @@ sub BackRestTestBackup_PgExecute } } +#################################################################################################################################### +# BackRestTestBackup_PgSwitchXlog +#################################################################################################################################### +sub BackRestTestBackup_PgSwitchXlog +{ + BackRestTestBackup_PgExecute('select pg_switch_xlog()', false, false); + BackRestTestBackup_PgExecute('select pg_switch_xlog()', false, false); +} + +#################################################################################################################################### +# BackRestTestBackup_PgCommit +#################################################################################################################################### +sub BackRestTestBackup_PgCommit +{ + my $bCheckpoint = shift; + + BackRestTestBackup_PgExecute('commit', $bCheckpoint, false); +} + +#################################################################################################################################### +# BackRestTestBackup_PgSelect +#################################################################################################################################### +sub BackRestTestBackup_PgSelect +{ + my $strSql = shift; + + # Log and execute the statement + &log(DEBUG, "SQL: ${strSql}"); + my $hStatement = $hDb->prepare($strSql); + + $hStatement = $hDb->prepare($strSql); + + $hStatement->execute() or + confess &log(ERROR, "Unable to execute: ${strSql}"); + + my @oyRow = $hStatement->fetchrow_array(); + + $hStatement->finish(); + + return @oyRow; +} + +#################################################################################################################################### +# BackRestTestBackup_PgSelectOne +#################################################################################################################################### +sub BackRestTestBackup_PgSelectOne +{ + my $strSql = shift; + + return (BackRestTestBackup_PgSelect($strSql))[0]; +} + +#################################################################################################################################### +# BackRestTestBackup_PgSelectOneTest +#################################################################################################################################### +sub BackRestTestBackup_PgSelectOneTest +{ + my $strSql = shift; + my $strExpectedValue = shift; + my $iTimeout = shift; + + my $lStartTime = time(); + my $strActualValue; + + do + { + $strActualValue = BackRestTestBackup_PgSelectOne($strSql); + + if (defined($strActualValue) && $strActualValue eq $strExpectedValue) + { + return; + } + } + while (defined($iTimeout) && (time() - $lStartTime) <= $iTimeout); + + confess "expected value '${strExpectedValue}' from '${strSql}' but actual was '" . + (defined($strActualValue) ? $strActualValue : '[undef]') . 
"'"; +} + #################################################################################################################################### # BackRestTestBackup_ClusterStop #################################################################################################################################### sub BackRestTestBackup_ClusterStop { my $strPath = shift; + my $bImmediate = shift; + + # Set default + $strPath = defined($strPath) ? $strPath : BackRestTestCommon_DbCommonPathGet(); + $bImmediate = defined($bImmediate) ? $bImmediate : false; # Disconnect user session BackRestTestBackup_PgDisconnect(); - # If postmaster process is running them stop the cluster + # Drop the cluster + BackRestTestCommon_ClusterStop +} + +#################################################################################################################################### +# BackRestTestBackup_ClusterStart +#################################################################################################################################### +sub BackRestTestBackup_ClusterStart +{ + my $strPath = shift; + my $iPort = shift; + my $bHotStandby = shift; + + # Set default + $iPort = defined($iPort) ? $iPort : BackRestTestCommon_DbPortGet(); + $strPath = defined($strPath) ? $strPath : BackRestTestCommon_DbCommonPathGet(); + $bHotStandby = defined($bHotStandby) ? $bHotStandby : false; + + # Make sure postgres is not running if (-e $strPath . '/postmaster.pid') { - BackRestTestCommon_Execute(BackRestTestCommon_PgSqlBinPathGet() . "/pg_ctl stop -D ${strPath} -w -s -m fast"); + confess 'postmaster.pid exists'; } + + # Creat the archive command + my $strArchive = BackRestTestCommon_CommandMainGet() . ' --stanza=' . BackRestTestCommon_StanzaGet() . + ' --config=' . BackRestTestCommon_DbPathGet() . '/pg_backrest.conf archive-push %p'; + + # Start the cluster + BackRestTestCommon_Execute(BackRestTestCommon_PgSqlBinPathGet() . "/pg_ctl start -o \"-c port=${iPort}" . + ' -c checkpoint_segments=1' . + " -c wal_level=hot_standby -c archive_mode=on -c archive_command='${strArchive}'" . + ($bHotStandby ? ' -c hot_standby=on' : '') . + " -c unix_socket_directories='" . BackRestTestCommon_DbPathGet() . "'\" " . + "-D ${strPath} -l ${strPath}/postgresql.log -w -s"); + + # Connect user session + BackRestTestBackup_PgConnect(); } #################################################################################################################################### @@ -128,14 +262,9 @@ sub BackRestTestBackup_ClusterCreate my $strPath = shift; my $iPort = shift; - my $strArchive = BackRestTestCommon_CommandMainGet() . ' --stanza=' . BackRestTestCommon_StanzaGet() . - ' --config=' . BackRestTestCommon_DbPathGet() . '/pg_backrest.conf archive-push %p'; - BackRestTestCommon_Execute(BackRestTestCommon_PgSqlBinPathGet() . "/initdb -D ${strPath} -A trust"); - BackRestTestCommon_Execute(BackRestTestCommon_PgSqlBinPathGet() . "/pg_ctl start -o \"-c port=${iPort} -c " . - "checkpoint_segments=1 -c wal_level=archive -c archive_mode=on -c archive_command='${strArchive}' " . - "-c unix_socket_directories='" . BackRestTestCommon_DbPathGet() . "'\" " . 
- "-D ${strPath} -l ${strPath}/postgresql.log -w -s"); + + BackRestTestBackup_ClusterStart($strPath, $iPort); # Connect user session BackRestTestBackup_PgConnect(); @@ -146,18 +275,24 @@ sub BackRestTestBackup_ClusterCreate #################################################################################################################################### sub BackRestTestBackup_Drop { + my $bImmediate = shift; + # Stop the cluster if one is running - BackRestTestBackup_ClusterStop(BackRestTestCommon_DbCommonPathGet()); + BackRestTestBackup_ClusterStop(BackRestTestCommon_DbCommonPathGet(), $bImmediate); - # Remove the backrest private directory - if (-e BackRestTestCommon_BackupPathGet()) - { - BackRestTestCommon_Execute('rm -rf ' . BackRestTestCommon_BackupPathGet(), true, true); - } + # Drop the test path + BackRestTestCommon_Drop(); - # Remove the test directory - system('rm -rf ' . BackRestTestCommon_TestPathGet()) == 0 - or die 'unable to remove ' . BackRestTestCommon_TestPathGet() . 'path'; + # # Remove the backrest private directory + # while (-e BackRestTestCommon_RepoPathGet()) + # { + # BackRestTestCommon_PathRemove(BackRestTestCommon_RepoPathGet(), true, true); + # BackRestTestCommon_PathRemove(BackRestTestCommon_RepoPathGet(), false, true); + # hsleep(.1); + # } + # + # # Remove the test directory + # BackRestTestCommon_PathRemove(BackRestTestCommon_TestPathGet()); } #################################################################################################################################### @@ -173,33 +308,37 @@ sub BackRestTestBackup_Create $bCluster = defined($bCluster) ? $bCluster : true; # Drop the old test directory - BackRestTestBackup_Drop(); + BackRestTestBackup_Drop(true); # Create the test directory - mkdir(BackRestTestCommon_TestPathGet(), oct('0770')) - or confess 'Unable to create ' . BackRestTestCommon_TestPathGet() . ' path'; + BackRestTestCommon_Create(); - # Create the db directory - mkdir(BackRestTestCommon_DbPathGet(), oct('0700')) - or confess 'Unable to create ' . BackRestTestCommon_DbPathGet() . ' path'; + # Create the db paths + BackRestTestCommon_PathCreate(BackRestTestCommon_DbPathGet()); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbCommonPathGet()); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbCommonPathGet(2)); - # Create the db/common directory - mkdir(BackRestTestCommon_DbCommonPathGet()) - or confess 'Unable to create ' . BackRestTestCommon_DbCommonPathGet() . ' path'; + # Create tablespace paths + BackRestTestCommon_PathCreate(BackRestTestCommon_DbTablespacePathGet()); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbTablespacePathGet(1)); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbTablespacePathGet(1, 2)); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbTablespacePathGet(2)); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbTablespacePathGet(2, 2)); # Create the archive directory - mkdir(BackRestTestCommon_ArchivePathGet(), oct('0700')) - or confess 'Unable to create ' . BackRestTestCommon_ArchivePathGet() . ' path'; + if ($bRemote) + { + BackRestTestCommon_PathCreate(BackRestTestCommon_LocalPathGet()); + } # Create the backup directory if ($bRemote) { - BackRestTestCommon_Execute('mkdir -m 700 ' . BackRestTestCommon_BackupPathGet(), true); + BackRestTestCommon_Execute('mkdir -m 700 ' . BackRestTestCommon_RepoPathGet(), true); } else { - mkdir(BackRestTestCommon_BackupPathGet(), oct('0700')) - or confess 'Unable to create ' . BackRestTestCommon_BackupPathGet() . 
' path'; + BackRestTestCommon_PathCreate(BackRestTestCommon_RepoPathGet()); } # Create the cluster @@ -209,12 +348,1011 @@ sub BackRestTestBackup_Create } } +#################################################################################################################################### +# BackRestTestBackup_PathCreate +# +# Create a path specifying mode. +#################################################################################################################################### +sub BackRestTestBackup_PathCreate +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strSubPath = shift; + my $strMode = shift; + + # Create final file location + my $strFinalPath = ${$oManifestRef}{'backup:path'}{$strPath} . (defined($strSubPath) ? "/${strSubPath}" : ''); + + # Create the path + if (!(-e $strFinalPath)) + { + BackRestTestCommon_PathCreate($strFinalPath, $strMode); + } + + return $strFinalPath; +} + +#################################################################################################################################### +# BackRestTestBackup_PathMode +# +# Change the mode of a path. +#################################################################################################################################### +sub BackRestTestBackup_PathMode +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strSubPath = shift; + my $strMode = shift; + + # Create final file location + my $strFinalPath = ${$oManifestRef}{'backup:path'}{$strPath} . (defined($strSubPath) ? "/${strSubPath}" : ''); + + BackRestTestCommon_PathMode($strFinalPath, $strMode); + + return $strFinalPath; +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestPathCreate +# +# Create a path specifying mode and add it to the manifest. +#################################################################################################################################### +sub BackRestTestBackup_ManifestPathCreate +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strSubPath = shift; + my $strMode = shift; + + # Create final file location + my $strFinalPath = BackRestTestBackup_PathCreate($oManifestRef, $strPath, $strSubPath, $strMode); + + # Stat the file + my $oStat = lstat($strFinalPath); + + # Check for errors in stat + if (!defined($oStat)) + { + confess 'unable to stat ${strSubPath}'; + } + + my $strManifestPath = defined($strSubPath) ? $strSubPath : '.'; + + # Load file into manifest + ${$oManifestRef}{"${strPath}:path"}{$strManifestPath}{group} = getgrgid($oStat->gid); + ${$oManifestRef}{"${strPath}:path"}{$strManifestPath}{user} = getpwuid($oStat->uid); + ${$oManifestRef}{"${strPath}:path"}{$strManifestPath}{mode} = sprintf('%04o', S_IMODE($oStat->mode)); +} + +#################################################################################################################################### +# BackRestTestBackup_PathRemove +# +# Remove a path. +#################################################################################################################################### +sub BackRestTestBackup_PathRemove +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strSubPath = shift; + + # Create final file location + my $strFinalPath = ${$oManifestRef}{'backup:path'}{$strPath} . (defined($strSubPath) ? 
"/${strSubPath}" : ''); + + # Create the path + BackRestTestCommon_PathRemove($strFinalPath); + + return $strFinalPath; +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestTablespaceCreate +# +# Create a tablespace specifying mode and add it to the manifest. +#################################################################################################################################### +sub BackRestTestBackup_ManifestTablespaceCreate +{ + my $oManifestRef = shift; + my $iOid = shift; + my $strMode = shift; + + # Create final file location + my $strPath = BackRestTestCommon_DbTablespacePathGet($iOid); + + # Create the path + # if (!(-e $strPath)) + # { + # BackRestTestCommon_PathCreate($strPath, $strMode); + # } + + # Stat the path + my $oStat = lstat($strPath); + + # Check for errors in stat + if (!defined($oStat)) + { + confess 'unable to stat path ${strPath}'; + } + + # Load path into manifest + ${$oManifestRef}{"tablespace:${iOid}:path"}{'.'}{group} = getgrgid($oStat->gid); + ${$oManifestRef}{"tablespace:${iOid}:path"}{'.'}{user} = getpwuid($oStat->uid); + ${$oManifestRef}{"tablespace:${iOid}:path"}{'.'}{mode} = sprintf('%04o', S_IMODE($oStat->mode)); + + # Create the link in pg_tblspc + my $strLink = BackRestTestCommon_DbCommonPathGet() . "/pg_tblspc/${iOid}"; + + symlink($strPath, $strLink) + or confess "unable to link ${strLink} to ${strPath}"; + + # Stat the link + $oStat = lstat($strLink); + + # Check for errors in stat + if (!defined($oStat)) + { + confess 'unable to stat link ${strLink}'; + } + + # Load link into the manifest + ${$oManifestRef}{"base:link"}{"pg_tblspc/${iOid}"}{group} = getgrgid($oStat->gid); + ${$oManifestRef}{"base:link"}{"pg_tblspc/${iOid}"}{user} = getpwuid($oStat->uid); + ${$oManifestRef}{"base:link"}{"pg_tblspc/${iOid}"}{link_destination} = $strPath; + + # Load tablespace into the manifest + ${$oManifestRef}{"backup:tablespace"}{$iOid}{link} = $iOid; + ${$oManifestRef}{"backup:tablespace"}{$iOid}{path} = $strPath; + + ${$oManifestRef}{"backup:path"}{"tablespace:${iOid}"} = $strPath; +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestTablespaceDrop +# +# Drop a tablespace add remove it from the manifest. +#################################################################################################################################### +sub BackRestTestBackup_ManifestTablespaceDrop +{ + my $oManifestRef = shift; + my $iOid = shift; + my $iIndex = shift; + + # Remove tablespace path/file/link from manifest + delete(${$oManifestRef}{"tablespace:${iOid}:path"}); + delete(${$oManifestRef}{"tablespace:${iOid}:link"}); + delete(${$oManifestRef}{"tablespace:${iOid}:file"}); + + # Drop the link in pg_tblspc + BackRestTestCommon_FileRemove(BackRestTestCommon_DbCommonPathGet($iIndex) . "/pg_tblspc/${iOid}"); + + # Remove tablespace rom manifest + delete(${$oManifestRef}{"base:link"}{"pg_tblspc/${iOid}"}); + delete(${$oManifestRef}{"backup:tablespace"}{$iOid}); + delete(${$oManifestRef}{"backup:path"}{"tablespace:${iOid}"}); +} + +#################################################################################################################################### +# BackRestTestBackup_FileCreate +# +# Create a file specifying content, mode, and time. 
+#################################################################################################################################### +sub BackRestTestBackup_FileCreate +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strFile = shift; + my $strContent = shift; + my $lTime = shift; + my $strMode = shift; + + # Create actual file location + my $strPathFile = ${$oManifestRef}{'backup:path'}{$strPath} . "/${strFile}"; + + # Create the file + BackRestTestCommon_FileCreate($strPathFile, $strContent, $lTime, $strMode); + + # Return path to created file + return $strPathFile; +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestFileCreate +# +# Create a file specifying content, mode, and time and add it to the manifest. +#################################################################################################################################### +sub BackRestTestBackup_ManifestFileCreate +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strFile = shift; + my $strContent = shift; + my $strChecksum = shift; + my $lTime = shift; + my $strMode = shift; + + # Create the file + my $strPathFile = BackRestTestBackup_FileCreate($oManifestRef, $strPath, $strFile, $strContent, $lTime, $strMode); + + # Stat the file + my $oStat = lstat($strPathFile); + + # Check for errors in stat + if (!defined($oStat)) + { + confess 'unable to stat ${strFile}'; + } + + # Load file into manifest + ${$oManifestRef}{"${strPath}:file"}{$strFile}{group} = getgrgid($oStat->gid); + ${$oManifestRef}{"${strPath}:file"}{$strFile}{user} = getpwuid($oStat->uid); + ${$oManifestRef}{"${strPath}:file"}{$strFile}{mode} = sprintf('%04o', S_IMODE($oStat->mode)); + ${$oManifestRef}{"${strPath}:file"}{$strFile}{modification_time} = $oStat->mtime; + ${$oManifestRef}{"${strPath}:file"}{$strFile}{size} = $oStat->size; + delete(${$oManifestRef}{"${strPath}:file"}{$strFile}{reference}); + + if (defined($strChecksum)) + { + ${$oManifestRef}{"${strPath}:file"}{$strFile}{checksum} = $strChecksum; + } +} + +#################################################################################################################################### +# BackRestTestBackup_FileRemove +# +# Remove a file from disk. +#################################################################################################################################### +sub BackRestTestBackup_FileRemove +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strFile = shift; + my $bIgnoreMissing = shift; + + # Create actual file location + my $strPathFile = ${$oManifestRef}{'backup:path'}{$strPath} . "/${strFile}"; + + # Remove the file + if (!(defined($bIgnoreMissing) && $bIgnoreMissing && !(-e $strPathFile))) + { + BackRestTestCommon_FileRemove($strPathFile); + } + + return $strPathFile; +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestFileRemove +# +# Remove a file from disk and (optionally) the manifest. +#################################################################################################################################### +sub BackRestTestBackup_ManifestFileRemove +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strFile = shift; + + # Create actual file location + my $strPathFile = ${$oManifestRef}{'backup:path'}{$strPath} . 
"/${strFile}"; + + # Remove the file + BackRestTestBackup_FileRemove($oManifestRef, $strPath, $strFile, true); + + # Remove from manifest + delete(${$oManifestRef}{"${strPath}:file"}{$strFile}); +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestReference +# +# Update all files that do not have a reference with the supplied reference. +#################################################################################################################################### +sub BackRestTestBackup_ManifestReference +{ + my $oManifestRef = shift; + my $strReference = shift; + my $bClear = shift; + + # Set prior backup + if (defined($strReference)) + { + ${$oManifestRef}{backup}{prior} = $strReference; + } + else + { + delete(${$oManifestRef}{backup}{prior}); + } + + # Clear the reference list + delete(${$oManifestRef}{backup}{reference}); + + # Find all file sections + foreach my $strSectionFile (sort(keys $oManifestRef)) + { + # Skip non-file sections + if ($strSectionFile !~ /\:file$/) + { + next; + } + + foreach my $strFile (sort(keys ${$oManifestRef}{$strSectionFile})) + { + if (!defined($strReference)) + { + delete(${$oManifestRef}{$strSectionFile}{$strFile}{reference}); + } + elsif (defined($bClear) && $bClear) + { + if (defined(${$oManifestRef}{$strSectionFile}{$strFile}{reference}) && + ${$oManifestRef}{$strSectionFile}{$strFile}{reference} ne $strReference) + { + delete(${$oManifestRef}{$strSectionFile}{$strFile}{reference}); + } + } + elsif (!defined(${$oManifestRef}{$strSectionFile}{$strFile}{reference})) + { + ${$oManifestRef}{$strSectionFile}{$strFile}{reference} = $strReference; + } + } + } +} + +#################################################################################################################################### +# BackRestTestBackup_LinkCreate +# +# Create a file specifying content, mode, and time. +#################################################################################################################################### +sub BackRestTestBackup_LinkCreate +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strFile = shift; + my $strDestination = shift; + + # Create actual file location + my $strPathFile = ${$oManifestRef}{'backup:path'}{$strPath} . "/${strFile}"; + + # Create the file + symlink($strDestination, $strPathFile) + or confess "unable to link ${strPathFile} to ${strDestination}"; + + # Return path to created file + return $strPathFile; +} + +#################################################################################################################################### +# BackRestTestBackup_LinkRemove +# +# Remove a link from disk. +#################################################################################################################################### +# sub BackRestTestBackup_LinkRemove +# { +# my $oManifestRef = shift; +# my $strPath = shift; +# my $strFile = shift; +# my $bManifestRemove = shift; +# +# # Create actual file location +# my $strPathFile = ${$oManifestRef}{'backup:path'}{$strPath} . 
"/${strFile}"; +# +# # Remove the file +# if (-e $strPathFile) +# { +# BackRestTestCommon_FileRemove($strPathFile); +# } +# +# # Remove from manifest +# if (defined($bManifestRemove) && $bManifestRemove) +# { +# delete(${$oManifestRef}{"${strPath}:file"}{$strFile}); +# } +# } + +#################################################################################################################################### +# BackRestTestBackup_ManifestLinkCreate +# +# Create a link and add it to the manifest. +#################################################################################################################################### +sub BackRestTestBackup_ManifestLinkCreate +{ + my $oManifestRef = shift; + my $strPath = shift; + my $strFile = shift; + my $strDestination = shift; + + # Create the file + my $strPathFile = BackRestTestBackup_LinkCreate($oManifestRef, $strPath, $strFile, $strDestination); + + # Stat the file + my $oStat = lstat($strPathFile); + + # Check for errors in stat + if (!defined($oStat)) + { + confess 'unable to stat ${strFile}'; + } + + # Load file into manifest + ${$oManifestRef}{"${strPath}:link"}{$strFile}{group} = getgrgid($oStat->gid); + ${$oManifestRef}{"${strPath}:link"}{$strFile}{user} = getpwuid($oStat->uid); + ${$oManifestRef}{"${strPath}:link"}{$strFile}{link_destination} = $strDestination; +} + +#################################################################################################################################### +# BackRestTestBackup_LastBackup +#################################################################################################################################### +sub BackRestTestBackup_LastBackup +{ + my $oFile = shift; + + my @stryBackup = $oFile->list(PATH_BACKUP_CLUSTER, undef, undef, 'reverse'); + + if (!defined($stryBackup[1])) + { + confess 'no backup was found'; + } + + return $stryBackup[1]; +} + +#################################################################################################################################### +# BackRestTestBackup_BackupBegin +#################################################################################################################################### +sub BackRestTestBackup_BackupBegin +{ + my $strType = shift; + my $strStanza = shift; + my $bRemote = shift; + my $strComment = shift; + my $bSynthetic = shift; + my $bTestPoint = shift; + my $fTestDelay = shift; + + # Set defaults + $bTestPoint = defined($bTestPoint) ? $bTestPoint : false; + $fTestDelay = defined($fTestDelay) ? $fTestDelay : 0; + + &log(INFO, " ${strType} backup" . (defined($strComment) ? " (${strComment})" : '')); + + BackRestTestCommon_ExecuteBegin(BackRestTestCommon_CommandMainGet() . ' --config=' . + ($bRemote ? BackRestTestCommon_RepoPathGet() : BackRestTestCommon_DbPathGet()) . + "/pg_backrest.conf" . ($bSynthetic ? " --no-start-stop" : '') . + ($strType ne 'incr' ? " --type=${strType}" : '') . + " --stanza=${strStanza} backup" . ($bTestPoint ? 
" --test --test-delay=${fTestDelay}": ''), + $bRemote); +} + +#################################################################################################################################### +# BackRestTestBackup_BackupEnd +#################################################################################################################################### +sub BackRestTestBackup_BackupEnd +{ + my $strType = shift; + my $oFile = shift; + my $bRemote = shift; + my $strBackup = shift; + my $oExpectedManifestRef = shift; + my $bSynthetic = shift; + my $iExpectedExitStatus = shift; + + my $iExitStatus = BackRestTestCommon_ExecuteEnd(undef, undef, undef, $iExpectedExitStatus); + + if (defined($iExpectedExitStatus)) + { + return undef; + } + + ${$oExpectedManifestRef}{backup}{type} = $strType; + + if (!defined($strBackup)) + { + $strBackup = BackRestTestBackup_LastBackup($oFile); + } + + if ($bSynthetic) + { + BackRestTestBackup_BackupCompare($oFile, $bRemote, $strBackup, $oExpectedManifestRef); + } + + return $strBackup; +} + +#################################################################################################################################### +# BackRestTestBackup_BackupSynthetic +#################################################################################################################################### +sub BackRestTestBackup_BackupSynthetic +{ + my $strType = shift; + my $strStanza = shift; + my $bRemote = shift; + my $oFile = shift; + my $oExpectedManifestRef = shift; + my $strComment = shift; + my $strTestPoint = shift; + my $fTestDelay = shift; + my $iExpectedExitStatus = shift; + + BackRestTestBackup_BackupBegin($strType, $strStanza, $bRemote, $strComment, true, defined($strTestPoint), $fTestDelay); + + if (defined($strTestPoint)) + { + BackRestTestCommon_ExecuteEnd($strTestPoint); + } + + return BackRestTestBackup_BackupEnd($strType, $oFile, $bRemote, undef, $oExpectedManifestRef, true, $iExpectedExitStatus); +} + +#################################################################################################################################### +# BackRestTestBackup_Backup +#################################################################################################################################### +sub BackRestTestBackup_Backup +{ + my $strType = shift; + my $strStanza = shift; + my $bRemote = shift; + my $oFile = shift; + my $strComment = shift; + my $strTestPoint = shift; + my $fTestDelay = shift; + my $iExpectedExitStatus = shift; + + BackRestTestBackup_BackupBegin($strType, $strStanza, $bRemote, $strComment, false, defined($strTestPoint), $fTestDelay); + + if (defined($strTestPoint)) + { + BackRestTestCommon_ExecuteEnd($strTestPoint); + } + + return BackRestTestBackup_BackupEnd($strType, $oFile, $bRemote, undef, undef, false, $iExpectedExitStatus); +} + +#################################################################################################################################### +# BackRestTestBackup_BackupCompare +#################################################################################################################################### +sub BackRestTestBackup_BackupCompare +{ + my $oFile = shift; + my $bRemote = shift; + my $strBackup = shift; + my $oExpectedManifestRef = shift; + + ${$oExpectedManifestRef}{backup}{label} = $strBackup; + + # Remove old reference list + delete(${$oExpectedManifestRef}{backup}{reference}); + + # Build the new reference list + foreach my $strSectionFile (sort(keys $oExpectedManifestRef)) + { + # Skip 
non-file sections + if ($strSectionFile !~ /\:file$/) + { + next; + } + + foreach my $strFile (sort(keys ${$oExpectedManifestRef}{$strSectionFile})) + { + if (defined(${$oExpectedManifestRef}{$strSectionFile}{$strFile}{reference})) + { + my $strFileReference = ${$oExpectedManifestRef}{$strSectionFile}{$strFile}{reference}; + + if (!defined(${$oExpectedManifestRef}{backup}{reference})) + { + ${$oExpectedManifestRef}{backup}{reference} = $strFileReference; + } + else + { + if (${$oExpectedManifestRef}{backup}{reference} !~ /^$strFileReference|,$strFileReference/) + { + ${$oExpectedManifestRef}{backup}{reference} .= ",${strFileReference}"; + } + } + } + } + } + + # Change mode on the backup path so it can be read + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 750 ' . BackRestTestCommon_RepoPathGet(), true); + } + + my %oActualManifest; + ini_load($oFile->path_get(PATH_BACKUP_CLUSTER, $strBackup) . '/backup.manifest', \%oActualManifest); + + ${$oExpectedManifestRef}{backup}{'timestamp-start'} = $oActualManifest{backup}{'timestamp-start'}; + ${$oExpectedManifestRef}{backup}{'timestamp-stop'} = $oActualManifest{backup}{'timestamp-stop'}; + ${$oExpectedManifestRef}{backup}{'timestamp-copy-start'} = $oActualManifest{backup}{'timestamp-copy-start'}; + ${$oExpectedManifestRef}{backup}{'checksum'} = $oActualManifest{backup}{'checksum'}; + ${$oExpectedManifestRef}{backup}{format} = FORMAT; + + my $strTestPath = BackRestTestCommon_TestPathGet(); + + ini_save("${strTestPath}/actual.manifest", \%oActualManifest); + ini_save("${strTestPath}/expected.manifest", $oExpectedManifestRef); + + BackRestTestCommon_Execute("diff ${strTestPath}/expected.manifest ${strTestPath}/actual.manifest"); + + # Change mode on the backup path back before unit tests continue + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 700 ' . BackRestTestCommon_RepoPathGet(), true); + } + + $oFile->remove(PATH_ABSOLUTE, "${strTestPath}/expected.manifest"); + $oFile->remove(PATH_ABSOLUTE, "${strTestPath}/actual.manifest"); +} + +#################################################################################################################################### +# BackRestTestBackup_ManifestMunge +# +# Allows for munging of the manifest while make it appear to be valid. This is used to create various error conditions that should +# be caught by the unit tests. +#################################################################################################################################### +sub BackRestTestBackup_ManifestMunge +{ + my $oFile = shift; + my $bRemote = shift; + my $strBackup = shift; + my $strSection = shift; + my $strKey = shift; + my $strSubKey = shift; + my $strValue = shift; + + # Make sure the new value is at least vaguely reasonable + if (!defined($strSection) || !defined($strKey)) + { + confess &log(ASSERT, 'strSection and strKey must be defined'); + } + + # Change mode on the backup path so it can be read/written + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 750 ' . BackRestTestCommon_RepoPathGet(), true); + BackRestTestCommon_Execute('chmod 770 ' . $oFile->path_get(PATH_BACKUP_CLUSTER, $strBackup) . '/backup.manifest', true); + } + + # Read the manifest + my %oManifest; + ini_load($oFile->path_get(PATH_BACKUP_CLUSTER, $strBackup) . 
'/backup.manifest', \%oManifest);
+
+    # Write in the munged value
+    if (defined($strSubKey))
+    {
+        if (defined($strValue))
+        {
+            $oManifest{$strSection}{$strKey}{$strSubKey} = $strValue;
+        }
+        else
+        {
+            delete($oManifest{$strSection}{$strKey}{$strSubKey});
+        }
+    }
+    else
+    {
+        if (defined($strValue))
+        {
+            $oManifest{$strSection}{$strKey} = $strValue;
+        }
+        else
+        {
+            delete($oManifest{$strSection}{$strKey});
+        }
+    }
+
+    # Remove the old checksum
+    delete($oManifest{backup}{checksum});
+
+    my $oSHA = Digest::SHA->new('sha1');
+
+    # Calculate the checksum from manifest values
+    foreach my $strSection (sort(keys(%oManifest)))
+    {
+        $oSHA->add($strSection);
+
+        foreach my $strKey (sort(keys($oManifest{$strSection})))
+        {
+            $oSHA->add($strKey);
+
+            my $strValue = $oManifest{$strSection}{$strKey};
+
+            if (!defined($strValue))
+            {
+                confess &log(ASSERT, "section ${strSection}, key ${strKey} has undef value");
+            }
+
+            if (ref($strValue) eq "HASH")
+            {
+                foreach my $strSubKey (sort(keys($oManifest{$strSection}{$strKey})))
+                {
+                    my $strSubValue = $oManifest{$strSection}{$strKey}{$strSubKey};
+
+                    if (!defined($strSubValue))
+                    {
+                        confess &log(ASSERT, "section ${strSection}, key ${strKey}, subkey ${strSubKey} has undef value");
+                    }
+
+                    $oSHA->add($strSubValue);
+                }
+            }
+            else
+            {
+                $oSHA->add($strValue);
+            }
+        }
+    }
+
+    # Set the new checksum
+    $oManifest{backup}{checksum} = $oSHA->hexdigest();
+
+    # Resave the manifest
+    ini_save($oFile->path_get(PATH_BACKUP_CLUSTER, $strBackup) . '/backup.manifest', \%oManifest);
+
+    # Change mode on the backup path back before unit tests continue
+    if ($bRemote)
+    {
+        BackRestTestCommon_Execute('chmod 750 ' . $oFile->path_get(PATH_BACKUP_CLUSTER, $strBackup) . '/backup.manifest', true);
+        BackRestTestCommon_Execute('chmod 700 ' . BackRestTestCommon_RepoPathGet(), true);
+    }
+}
+
+####################################################################################################################################
+# BackRestTestBackup_Restore
+####################################################################################################################################
+sub BackRestTestBackup_Restore
+{
+    my $oFile = shift;
+    my $strBackup = shift;
+    my $strStanza = shift;
+    my $bRemote = shift;
+    my $oExpectedManifestRef = shift;
+    my $oRemapHashRef = shift;
+    my $bDelta = shift;
+    my $bForce = shift;
+    my $strType = shift;
+    my $strTarget = shift;
+    my $bTargetExclusive = shift;
+    my $bTargetResume = shift;
+    my $strTargetTimeline = shift;
+    my $oRecoveryHashRef = shift;
+    my $strComment = shift;
+    my $iExpectedExitStatus = shift;
+
+    # Set defaults
+    $bDelta = defined($bDelta) ? $bDelta : false;
+    $bForce = defined($bForce) ? $bForce : false;
+
+    my $bSynthetic = defined($oExpectedManifestRef) ? true : false;
+
+    &log(INFO, '        restore' .
+               ($bDelta ? ' delta' : '') .
+               ($bForce ? ', force' : '') .
+               ($strBackup ne 'latest' ? ", backup '${strBackup}'" : '') .
+               ($strType ? ", type '${strType}'" : '') .
+               ($strTarget ? ", target '${strTarget}'" : '') .
+               ($strTargetTimeline ? ", timeline '${strTargetTimeline}'" : '') .
+               (defined($bTargetExclusive) && $bTargetExclusive ? ', exclusive' : '') .
+               (defined($bTargetResume) && $bTargetResume ? ', resume' : '') .
+               (defined($oRemapHashRef) ? ', remap' : '') .
+               (defined($iExpectedExitStatus) ? ", expect exit ${iExpectedExitStatus}" : '') .
+               (defined($strComment) ?
" (${strComment})" : '')); + + if (!defined($oExpectedManifestRef)) + { + # Change mode on the backup path so it can be read + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 750 ' . BackRestTestCommon_RepoPathGet(), true); + } + + my $oExpectedManifest = new BackRest::Manifest(BackRestTestCommon_RepoPathGet() . + "/backup/${strStanza}/${strBackup}/backup.manifest", true); + + $oExpectedManifestRef = $oExpectedManifest->{oManifest}; + + # Change mode on the backup path back before unit tests continue + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 700 ' . BackRestTestCommon_RepoPathGet(), true); + } + } + + if (defined($oRemapHashRef)) + { + BackRestTestCommon_ConfigRemap($oRemapHashRef, $oExpectedManifestRef, $bRemote); + } + + if (defined($oRecoveryHashRef)) + { + BackRestTestCommon_ConfigRecovery($oRecoveryHashRef, $bRemote); + } + + # Create the backup command + BackRestTestCommon_Execute(BackRestTestCommon_CommandMainGet() . ' --config=' . BackRestTestCommon_DbPathGet() . + '/pg_backrest.conf' . (defined($bDelta) && $bDelta ? ' --delta' : '') . + (defined($bForce) && $bForce ? ' --force' : '') . + ($strBackup ne 'latest' ? " --set=${strBackup}" : '') . + (defined($strType) && $strType ne RECOVERY_TYPE_DEFAULT ? " --type=${strType}" : '') . + (defined($strTarget) ? " --target=\"${strTarget}\"" : '') . + (defined($strTargetTimeline) ? " --target-timeline=\"${strTargetTimeline}\"" : '') . + (defined($bTargetExclusive) && $bTargetExclusive ? " --target-exclusive" : '') . + (defined($bTargetResume) && $bTargetResume ? " --target-resume" : '') . + " --stanza=${strStanza} restore", + undef, undef, undef, $iExpectedExitStatus); + + if (!defined($iExpectedExitStatus)) + { + BackRestTestBackup_RestoreCompare($oFile, $strStanza, $bRemote, $strBackup, $bSynthetic, $oExpectedManifestRef); + } +} + +#################################################################################################################################### +# BackRestTestBackup_RestoreCompare +#################################################################################################################################### +sub BackRestTestBackup_RestoreCompare +{ + my $oFile = shift; + my $strStanza = shift; + my $bRemote = shift; + my $strBackup = shift; + my $bSynthetic = shift; + my $oExpectedManifestRef = shift; + + my $strTestPath = BackRestTestCommon_TestPathGet(); + + # Load the last manifest if it exists + my $oLastManifest = undef; + + if (defined(${$oExpectedManifestRef}{'backup'}{'prior'})) + { + # Change mode on the backup path so it can be read + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 750 ' . BackRestTestCommon_RepoPathGet(), true); + } + + my $oExpectedManifest = new BackRest::Manifest(BackRestTestCommon_RepoPathGet() . + "/backup/${strStanza}/${strBackup}/backup.manifest", true); + + $oLastManifest = new BackRest::Manifest(BackRestTestCommon_RepoPathGet() . + "/backup/${strStanza}/" . ${$oExpectedManifestRef}{'backup'}{'prior'} . + '/backup.manifest', true); + + # Change mode on the backup path back before unit tests continue + if ($bRemote) + { + BackRestTestCommon_Execute('chmod 700 ' . 
BackRestTestCommon_RepoPathGet(), true); + } + + } + + # Generate the actual manifest + my $oActualManifest = new BackRest::Manifest("${strTestPath}/actual.manifest", false); + + my $oTablespaceMapRef = undef; + $oActualManifest->build($oFile, ${$oExpectedManifestRef}{'backup:path'}{'base'}, $oLastManifest, true, undef); + + # Generate checksums for all files if required + # Also fudge size if this is a synthetic test - sizes may change during backup. + foreach my $strSectionPathKey ($oActualManifest->keys('backup:path')) + { + my $strSectionPath = $oActualManifest->get('backup:path', $strSectionPathKey); + + # Create all paths in the manifest that do not already exist + my $strSection = "${strSectionPathKey}:file"; + + foreach my $strName ($oActualManifest->keys($strSection)) + { + if (!$bSynthetic) + { + $oActualManifest->set($strSection, $strName, 'size', ${$oExpectedManifestRef}{$strSection}{$strName}{size}); + } + + if ($oActualManifest->get($strSection, $strName, 'size') != 0) + { + $oActualManifest->set($strSection, $strName, 'checksum', + $oFile->hash(PATH_DB_ABSOLUTE, "${strSectionPath}/${strName}")); + } + } + } + + # Set actual to expected for settings that always change from backup to backup + $oActualManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, + ${$oExpectedManifestRef}{'backup:option'}{compress}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, + ${$oExpectedManifestRef}{'backup:option'}{hardlink}); + + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_VERSION, undef, + ${$oExpectedManifestRef}{'backup'}{version}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_COPY_START, undef, + ${$oExpectedManifestRef}{'backup'}{'timestamp-copy-start'}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_START, undef, + ${$oExpectedManifestRef}{'backup'}{'timestamp-start'}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_STOP, undef, + ${$oExpectedManifestRef}{'backup'}{'timestamp-stop'}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL, undef, + ${$oExpectedManifestRef}{'backup'}{'label'}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE, undef, + ${$oExpectedManifestRef}{'backup'}{'type'}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_CHECKSUM, undef, + ${$oExpectedManifestRef}{'backup'}{'checksum'}); + + if (!$bSynthetic) + { + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START, undef, + ${$oExpectedManifestRef}{'backup'}{'archive-start'}); + $oActualManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP, undef, + ${$oExpectedManifestRef}{'backup'}{'archive-stop'}); + } + + ini_save("${strTestPath}/actual.manifest", $oActualManifest->{oManifest}); + ini_save("${strTestPath}/expected.manifest", $oExpectedManifestRef); + + BackRestTestCommon_Execute("diff ${strTestPath}/expected.manifest ${strTestPath}/actual.manifest"); + + $oFile->remove(PATH_ABSOLUTE, "${strTestPath}/expected.manifest"); + $oFile->remove(PATH_ABSOLUTE, "${strTestPath}/actual.manifest"); +} + +#################################################################################################################################### +# BackRestTestBackup_Expire +#################################################################################################################################### +sub BackRestTestBackup_Expire +{ + my $strStanza = shift; + my $oFile = shift; + my 
$stryBackupExpectedRef = shift; + my $stryArchiveExpectedRef = shift; + my $iExpireFull = shift; + my $iExpireDiff = shift; + my $strExpireArchiveType = shift; + my $iExpireArchive = shift; + + my $strCommand = BackRestTestCommon_CommandMainGet() . ' --config=' . BackRestTestCommon_DbPathGet() . + "/pg_backrest.conf --stanza=${strStanza} expire"; + + if (defined($iExpireFull)) + { + $strCommand .= ' --retention-full=' . $iExpireFull; + } + + if (defined($iExpireDiff)) + { + $strCommand .= ' --retention-diff=' . $iExpireDiff; + } + + if (defined($strExpireArchiveType)) + { + $strCommand .= ' --retention-archive-type=' . $strExpireArchiveType . + ' --retention-archive=' . $iExpireArchive; + } + + BackRestTestCommon_Execute($strCommand); + + # Check that the correct backups were expired + my @stryBackupActual = $oFile->list(PATH_BACKUP_CLUSTER); + + if (join(",", @stryBackupActual) ne join(",", @{$stryBackupExpectedRef})) + { + confess "expected backup list:\n " . join("\n ", @{$stryBackupExpectedRef}) . + "\n\nbut actual was:\n " . join("\n ", @stryBackupActual) . "\n"; + } + + # Check that the correct archive logs were expired + my @stryArchiveActual = $oFile->list(PATH_BACKUP_ARCHIVE, '0000000100000000'); + + if (join(",", @stryArchiveActual) ne join(",", @{$stryArchiveExpectedRef})) + { + confess "expected archive list:\n " . join("\n ", @{$stryArchiveExpectedRef}) . + "\n\nbut actual was:\n " . join("\n ", @stryArchiveActual) . "\n"; + } +} + #################################################################################################################################### # BackRestTestBackup_Test #################################################################################################################################### sub BackRestTestBackup_Test { my $strTest = shift; + my $iThreadMax = shift; # If no test was specified, then run them all if (!defined($strTest)) @@ -226,30 +1364,43 @@ sub BackRestTestBackup_Test $strTestPath = BackRestTestCommon_TestPathGet(); $strHost = BackRestTestCommon_HostGet(); $strUserBackRest = BackRestTestCommon_UserBackRestGet(); + $strUser = BackRestTestCommon_UserGet(); + $strGroup = BackRestTestCommon_GroupGet(); # Setup test variables my $iRun; my $bCreate; my $strStanza = BackRestTestCommon_StanzaGet(); - my $strGroup = BackRestTestCommon_GroupGet(); my $strArchiveChecksum = '1c7e00fd09b9dd11fc2966590b3e3274645dd031'; my $iArchiveMax = 3; my $strXlogPath = BackRestTestCommon_DbCommonPathGet() . '/pg_xlog'; - my $strArchiveTestFile = BackRestTestCommon_DataPathGet() . '/test.archive.bin'; - my $iThreadMax = 4; + my $strArchiveTestFile = BackRestTestCommon_DataPathGet() . 
'/test.archive2.bin'; # Print test banner &log(INFO, 'BACKUP MODULE ******************************************************************'); #------------------------------------------------------------------------------------------------------------------------------- - # Create remote + # Create remotes #------------------------------------------------------------------------------------------------------------------------------- my $oRemote = BackRest::Remote->new ( - strHost => $strHost, - strUser => $strUserBackRest, - strCommand => BackRestTestCommon_CommandRemoteGet() + $strHost, # Host + $strUserBackRest, # User + BackRestTestCommon_CommandRemoteGet(), # Command + OPTION_DEFAULT_BUFFER_SIZE, # Buffer size + OPTION_DEFAULT_COMPRESS_LEVEL, # Compress level + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK, # Compress network level + ); + + my $oLocal = new BackRest::Remote + ( + undef, # Host + undef, # User + undef, # Command + OPTION_DEFAULT_BUFFER_SIZE, # Buffer size + OPTION_DEFAULT_COMPRESS_LEVEL, # Compress level + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK, # Compress network level ); #------------------------------------------------------------------------------------------------------------------------------- @@ -267,46 +1418,38 @@ sub BackRestTestBackup_Test { for (my $bCompress = false; $bCompress <= true; $bCompress++) { - for (my $bChecksum = false; $bChecksum <= true; $bChecksum++) - { - for (my $bArchiveAsync = false; $bArchiveAsync <= $bRemote; $bArchiveAsync++) - { - for (my $bCompressAsync = false; $bCompressAsync <= true; $bCompressAsync++) + for (my $bArchiveAsync = false; $bArchiveAsync <= true; $bArchiveAsync++) { # Increment the run, log, and decide whether this unit test should be run if (!BackRestTestCommon_Run(++$iRun, - "rmt ${bRemote}, cmp ${bCompress}, chk ${bChecksum}, " . - "arc_async ${bArchiveAsync}, cmp_async ${bCompressAsync}")) {next} + "rmt ${bRemote}, cmp ${bCompress}, " . + "arc_async ${bArchiveAsync}")) {next} # Create the test directory if ($bCreate) { # Create the file object - $oFile = (BackRest::File->new + $oFile = (new BackRest::File ( - strStanza => $strStanza, - strBackupPath => BackRestTestCommon_BackupPathGet(), - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + BackRestTestCommon_RepoPathGet(), + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal ))->clone(); BackRestTestBackup_Create($bRemote, false); - # - # # Create the db/common/pg_xlog directory - # mkdir($strXlogPath) - # or confess 'Unable to create ${strXlogPath} path'; $bCreate = false; } BackRestTestCommon_ConfigCreate('db', - ($bRemote ? REMOTE_BACKUP : undef), + ($bRemote ? BACKUP : undef), $bCompress, - $bChecksum, # checksum + undef, # checksum undef, # hardlink undef, # thread-max $bArchiveAsync, - $bCompressAsync); + undef); my $strCommand = BackRestTestCommon_CommandMainGet() . ' --config=' . BackRestTestCommon_DbPathGet() . '/pg_backrest.conf --stanza=db archive-push'; @@ -345,12 +1488,7 @@ sub BackRestTestBackup_Test BackRestTestCommon_Execute($strCommand . 
" ${strSourceFile}"); # Build the archive name to check for at the destination - my $strArchiveCheck = $strArchiveFile; - - if ($bChecksum) - { - $strArchiveCheck .= "-${strArchiveChecksum}"; - } + my $strArchiveCheck = "${strArchiveFile}-${strArchiveChecksum}"; if ($bCompress) { @@ -359,7 +1497,7 @@ sub BackRestTestBackup_Test if (!$oFile->exists(PATH_BACKUP_ARCHIVE, $strArchiveCheck)) { - sleep(1); + hsleep(1); if (!$oFile->exists(PATH_BACKUP_ARCHIVE, $strArchiveCheck)) { @@ -372,8 +1510,6 @@ sub BackRestTestBackup_Test } } } - } - } $bCreate = true; } @@ -381,7 +1517,7 @@ sub BackRestTestBackup_Test if (BackRestTestCommon_Cleanup()) { &log(INFO, 'cleanup'); - BackRestTestBackup_Drop(); + BackRestTestBackup_Drop(true); } } @@ -400,13 +1536,11 @@ sub BackRestTestBackup_Test { for (my $bCompress = false; $bCompress <= true; $bCompress++) { - for (my $bChecksum = false; $bChecksum <= true; $bChecksum++) - { for (my $bExists = false; $bExists <= true; $bExists++) { # Increment the run, log, and decide whether this unit test should be run if (!BackRestTestCommon_Run(++$iRun, - "rmt ${bRemote}, cmp ${bCompress}, chk ${bChecksum}, exists ${bExists}")) {next} + "rmt ${bRemote}, cmp ${bCompress}, exists ${bExists}")) {next} # Create the test directory if ($bCreate) @@ -414,29 +1548,28 @@ sub BackRestTestBackup_Test # Create the file object $oFile = (BackRest::File->new ( - strStanza => $strStanza, - strBackupPath => BackRestTestCommon_BackupPathGet(), - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + BackRestTestCommon_RepoPathGet(), + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal ))->clone(); BackRestTestBackup_Create($bRemote, false); # Create the db/common/pg_xlog directory - mkdir($strXlogPath) - or confess 'Unable to create ${strXlogPath} path'; + BackRestTestCommon_PathCreate($strXlogPath); $bCreate = false; } - BackRestTestCommon_ConfigCreate('db', # local - ($bRemote ? REMOTE_BACKUP : undef), # remote - $bCompress, # compress - $bChecksum, # checksum - undef, # hardlink - undef, # thread-max - undef, # archive-async - undef); # compress-async + BackRestTestCommon_ConfigCreate('db', # local + ($bRemote ? BACKUP : undef), # remote + $bCompress, # compress + undef, # checksum + undef, # hardlink + undef, # thread-max + undef, # archive-async + undef); # compress-async my $strCommand = BackRestTestCommon_CommandMainGet() . ' --config=' . BackRestTestCommon_DbPathGet() . '/pg_backrest.conf --stanza=db archive-get'; @@ -459,12 +1592,7 @@ sub BackRestTestBackup_Test &log(INFO, ' archive ' .sprintf('%02x', $iArchiveNo) . 
" - ${strArchiveFile}"); - my $strSourceFile = $strArchiveFile; - - if ($bChecksum) - { - $strSourceFile .= "-${strArchiveChecksum}"; - } + my $strSourceFile = "${strArchiveFile}-${strArchiveChecksum}"; if ($bCompress) { @@ -508,18 +1636,437 @@ sub BackRestTestBackup_Test $bCreate = true; } } - } } if (BackRestTestCommon_Cleanup()) { &log(INFO, 'cleanup'); - BackRestTestBackup_Drop(); + BackRestTestBackup_Drop(true); + } + } + + #------------------------------------------------------------------------------------------------------------------------------- + # Test expire + #------------------------------------------------------------------------------------------------------------------------------- + if ($strTest eq 'all' || $strTest eq 'expire') + { + $iRun = 0; + my $oFile; + + &log(INFO, "Test expire\n"); + + # Create the file object + $oFile = (BackRest::File->new + ( + $strStanza, + BackRestTestCommon_RepoPathGet(), + 'db', + $oLocal + ))->clone(); + + # Create the database + BackRestTestBackup_Create(false); + + # Create db config + BackRestTestCommon_ConfigCreate('db', # local + undef, # remote + false, # compress + undef, # checksum + undef, # hardlink + $iThreadMax, # thread-max + undef, # archive-async + undef); # compress-async + + # Create backup config + BackRestTestCommon_ConfigCreate('backup', # local + undef, # remote + false, # compress + undef, # checksum + undef, # hardlink + $iThreadMax, # thread-max + undef, # archive-async + undef); # compress-async + + # Backups + my @stryBackupExpected; + + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_FULL, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_DIFF, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_INCR, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_FULL, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_DIFF, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_INCR, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_DIFF, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_DIFF, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_FULL, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_DIFF, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_INCR, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_INCR, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_INCR, $strStanza, false, $oFile); + push @stryBackupExpected, BackRestTestBackup_Backup(BACKUP_TYPE_DIFF, $strStanza, false, $oFile); + push @stryBackupExpected, 'latest'; + + # Create an archive log path that will be removed as old on the first archive expire call + $oFile->path_create(PATH_BACKUP_ARCHIVE, '0000000000000000'); + + # Get the expected archive list + my @stryArchiveExpected = $oFile->list(PATH_BACKUP_ARCHIVE, '0000000100000000'); + + # Expire all but the last two fulls + splice(@stryBackupExpected, 0, 3); + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 2); + + # Expire all but the last three diffs + splice(@stryBackupExpected, 1, 3); + BackRestTestBackup_Expire($strStanza, $oFile, 
\@stryBackupExpected, \@stryArchiveExpected, undef, 3); + + # Expire all but the last three diffs and last two fulls (should be no change) + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 2, 3); + + # Expire archive based on the last two fulls + splice(@stryArchiveExpected, 0, 10); + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 2, 3, 'full', 2); + + if ($oFile->exists(PATH_BACKUP_ARCHIVE, '0000000000000000')) + { + confess 'archive log path 0000000000000000 should have been removed'; + } + + # Expire archive based on the last two diffs + splice(@stryArchiveExpected, 0, 18); + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 2, 3, 'diff', 2); + + # Expire archive based on the last two incrs + splice(@stryBackupExpected, 0, 2); + splice(@stryArchiveExpected, 0, 9); + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 1, 2, 'incr', 2); + + # Expire archive based on the last two incrs (no change in archive) + splice(@stryBackupExpected, 1, 4); + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 1, 1, 'incr', 2); + + # Expire archive based on the last two diffs (no change in archive) + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 1, 1, 'diff', 2); + + # Expire archive based on the last diff + splice(@stryArchiveExpected, 0, 3); + BackRestTestBackup_Expire($strStanza, $oFile, \@stryBackupExpected, \@stryArchiveExpected, 1, 1, 'diff', 1); + + # Cleanup + if (BackRestTestCommon_Cleanup()) + { + &log(INFO, 'cleanup'); + BackRestTestBackup_Drop(true); + } + } + + #------------------------------------------------------------------------------------------------------------------------------- + # Test backup + # + # Check the backup and restore functionality using synthetic data. + #------------------------------------------------------------------------------------------------------------------------------- + if ($strTest eq 'all' || $strTest eq 'backup') + { + $iRun = 0; + + &log(INFO, "Test Backup\n"); + + for (my $bRemote = false; $bRemote <= true; $bRemote++) + { + for (my $bCompress = false; $bCompress <= true; $bCompress++) + { + for (my $bHardlink = false; $bHardlink <= true; $bHardlink++) + { + # Increment the run, log, and decide whether this unit test should be run + if (!BackRestTestCommon_Run(++$iRun, + "rmt ${bRemote}, cmp ${bCompress}, hardlink ${bHardlink}")) {next} + + # Get base time + my $lTime = time() - 100000; + + # Build the manifest + my %oManifest; + + $oManifest{backup}{version} = version_get(); + $oManifest{'backup:option'}{compress} = $bCompress ? 'y' : 'n'; + $oManifest{'backup:option'}{hardlink} = $bHardlink ? 'y' : 'n'; + + # Create the test directory + BackRestTestBackup_Create($bRemote, false); + + $oManifest{'backup:path'}{base} = BackRestTestCommon_DbCommonPathGet(); + + BackRestTestBackup_ManifestPathCreate(\%oManifest, 'base'); + + # Create the file object + my $oFile = new BackRest::File + ( + $strStanza, + BackRestTestCommon_RepoPathGet(), + $bRemote ? 'backup' : undef, + $bRemote ? 
$oRemote : $oLocal + ); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, 'base', 'PG_VERSION', '9.3', + 'e1f7a3a299f62225cba076fc6d3d6e677f303482', $lTime); + + # Create base path + BackRestTestBackup_ManifestPathCreate(\%oManifest, 'base', 'base'); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, 'base', 'base/base1.txt', 'BASE', + 'a3b357a3e395e43fcfb19bb13f3c1b5179279593', $lTime); + + # Create tablespace path + BackRestTestBackup_ManifestPathCreate(\%oManifest, 'base', 'pg_tblspc'); + + # Create db config + BackRestTestCommon_ConfigCreate('db', # local + $bRemote ? BACKUP : undef, # remote + $bCompress, # compress + true, # checksum + $bRemote ? undef : $bHardlink, # hardlink + $iThreadMax); # thread-max + + # Create backup config + if ($bRemote) + { + BackRestTestCommon_ConfigCreate('backup', # local + DB, # remote + $bCompress, # compress + true, # checksum + $bHardlink, # hardlink + $iThreadMax); # thread-max + } + + # Full backup + #----------------------------------------------------------------------------------------------------------------------- + my $strType = 'full'; + + BackRestTestBackup_ManifestLinkCreate(\%oManifest, 'base', 'link-test', '/test'); + BackRestTestBackup_ManifestPathCreate(\%oManifest, 'base', 'path-test'); + + my $strFullBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest); + + # Resume Full Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'full'; + + my $strTmpPath = BackRestTestCommon_RepoPathGet() . "/temp/${strStanza}.tmp"; + + BackRestTestCommon_PathMove(BackRestTestCommon_RepoPathGet() . "/backup/${strStanza}/${strFullBackup}", + $strTmpPath, $bRemote); + + $strFullBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, + 'resume', TEST_BACKUP_RESUME); + + # Restore - tests various mode, extra files/paths, missing files/paths + #----------------------------------------------------------------------------------------------------------------------- + my $bDelta = true; + my $bForce = false; + + # Create a path and file that are not in the manifest + BackRestTestBackup_PathCreate(\%oManifest, 'base', 'deleteme'); + BackRestTestBackup_FileCreate(\%oManifest, 'base', 'deleteme/deleteme.txt', 'DELETEME'); + + # Change path mode + BackRestTestBackup_PathMode(\%oManifest, 'base', 'base', '0777'); + + # Change an existing link to the wrong directory + BackRestTestBackup_FileRemove(\%oManifest, 'base', 'link-test'); + BackRestTestBackup_LinkCreate(\%oManifest, 'base', 'link-test', '/wrong'); + + # Remove an path + BackRestTestBackup_PathRemove(\%oManifest, 'base', 'path-test'); + + # Remove a file + BackRestTestBackup_FileRemove(\%oManifest, 'base', 'PG_VERSION'); + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, \%oManifest, undef, $bDelta, $bForce, + undef, undef, undef, undef, undef, undef, + 'add and delete files'); + + # Incr backup - add a tablespace + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'incr'; + BackRestTestBackup_ManifestReference(\%oManifest, $strFullBackup); + + # Add tablespace 1 + BackRestTestBackup_ManifestTablespaceCreate(\%oManifest, 1); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, "tablespace:1", 'tablespace1.txt', 'TBLSPC1', + 'd85de07d6421d90aa9191c11c889bfde43680f0f', $lTime); + + + my $strBackup = 
BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, + 'add tablespace 1'); + + # Resume Incr Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'incr'; + + # Move database from backup to temp + $strTmpPath = BackRestTestCommon_RepoPathGet() . "/temp/${strStanza}.tmp"; + + BackRestTestCommon_PathMove(BackRestTestCommon_RepoPathGet() . "/backup/${strStanza}/${strBackup}", + $strTmpPath, $bRemote); + + # Add tablespace 2 + BackRestTestBackup_ManifestTablespaceCreate(\%oManifest, 2); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, "tablespace:2", 'tablespace2.txt', 'TBLSPC2', + 'dc7f76e43c46101b47acc55ae4d593a9e6983578', $lTime); + + $strBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, + 'resume and add tablespace 2', TEST_BACKUP_RESUME); + + # Resume Diff Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'diff'; + + $strTmpPath = BackRestTestCommon_RepoPathGet() . "/temp/${strStanza}.tmp"; + + BackRestTestCommon_PathMove(BackRestTestCommon_RepoPathGet() . "/backup/${strStanza}/${strBackup}", + $strTmpPath, $bRemote); + + $strBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, + 'cannot resume - new diff', TEST_BACKUP_NORESUME); + + # Restore - + #----------------------------------------------------------------------------------------------------------------------- + $bDelta = false; + + # Fail on used path + BackRestTestBackup_Restore($oFile, $strBackup, $strStanza, $bRemote, \%oManifest, undef, $bDelta, $bForce, + undef, undef, undef, undef, undef, undef, + 'fail on used path', ERROR_RESTORE_PATH_NOT_EMPTY); + # Fail on undef format + BackRestTestBackup_ManifestMunge($oFile, $bRemote, $strBackup, 'backup', 'format', undef, undef); + + BackRestTestBackup_Restore($oFile, $strBackup, $strStanza, $bRemote, \%oManifest, undef, $bDelta, $bForce, + undef, undef, undef, undef, undef, undef, + 'fail on undef format', ERROR_FORMAT); + + # Fail on mismatch format + BackRestTestBackup_ManifestMunge($oFile, $bRemote, $strBackup, 'backup', 'format', undef, 0); + + BackRestTestBackup_Restore($oFile, $strBackup, $strStanza, $bRemote, \%oManifest, undef, $bDelta, $bForce, + undef, undef, undef, undef, undef, undef, + 'fail on mismatch format', ERROR_FORMAT); + + BackRestTestBackup_ManifestMunge($oFile, $bRemote, $strBackup, 'backup', 'format', undef, 3); + + # Remap the base path + my %oRemapHash; + $oRemapHash{base} = BackRestTestCommon_DbCommonPathGet(2); + $oRemapHash{1} = BackRestTestCommon_DbTablespacePathGet(1, 2); + $oRemapHash{2} = BackRestTestCommon_DbTablespacePathGet(2, 2); + + BackRestTestBackup_Restore($oFile, $strBackup, $strStanza, $bRemote, \%oManifest, \%oRemapHash, $bDelta, $bForce, + undef, undef, undef, undef, undef, undef, + 'remap all paths'); + + # Incr Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'incr'; + BackRestTestBackup_ManifestReference(\%oManifest, $strBackup); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, 'base', 'base/base2.txt', 'BASE2', + '09b5e31766be1dba1ec27de82f975c1b6eea2a92', $lTime); + + BackRestTestBackup_ManifestTablespaceDrop(\%oManifest, 1, 2); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, "tablespace:2", 'tablespace2b.txt', 
'TBLSPC2B', + 'e324463005236d83e6e54795dbddd20a74533bf3', $lTime); + + $strBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, + 'add files and remove tablespace 2'); + + # Incr Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'incr'; + BackRestTestBackup_ManifestReference(\%oManifest, $strBackup); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, 'base', 'base/base1.txt', 'BASEUPDT', + '9a53d532e27785e681766c98516a5e93f096a501', $lTime); + + $strBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, 'update files'); + + # Diff Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'diff'; + BackRestTestBackup_ManifestReference(\%oManifest, $strFullBackup, true); + + $strBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, 'no updates'); + + # Incr Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'incr'; + BackRestTestBackup_ManifestReference(\%oManifest, $strBackup); + + BackRestTestBackup_BackupBegin($strType, $strStanza, $bRemote, "remove files - but won't affect manifest", + true, true, 1); + BackRestTestCommon_ExecuteEnd(TEST_MANIFEST_BUILD); + + BackRestTestBackup_FileRemove(\%oManifest, 'base', 'base/base1.txt'); + + $strBackup = BackRestTestBackup_BackupEnd($strType, $oFile, $bRemote, undef, \%oManifest, true); + + # Diff Backup + #----------------------------------------------------------------------------------------------------------------------- + BackRestTestBackup_ManifestReference(\%oManifest, $strFullBackup, true); + + $strType = 'diff'; + + BackRestTestBackup_ManifestFileRemove(\%oManifest, 'base', 'base/base1.txt'); + + BackRestTestBackup_ManifestFileRemove(\%oManifest, "tablespace:2", 'tablespace2b.txt', true); + BackRestTestBackup_ManifestFileCreate(\%oManifest, "tablespace:2", 'tablespace2c.txt', 'TBLSPC2C', + 'ad7df329ab97a1e7d35f1ff0351c079319121836', $lTime); + + BackRestTestBackup_BackupBegin($strType, $strStanza, $bRemote, "remove files during backup", true, true, 1); + BackRestTestCommon_ExecuteEnd(TEST_MANIFEST_BUILD); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, "tablespace:2", 'tablespace2c.txt', 'TBLSPCBIGGER', + 'dfcb8679956b734706cf87259d50c88f83e80e66', $lTime); + + BackRestTestBackup_ManifestFileRemove(\%oManifest, 'base', 'base/base2.txt', true); + + $strBackup = BackRestTestBackup_BackupEnd($strType, $oFile, $bRemote, undef, \%oManifest, true); + + # Full Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'full'; + BackRestTestBackup_ManifestReference(\%oManifest); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, 'base', 'base/base1.txt', 'BASEUPDT2', + '7579ada0808d7f98087a0a586d0df9de009cdc33', $lTime); + + $strFullBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest); + + # Diff Backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = 'diff'; + BackRestTestBackup_ManifestReference(\%oManifest, $strFullBackup); + + BackRestTestBackup_ManifestFileCreate(\%oManifest, 'base', 'base/base2.txt', 'BASE2UPDT', + 
'cafac3c59553f2cfde41ce2e62e7662295f108c0', $lTime); + + $strBackup = BackRestTestBackup_BackupSynthetic($strType, $strStanza, $bRemote, $oFile, \%oManifest, 'add files'); + } + } + } + + if (BackRestTestCommon_Cleanup()) + { + &log(INFO, 'cleanup'); + BackRestTestBackup_Drop(true); } } #------------------------------------------------------------------------------------------------------------------------------- # Test full + # + # Check the entire backup mechanism using actual clusters. Only the archive and start/stop mechanisms need to be tested since + # everything else was tested in the backup test. #------------------------------------------------------------------------------------------------------------------------------- if ($strTest eq 'all' || $strTest eq 'full') { @@ -530,99 +2077,572 @@ sub BackRestTestBackup_Test for (my $bRemote = false; $bRemote <= true; $bRemote++) { - for (my $bLarge = false; $bLarge <= false; $bLarge++) + for (my $bArchiveAsync = false; $bArchiveAsync <= true; $bArchiveAsync++) { - for (my $bCompress = false; $bCompress <= false; $bCompress++) + for (my $bCompress = false; $bCompress <= true; $bCompress++) + { + # Increment the run, log, and decide whether this unit test should be run + if (!BackRestTestCommon_Run(++$iRun, + "rmt ${bRemote}, arc_async ${bArchiveAsync}, cmp ${bCompress}")) {next} + + # Create the file object + my $oFile = new BackRest::File + ( + $strStanza, + BackRestTestCommon_RepoPathGet(), + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal + ); + + # Create the test directory + if ($bCreate) { - for (my $bChecksum = false; $bChecksum <= false; $bChecksum++) + BackRestTestBackup_Create($bRemote); + $bCreate = false; + } + + # Create db config + BackRestTestCommon_ConfigCreate('db', # local + $bRemote ? BACKUP : undef, # remote + $bCompress, # compress + undef, # checksum + $bRemote ? undef : true, # hardlink + $iThreadMax, # thread-max + $bArchiveAsync, # archive-async + undef); # compress-async + + # Create backup config + if ($bRemote) { - for (my $bHardlink = false; $bHardlink <= true; $bHardlink++) - { - for (my $bArchiveAsync = false; $bArchiveAsync <= $bRemote; $bArchiveAsync++) - { - # Increment the run, log, and decide whether this unit test should be run - if (!BackRestTestCommon_Run(++$iRun, - "rmt ${bRemote}, lrg ${bLarge}, cmp ${bCompress}, chk ${bChecksum}, " . - "hardlink ${bHardlink}, arc_async ${bArchiveAsync}")) {next} - - # Create the test directory - if ($bCreate) - { - BackRestTestBackup_Create($bRemote); - $bCreate = false; - } - - # Create db config - BackRestTestCommon_ConfigCreate('db', # local - $bRemote ? REMOTE_BACKUP : undef, # remote - $bCompress, # compress - $bChecksum, # checksum - defined($bRemote) ? undef : $bHardlink, # hardlink - defined($bRemote) ? undef : $iThreadMax, # thread-max - $bArchiveAsync, # archive-async - undef); # compress-async - - # Create backup config - if ($bRemote) - { - BackRestTestCommon_ConfigCreate('backup', # local - $bRemote ? REMOTE_DB : undef, # remote - $bCompress, # compress - $bChecksum, # checksum - $bHardlink, # hardlink - $iThreadMax, # thread-max - undef, # archive-async - undef); # compress-async - } - - # Create the backup command - my $strCommand = BackRestTestCommon_CommandMainGet() . ' --config=' . - ($bRemote ? BackRestTestCommon_BackupPathGet() : BackRestTestCommon_DbPathGet()) . 
- "/pg_backrest.conf --test --type=incr --stanza=${strStanza} backup"; - - - # Run the full/incremental tests - for (my $iFull = 1; $iFull <= 1; $iFull++) - { - - for (my $iIncr = 0; $iIncr <= 2; $iIncr++) - { - &log(INFO, ' ' . ($iIncr == 0 ? ('full ' . sprintf('%02d', $iFull)) : - (' incr ' . sprintf('%02d', $iIncr)))); - - # Create a table in each backup to check references - BackRestTestBackup_PgExecute("create table test_backup_${iIncr} (id int)", true); - - # Create a table to be dropped to test missing file code - BackRestTestBackup_PgExecute('create table test_drop (id int)'); - - BackRestTestCommon_ExecuteBegin($strCommand, $bRemote); - - if (BackRestTestCommon_ExecuteEnd(TEST_MANIFEST_BUILD)) - { - BackRestTestBackup_PgExecute('drop table test_drop', true); - - BackRestTestCommon_ExecuteEnd(); - } - else - { - confess &log(ERROR, 'test point ' . TEST_MANIFEST_BUILD . ' was not found'); - } - } - } - - $bCreate = true; - } - } - } + BackRestTestCommon_ConfigCreate('backup', # local + $bRemote ? DB : undef, # remote + $bCompress, # compress + undef, # checksum + true, # hardlink + $iThreadMax, # thread-max + undef, # archive-async + undef); # compress-async } + + # Static backup parameters + my $bSynthetic = false; + my $fTestDelay = .1; + + # Variable backup parameters + my $bDelta = true; + my $bForce = false; + my $strType = undef; + my $strTarget = undef; + my $bTargetExclusive = false; + my $bTargetResume = false; + my $strTargetTimeline = undef; + my $oRecoveryHashRef = undef; + my $strTestPoint = undef; + my $strComment = undef; + my $iExpectedExitStatus = undef; + + # Restore test string + my $strDefaultMessage = 'default'; + my $strFullMessage = 'full'; + my $strIncrMessage = 'incr'; + my $strTimeMessage = 'time'; + my $strXidMessage = 'xid'; + my $strNameMessage = 'name'; + my $strTimelineMessage = 'timeline3'; + + # Full backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = BACKUP_TYPE_FULL; + $strTestPoint = TEST_MANIFEST_BUILD; + $strComment = 'insert during backup'; + + BackRestTestBackup_PgExecute("create table test (message text not null)"); + BackRestTestBackup_PgSwitchXlog(); + BackRestTestBackup_PgExecute("insert into test values ('$strDefaultMessage')"); + + BackRestTestBackup_BackupBegin($strType, $strStanza, $bRemote, $strComment, $bSynthetic, + defined($strTestPoint), $fTestDelay); + BackRestTestCommon_ExecuteEnd($strTestPoint); + + BackRestTestBackup_PgExecute("update test set message = '$strFullMessage'", false); + + my $strFullBackup = BackRestTestBackup_BackupEnd($strType, $oFile, $bRemote, undef, undef, $bSynthetic); + + # Setup the time target + #----------------------------------------------------------------------------------------------------------------------- + BackRestTestBackup_PgExecute("update test set message = '$strTimeMessage'", false); + BackRestTestBackup_PgSwitchXlog(); + my $strTimeTarget = BackRestTestBackup_PgSelectOne("select to_char(current_timestamp, 'YYYY-MM-DD HH24:MI:SS.US TZ')"); + &log(INFO, " time target is ${strTimeTarget}"); + + # Incr backup + #----------------------------------------------------------------------------------------------------------------------- + $strType = BACKUP_TYPE_INCR; + $strTestPoint = TEST_MANIFEST_BUILD; + $strComment = 'update during backup'; + + BackRestTestBackup_PgExecute("create table test_remove (id int)", false); + BackRestTestBackup_PgSwitchXlog(); + BackRestTestBackup_PgExecute("update test set 
message = '$strDefaultMessage'", false); + BackRestTestBackup_PgSwitchXlog(); + + BackRestTestBackup_BackupBegin($strType, $strStanza, $bRemote, $strComment, $bSynthetic, + defined($strTestPoint), $fTestDelay); + BackRestTestCommon_ExecuteEnd($strTestPoint); + + BackRestTestBackup_PgExecute("drop table test_remove", false); + BackRestTestBackup_PgSwitchXlog(); + BackRestTestBackup_PgExecute("update test set message = '$strIncrMessage'", false); + + my $strIncrBackup = BackRestTestBackup_BackupEnd($strType, $oFile, $bRemote, undef, undef, $bSynthetic); + + # Setup the xid target + #----------------------------------------------------------------------------------------------------------------------- + BackRestTestBackup_PgExecute("update test set message = '$strXidMessage'", false, false); + BackRestTestBackup_PgSwitchXlog(); + my $strXidTarget = BackRestTestBackup_PgSelectOne("select txid_current()"); + BackRestTestBackup_PgCommit(); + &log(INFO, " xid target is ${strXidTarget}"); + + # Setup the name target + #----------------------------------------------------------------------------------------------------------------------- + my $strNameTarget = 'backrest'; + + BackRestTestBackup_PgExecute("update test set message = '$strNameMessage'", false, true); + BackRestTestBackup_PgSwitchXlog(); + BackRestTestBackup_PgExecute("select pg_create_restore_point('${strNameTarget}')", false, false); + + &log(INFO, " name target is ${strNameTarget}"); + + # Restore (type = default) + #----------------------------------------------------------------------------------------------------------------------- + $bDelta = false; + $bForce = false; + $strType = RECOVERY_TYPE_DEFAULT; + $strTarget = undef; + $bTargetExclusive = undef; + $bTargetResume = undef; + $strTargetTimeline = undef; + $oRecoveryHashRef = undef; + $strComment = undef; + $iExpectedExitStatus = undef; + + &log(INFO, " testing recovery type = ${strType}"); + + # Expect failure because postmaster.pid exists + $strComment = 'postmaster running'; + $iExpectedExitStatus = ERROR_POSTMASTER_RUNNING; + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + BackRestTestBackup_ClusterStop(); + + # Expect failure because db path is not empty + $strComment = 'path not empty'; + $iExpectedExitStatus = ERROR_RESTORE_PATH_NOT_EMPTY; + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + # Drop and recreate db path + BackRestTestCommon_PathRemove(BackRestTestCommon_DbCommonPathGet()); + BackRestTestCommon_PathCreate(BackRestTestCommon_DbCommonPathGet()); + + # Now the restore should work + $strComment = undef; + $iExpectedExitStatus = undef; + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + BackRestTestBackup_ClusterStart(); + BackRestTestBackup_PgSelectOneTest('select message from test', $strNameMessage); + + # Restore (restore type = xid, inclusive) + #----------------------------------------------------------------------------------------------------------------------- + $bDelta 
= false;
+ $bForce = true;
+ $strType = RECOVERY_TYPE_XID;
+ $strTarget = $strXidTarget;
+ $bTargetExclusive = undef;
+ $bTargetResume = true;
+ $strTargetTimeline = undef;
+ $oRecoveryHashRef = undef;
+ $strComment = undef;
+ $iExpectedExitStatus = undef;
+
+ &log(INFO, " testing recovery type = ${strType}");
+
+ BackRestTestBackup_ClusterStop();
+
+ BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce,
+ $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline,
+ $oRecoveryHashRef, $strComment, $iExpectedExitStatus);
+
+ # Save the recovery file to the test path so we can use it in the next test
+ $oFile->copy(PATH_ABSOLUTE, BackRestTestCommon_DbCommonPathGet() . '/recovery.conf',
+ PATH_ABSOLUTE, BackRestTestCommon_TestPathGet() . '/recovery.conf');
+
+ BackRestTestBackup_ClusterStart();
+ BackRestTestBackup_PgSelectOneTest('select message from test', $strXidMessage);
+
+ BackRestTestBackup_PgExecute("update test set message = '$strTimelineMessage'", false);
+
+ # Restore (restore type = preserve, inclusive)
+ #-----------------------------------------------------------------------------------------------------------------------
+ $bDelta = true;
+ $bForce = false;
+ $strType = RECOVERY_TYPE_PRESERVE;
+ $strTarget = undef;
+ $bTargetExclusive = undef;
+ $bTargetResume = undef;
+ $strTargetTimeline = undef;
+ $oRecoveryHashRef = undef;
+ $strComment = undef;
+ $iExpectedExitStatus = undef;
+
+ &log(INFO, " testing recovery type = ${strType}");
+
+ BackRestTestBackup_ClusterStop();
+
+ # Restore the recovery file that was saved in the last test
+ $oFile->move(PATH_ABSOLUTE, BackRestTestCommon_TestPathGet() . '/recovery.conf',
+ PATH_ABSOLUTE, BackRestTestCommon_DbCommonPathGet() . '/recovery.conf');
+
+ BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce,
+ $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline,
+ $oRecoveryHashRef, $strComment, $iExpectedExitStatus);
+
+ BackRestTestBackup_ClusterStart();
+ BackRestTestBackup_PgSelectOneTest('select message from test', $strXidMessage);
+
+ BackRestTestBackup_PgExecute("update test set message = '$strTimelineMessage'", false);
+
+ # Restore (restore type = time, inclusive) - there is no exclusive time test because I can't find a way to find the
+ # exact commit time of a transaction.
+ #----------------------------------------------------------------------------------------------------------------------- + $bDelta = true; + $bForce = false; + $strType = RECOVERY_TYPE_TIME; + $strTarget = $strTimeTarget; + $bTargetExclusive = undef; + $bTargetResume = undef; + $strTargetTimeline = undef; + $oRecoveryHashRef = undef; + $strComment = undef; + $iExpectedExitStatus = undef; + + &log(INFO, " testing recovery type = ${strType}"); + + BackRestTestBackup_ClusterStop(); + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + BackRestTestBackup_ClusterStart(); + BackRestTestBackup_PgSelectOneTest('select message from test', $strTimeMessage); + + # Restore (restore type = xid, exclusive) + #----------------------------------------------------------------------------------------------------------------------- + $bDelta = true; + $bForce = false; + $strType = RECOVERY_TYPE_XID; + $strTarget = $strXidTarget; + $bTargetExclusive = true; + $bTargetResume = undef; + $strTargetTimeline = undef; + $oRecoveryHashRef = undef; + $strComment = undef; + $iExpectedExitStatus = undef; + + &log(INFO, " testing recovery type = ${strType}"); + + BackRestTestBackup_ClusterStop(); + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + BackRestTestBackup_ClusterStart(); + BackRestTestBackup_PgSelectOneTest('select message from test', $strIncrMessage); + + # Restore (restore type = name) + #----------------------------------------------------------------------------------------------------------------------- + $bDelta = true; + $bForce = true; + $strType = RECOVERY_TYPE_NAME; + $strTarget = $strNameTarget; + $bTargetExclusive = undef; + $bTargetResume = undef; + $strTargetTimeline = undef; + $oRecoveryHashRef = undef; + $strComment = undef; + $iExpectedExitStatus = undef; + + &log(INFO, " testing recovery type = ${strType}"); + + BackRestTestBackup_ClusterStop(); + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + BackRestTestBackup_ClusterStart(); + BackRestTestBackup_PgSelectOneTest('select message from test', $strNameMessage); + + # Restore (restore type = default, timeline = 3) + #----------------------------------------------------------------------------------------------------------------------- + $bDelta = true; + $bForce = false; + $strType = RECOVERY_TYPE_DEFAULT; + $strTarget = undef; + $bTargetExclusive = undef; + $bTargetResume = undef; + $strTargetTimeline = 3; + $oRecoveryHashRef = {'standy-mode' => 'on'}; + $oRecoveryHashRef = undef; + $strComment = undef; + $iExpectedExitStatus = undef; + + &log(INFO, " testing recovery type = ${strType}"); + + BackRestTestBackup_ClusterStop(); + + BackRestTestBackup_Restore($oFile, $strFullBackup, $strStanza, $bRemote, undef, undef, $bDelta, $bForce, + $strType, $strTarget, $bTargetExclusive, $bTargetResume, $strTargetTimeline, + $oRecoveryHashRef, $strComment, $iExpectedExitStatus); + + BackRestTestBackup_ClusterStart(undef, undef, true); + 
BackRestTestBackup_PgSelectOneTest('select message from test', $strTimelineMessage, 120); + + $bCreate = true; + } } } if (BackRestTestCommon_Cleanup()) { &log(INFO, 'cleanup'); - BackRestTestBackup_Drop(); + BackRestTestBackup_Drop(true); + } + } + + #------------------------------------------------------------------------------------------------------------------------------- + # Test collision + # + # See if it is possible for a table to be written to, have stop backup run, and be written to again all in the same second. + #------------------------------------------------------------------------------------------------------------------------------- + if ($strTest eq 'collision') + { + $iRun = 0; + my $iRunMax = 1000; + + &log(INFO, "Test Backup Collision\n"); + + # Create the file object + my $oFile = (BackRest::File->new + ( + $strStanza, + BackRestTestCommon_RepoPathGet(), + undef, + undef + ))->clone(); + + # Create the test database + BackRestTestBackup_Create(false); + + # Create the config file + BackRestTestCommon_ConfigCreate('db', # local + undef, # remote + false, # compress + false, # checksum + false, # hardlink + $iThreadMax, # thread-max + false, # archive-async + undef); # compress-async + + # Create the test table + BackRestTestBackup_PgExecute("create table test_collision (id int)"); + + # Construct filename to test + my $strFile = BackRestTestCommon_DbCommonPathGet() . "/base"; + + # Get the oid of the postgres db + my $strSql = "select oid from pg_database where datname = 'postgres'"; + my $hStatement = $hDb->prepare($strSql); + + $hStatement->execute() or + confess &log(ERROR, "Unable to execute: ${strSql}"); + + my @oyRow = $hStatement->fetchrow_array(); + $strFile .= '/' . $oyRow[0]; + + $hStatement->finish(); + + # Get the oid of the new table so we can check the file on disk + $strSql = "select oid from pg_class where relname = 'test_collision'"; + $hStatement = $hDb->prepare($strSql); + + $hStatement->execute() or + confess &log(ERROR, "Unable to execute: ${strSql}"); + + @oyRow = $hStatement->fetchrow_array(); + $strFile .= '/' . $oyRow[0]; + + &log(INFO, 'table filename = ' . $strFile); + + $hStatement->finish(); + + BackRestTestBackup_PgExecute("select pg_start_backup('test');"); + + # File modified in the same second after the manifest is taken and file is copied + while ($iRun < $iRunMax) + { + # Increment the run, log, and decide whether this unit test should be run + if (!BackRestTestCommon_Run(++$iRun, + "mod after manifest")) {next} + + my $strTestChecksum = $oFile->hash(PATH_DB_ABSOLUTE, $strFile); + + # Insert a row and do a checkpoint + BackRestTestBackup_PgExecute("insert into test_collision values (1)", true); + + # Stat the file to get size/modtime after the backup has started + my $strBeginChecksum = $oFile->hash(PATH_DB_ABSOLUTE, $strFile); + my $oStat = lstat($strFile); + my $lBeginSize = $oStat->size; + my $lBeginTime = $oStat->mtime; + + # Sleep .5 seconds to give a reasonable amount of time for the file to be copied after the manifest was generated + # Sleep for a while to show there is a large window where this can happen + &log(INFO, 'time ' . gettimeofday()); + hsleep(.5); + &log(INFO, 'time ' . 
gettimeofday());
+
+ # Insert another row
+ BackRestTestBackup_PgExecute("insert into test_collision values (1)");
+
+ # Stop backup, start a new backup
+ BackRestTestBackup_PgExecute("select pg_stop_backup();");
+ BackRestTestBackup_PgExecute("select pg_start_backup('test');");
+
+ # Stat the file to get size/modtime after the backup has restarted
+ my $strEndChecksum = $oFile->hash(PATH_DB_ABSOLUTE, $strFile);
+ $oStat = lstat($strFile);
+ my $lEndSize = $oStat->size;
+ my $lEndTime = $oStat->mtime;
+
+ # Error if the size/modtime are the same between the backups
+ &log(INFO, " begin size = ${lBeginSize}, time = ${lBeginTime}, hash ${strBeginChecksum} - " .
+ "end size = ${lEndSize}, time = ${lEndTime}, hash ${strEndChecksum} - test hash ${strTestChecksum}");
+
+ if ($lBeginSize == $lEndSize && $lBeginTime == $lEndTime &&
+ $strTestChecksum ne $strBeginChecksum && $strBeginChecksum ne $strEndChecksum)
+ {
+ &log(ERROR, "size and mod time are the same between backups");
+ $iRun = $iRunMax;
+ next;
+ }
+ }
+
+ BackRestTestBackup_PgExecute("select pg_stop_backup();");
+
+ if (BackRestTestCommon_Cleanup())
+ {
+ &log(INFO, 'cleanup');
+ BackRestTestBackup_Drop(true);
+ }
+ }
+
+ #-------------------------------------------------------------------------------------------------------------------------------
+ # rsync-collision
+ #
+ # See if it is possible for a file to be modified in the same second after rsync copies it, so that a subsequent rsync does
+ # not detect the change.
+ #-------------------------------------------------------------------------------------------------------------------------------
+ if ($strTest eq 'rsync-collision')
+ {
+ $iRun = 0;
+ my $iRunMax = 1000;
+
+ &log(INFO, "Test Rsync Collision\n");
+
+ # Create the file object
+ my $oFile = (BackRest::File->new
+ (
+ $strStanza,
+ BackRestTestCommon_RepoPathGet(),
+ undef,
+ undef
+ ))->clone();
+
+ # Create the test database
+ BackRestTestBackup_Create(false, false);
+
+ # Create test paths
+ my $strPathRsync1 = BackRestTestCommon_TestPathGet() . "/rsync1";
+ my $strPathRsync2 = BackRestTestCommon_TestPathGet() . "/rsync2";
+
+ BackRestTestCommon_PathCreate($strPathRsync1);
+ BackRestTestCommon_PathCreate($strPathRsync2);
+
+ # Rsync command
+ my $strCommand = "rsync -vvrt ${strPathRsync1}/ ${strPathRsync2}";
+
+ # File modified in the same second after it has been copied by rsync
+ while ($iRun < $iRunMax)
+ {
+ # Increment the run, log, and decide whether this unit test should be run
+ if (!BackRestTestCommon_Run(++$iRun,
+ "rsync test")) {next}
+
+ # Create test file
+ &log(INFO, "create test file");
+ BackRestTestCommon_FileCreate("${strPathRsync1}/test.txt", 'TEST1');
+
+ # Checksum the test file before the first rsync
+ my $strBeginChecksum = $oFile->hash(PATH_DB_ABSOLUTE, "${strPathRsync1}/test.txt");
+
+ # Rsync
+ &log(INFO, "rsync 1st time");
+ BackRestTestCommon_Execute($strCommand, false, false, true);
+
+ # Sleep for a while to show there is a large window where this can happen
+ &log(INFO, 'time ' . gettimeofday());
+ hsleep(.5);
+ &log(INFO, 'time ' .
gettimeofday()); + + # Modify the test file within the same second + &log(INFO, "modify test file"); + BackRestTestCommon_FileCreate("${strPathRsync1}/test.txt", 'TEST2'); + + my $strEndChecksum = $oFile->hash(PATH_DB_ABSOLUTE, "${strPathRsync1}/test.txt"); + + # Rsync again + &log(INFO, "rsync 2nd time"); + BackRestTestCommon_Execute($strCommand, false, false, true); + + my $strTestChecksum = $oFile->hash(PATH_DB_ABSOLUTE, "${strPathRsync2}/test.txt"); + + # Error if checksums are not the same after rsync + &log(INFO, " begin hash ${strBeginChecksum} - end hash ${strEndChecksum} - test hash ${strTestChecksum}"); + + if ($strTestChecksum ne $strEndChecksum) + { + &log(ERROR, "end and test checksums are not the same"); + $iRun = $iRunMax; + next; + } + } + + if (BackRestTestCommon_Cleanup()) + { + &log(INFO, 'cleanup'); + BackRestTestBackup_Drop(true); } } } diff --git a/test/lib/BackRestTest/CommonTest.pm b/test/lib/BackRestTest/CommonTest.pm index 006ec9352..23ec01fb2 100755 --- a/test/lib/BackRestTest/CommonTest.pm +++ b/test/lib/BackRestTest/CommonTest.pm @@ -8,28 +8,36 @@ package BackRestTest::CommonTest; # Perl includes #################################################################################################################################### use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); use File::Basename; +use File::Path qw(remove_tree); use Cwd 'abs_path'; use IPC::Open3; use POSIX ':sys_wait_h'; use IO::Select; +use File::Copy qw(move); use lib dirname($0) . '/../lib'; use BackRest::Utility; +use BackRest::Remote; use BackRest::File; +use BackRest::Manifest; use Exporter qw(import); -our @EXPORT = qw(BackRestTestCommon_Setup BackRestTestCommon_ExecuteBegin BackRestTestCommon_ExecuteEnd - BackRestTestCommon_Execute BackRestTestCommon_ExecuteBackRest - BackRestTestCommon_ConfigCreate BackRestTestCommon_Run BackRestTestCommon_Cleanup - BackRestTestCommon_PgSqlBinPathGet BackRestTestCommon_StanzaGet BackRestTestCommon_CommandMainGet - BackRestTestCommon_CommandRemoteGet BackRestTestCommon_HostGet BackRestTestCommon_UserGet - BackRestTestCommon_GroupGet BackRestTestCommon_UserBackRestGet BackRestTestCommon_TestPathGet - BackRestTestCommon_DataPathGet BackRestTestCommon_BackupPathGet BackRestTestCommon_ArchivePathGet - BackRestTestCommon_DbPathGet BackRestTestCommon_DbCommonPathGet BackRestTestCommon_DbPortGet); +our @EXPORT = qw(BackRestTestCommon_Create BackRestTestCommon_Drop BackRestTestCommon_Setup BackRestTestCommon_ExecuteBegin + BackRestTestCommon_ExecuteEnd BackRestTestCommon_Execute BackRestTestCommon_ExecuteBackRest + BackRestTestCommon_PathCreate BackRestTestCommon_PathMode BackRestTestCommon_PathRemove + BackRestTestCommon_FileCreate BackRestTestCommon_FileRemove BackRestTestCommon_PathCopy BackRestTestCommon_PathMove + BackRestTestCommon_ConfigCreate BackRestTestCommon_ConfigRemap BackRestTestCommon_ConfigRecovery + BackRestTestCommon_Run BackRestTestCommon_Cleanup BackRestTestCommon_PgSqlBinPathGet + BackRestTestCommon_StanzaGet BackRestTestCommon_CommandMainGet BackRestTestCommon_CommandRemoteGet + BackRestTestCommon_HostGet BackRestTestCommon_UserGet BackRestTestCommon_GroupGet + BackRestTestCommon_UserBackRestGet BackRestTestCommon_TestPathGet BackRestTestCommon_DataPathGet + BackRestTestCommon_RepoPathGet BackRestTestCommon_LocalPathGet BackRestTestCommon_DbPathGet + BackRestTestCommon_DbCommonPathGet BackRestTestCommon_ClusterStop BackRestTestCommon_DbTablespacePathGet + BackRestTestCommon_DbPortGet); my 
$strPgSqlBin; my $strCommonStanza; @@ -42,10 +50,11 @@ my $strCommonGroup; my $strCommonUserBackRest; my $strCommonTestPath; my $strCommonDataPath; -my $strCommonBackupPath; -my $strCommonArchivePath; +my $strCommonRepoPath; +my $strCommonLocalPath; my $strCommonDbPath; my $strCommonDbCommonPath; +my $strCommonDbTablespacePath; my $iCommonDbPort; my $iModuleTestRun; my $bDryRun; @@ -59,8 +68,58 @@ my $hOut; my $pId; my $strCommand; + #################################################################################################################################### -# BackRestTestBackup_Run +# BackRestTestCommon_ClusterStop +#################################################################################################################################### +sub BackRestTestCommon_ClusterStop +{ + my $strPath = shift; + my $bImmediate = shift; + + # Set default + $strPath = defined($strPath) ? $strPath : BackRestTestCommon_DbCommonPathGet(); + $bImmediate = defined($bImmediate) ? $bImmediate : false; + + # If postmaster process is running then stop the cluster + if (-e $strPath . '/postmaster.pid') + { + BackRestTestCommon_Execute(BackRestTestCommon_PgSqlBinPathGet() . "/pg_ctl stop -D ${strPath} -w -s -m " . + ($bImmediate ? 'immediate' : 'fast')); + } +} + +#################################################################################################################################### +# BackRestTestCommon_Drop +#################################################################################################################################### +sub BackRestTestCommon_Drop +{ + # Drop the cluster if it exists + BackRestTestCommon_ClusterStop(BackRestTestCommon_DbCommonPathGet(), true); + + # Remove the backrest private directory + while (-e BackRestTestCommon_RepoPathGet()) + { + BackRestTestCommon_PathRemove(BackRestTestCommon_RepoPathGet(), true, true); + BackRestTestCommon_PathRemove(BackRestTestCommon_RepoPathGet(), false, true); + hsleep(.1); + } + + # Remove the test directory + BackRestTestCommon_PathRemove(BackRestTestCommon_TestPathGet()); +} + +#################################################################################################################################### +# BackRestTestCommon_Create +#################################################################################################################################### +sub BackRestTestCommon_Create +{ + # Create the test directory + BackRestTestCommon_PathCreate(BackRestTestCommon_TestPathGet(), '0770'); +} + +#################################################################################################################################### +# BackRestTestCommon_Run #################################################################################################################################### sub BackRestTestCommon_Run { @@ -83,7 +142,7 @@ sub BackRestTestCommon_Run } #################################################################################################################################### -# BackRestTestBackup_Cleanup +# BackRestTestCommon_Cleanup #################################################################################################################################### sub BackRestTestCommon_Cleanup { @@ -91,7 +150,7 @@ sub BackRestTestCommon_Cleanup } #################################################################################################################################### -# BackRestTestBackup_ExecuteBegin +# BackRestTestCommon_ExecuteBegin 
#################################################################################################################################### sub BackRestTestCommon_ExecuteBegin { @@ -122,15 +181,18 @@ sub BackRestTestCommon_ExecuteBegin } #################################################################################################################################### -# BackRestTestBackup_ExecuteEnd +# BackRestTestCommon_ExecuteEnd #################################################################################################################################### sub BackRestTestCommon_ExecuteEnd { my $strTest = shift; my $bSuppressError = shift; + my $bShowOutput = shift; + my $iExpectedExitStatus = shift; # Set defaults $bSuppressError = defined($bSuppressError) ? $bSuppressError : false; + $bShowOutput = defined($bShowOutput) ? $bShowOutput : false; # Create select objects my $oErrorSelect = IO::Select->new(); @@ -169,15 +231,34 @@ sub BackRestTestCommon_ExecuteEnd # Check the exit status and output an error if needed my $iExitStatus = ${^CHILD_ERROR_NATIVE} >> 8; - if ($iExitStatus != 0 && !$bSuppressError) + if (defined($iExpectedExitStatus) && $iExitStatus == $iExpectedExitStatus) { - confess &log(ERROR, "command '${strCommand}' returned " . $iExitStatus . "\n" . - ($strOutLog ne '' ? "STDOUT:\n${strOutLog}" : '') . - ($strErrorLog ne '' ? "STDERR:\n${strErrorLog}" : '')); + return $iExitStatus; } - else + + if ($iExitStatus != 0 || (defined($iExpectedExitStatus) && $iExitStatus != $iExpectedExitStatus)) { - &log(DEBUG, "suppressed error was ${iExitStatus}"); + if ($bSuppressError) + { + &log(DEBUG, "suppressed error was ${iExitStatus}"); + } + else + { + confess &log(ERROR, "command '${strCommand}' returned " . $iExitStatus . + (defined($iExpectedExitStatus) ? ", but ${iExpectedExitStatus} was expected" : '') . "\n" . + ($strOutLog ne '' ? "STDOUT:\n${strOutLog}" : '') . + ($strErrorLog ne '' ? "STDERR:\n${strErrorLog}" : '')); + } + } + + if ($bShowOutput) + { + print "output:\n${strOutLog}\n"; + } + + if (defined($strTest)) + { + confess &log(ASSERT, "test point ${strTest} was not found"); } $hError = undef; @@ -187,16 +268,159 @@ sub BackRestTestCommon_ExecuteEnd } #################################################################################################################################### -# BackRestTestBackup_Execute +# BackRestTestCommon_Execute #################################################################################################################################### sub BackRestTestCommon_Execute { my $strCommand = shift; my $bRemote = shift; my $bSuppressError = shift; + my $bShowOutput = shift; + my $iExpectedExitStatus = shift; BackRestTestCommon_ExecuteBegin($strCommand, $bRemote); - return BackRestTestCommon_ExecuteEnd(undef, $bSuppressError); + return BackRestTestCommon_ExecuteEnd(undef, $bSuppressError, $bShowOutput, $iExpectedExitStatus); +} + +#################################################################################################################################### +# BackRestTestCommon_PathCreate +# +# Create a path and set mode. +#################################################################################################################################### +sub BackRestTestCommon_PathCreate +{ + my $strPath = shift; + my $strMode = shift; + + # Create the path + mkdir($strPath) + or confess "unable to create ${strPath} path"; + + # Set the mode + chmod(oct(defined($strMode) ? 
$strMode : '0700'), $strPath)
+ or confess "unable to set mode ${strMode} for ${strPath}";
+}
+
+####################################################################################################################################
+# BackRestTestCommon_PathMode
+#
+# Set mode of an existing path.
+####################################################################################################################################
+sub BackRestTestCommon_PathMode
+{
+ my $strPath = shift;
+ my $strMode = shift;
+
+ # Set the mode
+ chmod(oct($strMode), $strPath)
+ or confess "unable to set mode ${strMode} for ${strPath}";
+}
+
+####################################################################################################################################
+# BackRestTestCommon_PathRemove
+#
+# Remove a path and all subpaths.
+####################################################################################################################################
+sub BackRestTestCommon_PathRemove
+{
+ my $strPath = shift;
+ my $bRemote = shift;
+ my $bSuppressError = shift;
+
+ BackRestTestCommon_Execute('rm -rf ' . $strPath, $bRemote, $bSuppressError);
+
+ # remove_tree($strPath, {result => \my $oError});
+ #
+ # if (@$oError)
+ # {
+ # my $strMessage = "error(s) occurred while removing ${strPath}:";
+ #
+ # for my $strFile (@$oError)
+ # {
+ # $strMessage .= "\nunable to remove: " . $strFile;
+ # }
+ #
+ # confess $strMessage;
+ # }
+}
+
+####################################################################################################################################
+# BackRestTestCommon_PathCopy
+#
+# Copy a path.
+####################################################################################################################################
+sub BackRestTestCommon_PathCopy
+{
+ my $strSourcePath = shift;
+ my $strDestinationPath = shift;
+ my $bRemote = shift;
+ my $bSuppressError = shift;
+
+ BackRestTestCommon_Execute("cp -rp ${strSourcePath} ${strDestinationPath}", $bRemote, $bSuppressError);
+}
+
+####################################################################################################################################
+# BackRestTestCommon_PathMove
+#
+# Move a path by copying it to the destination and then removing the source.
+####################################################################################################################################
+sub BackRestTestCommon_PathMove
+{
+ my $strSourcePath = shift;
+ my $strDestinationPath = shift;
+ my $bRemote = shift;
+ my $bSuppressError = shift;
+
+ BackRestTestCommon_PathCopy($strSourcePath, $strDestinationPath, $bRemote, $bSuppressError);
+ BackRestTestCommon_PathRemove($strSourcePath, $bRemote, $bSuppressError);
+}
+
+####################################################################################################################################
+# BackRestTestCommon_FileCreate
+#
+# Create a file specifying content, mode, and time.
+####################################################################################################################################
+sub BackRestTestCommon_FileCreate
+{
+ my $strFile = shift;
+ my $strContent = shift;
+ my $lTime = shift;
+ my $strMode = shift;
+
+ # Open the file and save strContent to it
+ my $hFile;
+
+ open($hFile, '>', $strFile)
+ or confess "unable to open ${strFile} for writing";
+
+ syswrite($hFile, $strContent)
+ or confess "unable to write to ${strFile}: $!";
+
+ close($hFile);
+
+ # Set the time
+ if (defined($lTime))
+ {
+ utime($lTime, $lTime, $strFile)
+ or confess "unable to set time ${lTime} for ${strFile}";
+ }
+
+ # Set the mode
+ chmod(oct(defined($strMode) ? $strMode : '0600'), $strFile)
+ or confess "unable to set mode ${strMode} for ${strFile}";
+}
+
+####################################################################################################################################
+# BackRestTestCommon_FileRemove
+#
+# Remove a file.
+####################################################################################################################################
+sub BackRestTestCommon_FileRemove
+{
+ my $strFile = shift;
+
+ unlink($strFile)
+ or confess "unable to remove ${strFile}: $!";
 }
 
 ####################################################################################################################################
@@ -230,10 +454,11 @@ sub BackRestTestCommon_Setup
 }
 
 $strCommonDataPath = "${strBasePath}/test/data";
- $strCommonBackupPath = "${strCommonTestPath}/backrest";
- $strCommonArchivePath = "${strCommonTestPath}/archive";
+ $strCommonRepoPath = "${strCommonTestPath}/backrest";
+ $strCommonLocalPath = "${strCommonTestPath}/local";
 $strCommonDbPath = "${strCommonTestPath}/db";
 $strCommonDbCommonPath = "${strCommonTestPath}/db/common";
+ $strCommonDbTablespacePath = "${strCommonTestPath}/db/tablespace";
 
 $strCommonCommandMain = "${strBasePath}/bin/pg_backrest.pl";
 $strCommonCommandRemote = "${strBasePath}/bin/pg_backrest_remote.pl";
@@ -245,6 +470,116 @@ sub BackRestTestCommon_Setup
 $bNoCleanup = $bNoCleanupParam;
 }
 
+####################################################################################################################################
+# BackRestTestCommon_ConfigRemap
+####################################################################################################################################
+sub BackRestTestCommon_ConfigRemap
+{
+ my $oRemapHashRef = shift;
+ my $oManifestRef = shift;
+ my $bRemote = shift;
+
+ # Create config filename
+ my $strConfigFile = BackRestTestCommon_DbPathGet() . '/pg_backrest.conf';
+ my $strStanza = BackRestTestCommon_StanzaGet();
+
+ # Load Config file
+ my %oConfig;
+ ini_load($strConfigFile, \%oConfig);
+
+ # Load remote config file
+ my %oRemoteConfig;
+ my $strRemoteConfigFile = BackRestTestCommon_TestPathGet() . '/pg_backrest.conf.remote';
+
+ if ($bRemote)
+ {
+ BackRestTestCommon_Execute("mv " . BackRestTestCommon_RepoPathGet() .
"/pg_backrest.conf ${strRemoteConfigFile}", true); + ini_load($strRemoteConfigFile, \%oRemoteConfig); + } + + # Rewrite remap section + delete($oConfig{"${strStanza}:restore:tablespace-map"}); + + foreach my $strRemap (sort(keys $oRemapHashRef)) + { + my $strRemapPath = ${$oRemapHashRef}{$strRemap}; + + if ($strRemap eq 'base') + { + $oConfig{$strStanza}{'db-path'} = $strRemapPath; + ${$oManifestRef}{'backup:path'}{base} = $strRemapPath; + + if ($bRemote) + { + $oRemoteConfig{$strStanza}{'db-path'} = $strRemapPath; + } + } + else + { + $oConfig{"${strStanza}:restore:tablespace-map"}{$strRemap} = $strRemapPath; + + ${$oManifestRef}{'backup:path'}{"tablespace:${strRemap}"} = $strRemapPath; + ${$oManifestRef}{'backup:tablespace'}{$strRemap}{'path'} = $strRemapPath; + ${$oManifestRef}{'base:link'}{"pg_tblspc/${strRemap}"}{'link_destination'} = $strRemapPath; + } + } + + # Resave the config file + ini_save($strConfigFile, \%oConfig); + + # Load remote config file + if ($bRemote) + { + ini_save($strRemoteConfigFile, \%oRemoteConfig); + BackRestTestCommon_Execute("mv ${strRemoteConfigFile} " . BackRestTestCommon_RepoPathGet() . '/pg_backrest.conf', true); + } +} + +#################################################################################################################################### +# BackRestTestCommon_ConfigRecovery +#################################################################################################################################### +sub BackRestTestCommon_ConfigRecovery +{ + my $oRecoveryHashRef = shift; + my $bRemote = shift; + + # Create config filename + my $strConfigFile = BackRestTestCommon_DbPathGet() . '/pg_backrest.conf'; + my $strStanza = BackRestTestCommon_StanzaGet(); + + # Load Config file + my %oConfig; + ini_load($strConfigFile, \%oConfig); + + # Load remote config file + my %oRemoteConfig; + my $strRemoteConfigFile = BackRestTestCommon_TestPathGet() . '/pg_backrest.conf.remote'; + + if ($bRemote) + { + BackRestTestCommon_Execute("mv " . BackRestTestCommon_RepoPathGet() . "/pg_backrest.conf ${strRemoteConfigFile}", true); + ini_load($strRemoteConfigFile, \%oRemoteConfig); + } + + # Rewrite remap section + delete($oConfig{"${strStanza}:recovery:option"}); + + foreach my $strOption (sort(keys $oRecoveryHashRef)) + { + $oConfig{"${strStanza}:recovery:option"}{$strOption} = ${$oRecoveryHashRef}{$strOption}; + } + + # Resave the config file + ini_save($strConfigFile, \%oConfig); + + # Load remote config file + if ($bRemote) + { + ini_save($strRemoteConfigFile, \%oRemoteConfig); + BackRestTestCommon_Execute("mv ${strRemoteConfigFile} " . BackRestTestCommon_RepoPathGet() . 
'/pg_backrest.conf', true); + } +} + #################################################################################################################################### # BackRestTestCommon_ConfigCreate #################################################################################################################################### @@ -256,54 +591,64 @@ sub BackRestTestCommon_ConfigCreate my $bChecksum = shift; my $bHardlink = shift; my $iThreadMax = shift; - my $bArchiveLocal = shift; + my $bArchiveAsync = shift; my $bCompressAsync = shift; my %oParamHash; if (defined($strRemote)) { - $oParamHash{'global:command'}{'remote'} = $strCommonCommandRemote; + $oParamHash{'global:command'}{'cmd-remote'} = $strCommonCommandRemote; } - $oParamHash{'global:command'}{'psql'} = $strCommonCommandPsql; + $oParamHash{'global:command'}{'cmd-psql'} = $strCommonCommandPsql; - if (defined($strRemote) && $strRemote eq REMOTE_BACKUP) + if (defined($strRemote) && $strRemote eq BACKUP) { - $oParamHash{'global:backup'}{'host'} = $strCommonHost; - $oParamHash{'global:backup'}{'user'} = $strCommonUserBackRest; + $oParamHash{'global:backup'}{'backup-host'} = $strCommonHost; + $oParamHash{'global:backup'}{'backup-user'} = $strCommonUserBackRest; } - elsif (defined($strRemote) && $strRemote eq REMOTE_DB) + elsif (defined($strRemote) && $strRemote eq DB) { - $oParamHash{$strCommonStanza}{'host'} = $strCommonHost; - $oParamHash{$strCommonStanza}{'user'} = $strCommonUser; + $oParamHash{$strCommonStanza}{'db-host'} = $strCommonHost; + $oParamHash{$strCommonStanza}{'db-user'} = $strCommonUser; } - $oParamHash{'global:log'}{'level-console'} = 'error'; - $oParamHash{'global:log'}{'level-file'} = 'trace'; + $oParamHash{'global:log'}{'log-level-console'} = 'error'; + $oParamHash{'global:log'}{'log-level-file'} = 'trace'; - if ($strLocal eq REMOTE_BACKUP) + if ($strLocal eq BACKUP) { - if (defined($bHardlink) && $bHardlink) - { - $oParamHash{'global:backup'}{'hardlink'} = 'y'; - } + $oParamHash{'global:general'}{'repo-path'} = $strCommonRepoPath; } - elsif ($strLocal eq REMOTE_DB) + elsif ($strLocal eq DB) { + $oParamHash{'global:general'}{'repo-path'} = $strCommonLocalPath; + if (defined($strRemote)) { - $oParamHash{'global:log'}{'level-console'} = 'trace'; + $oParamHash{'global:log'}{'log-level-console'} = 'trace'; + + # if ($bArchiveAsync) + # { + # $oParamHash{'global:archive'}{path} = BackRestTestCommon_LocalPathGet(); + # } + + $oParamHash{'global:general'}{'repo-remote-path'} = $strCommonRepoPath; + } + else + { + $oParamHash{'global:general'}{'repo-path'} = $strCommonRepoPath; } - if ($bArchiveLocal) + if ($bArchiveAsync) { - $oParamHash{'global:archive'}{path} = BackRestTestCommon_ArchivePathGet(); - - if (!$bCompressAsync) - { - $oParamHash{'global:archive'}{'compress_async'} = 'n'; - } + $oParamHash{'global:archive'}{'archive-async'} = 'y'; + # + # if (!$bCompressAsync) + # { + # $oParamHash{'global:archive'}{'compress_async'} = 'n'; + # } } } else @@ -311,32 +656,37 @@ sub BackRestTestCommon_ConfigCreate confess "invalid local type ${strLocal}"; } - if (($strLocal eq REMOTE_BACKUP) || ($strLocal eq REMOTE_DB && !defined($strRemote))) + if (defined($iThreadMax) && $iThreadMax > 1) { - $oParamHash{'db:command:option'}{'psql'} = "--port=${iCommonDbPort}"; + $oParamHash{'global:general'}{'thread-max'} = $iThreadMax; + } + + if (($strLocal eq BACKUP) || ($strLocal eq DB && !defined($strRemote))) + { + $oParamHash{'db:command'}{'cmd-psql-option'} = "--port=${iCommonDbPort}"; + $oParamHash{'global:backup'}{'thread-max'} 
= $iThreadMax; + + if (defined($bHardlink) && $bHardlink) + { + $oParamHash{'global:backup'}{'hardlink'} = 'y'; + } } if (defined($bCompress) && !$bCompress) { - $oParamHash{'global:backup'}{'compress'} = 'n'; + $oParamHash{'global:general'}{'compress'} = 'n'; } - if (defined($bChecksum) && !$bChecksum) - { - $oParamHash{'global:backup'}{'checksum'} = 'n'; - } + # if (defined($bChecksum) && $bChecksum) + # { + # $oParamHash{'global:backup'}{'checksum'} = 'y'; + # } - $oParamHash{$strCommonStanza}{'path'} = $strCommonDbCommonPath; - $oParamHash{'global:backup'}{'path'} = $strCommonBackupPath; - - if (defined($iThreadMax)) - { - $oParamHash{'global:backup'}{'thread-max'} = $iThreadMax; - } + $oParamHash{$strCommonStanza}{'db-path'} = $strCommonDbCommonPath; # Write out the configuration file my $strFile = BackRestTestCommon_TestPathGet() . '/pg_backrest.conf'; - config_save($strFile, \%oParamHash); + ini_save($strFile, \%oParamHash); # Move the configuration file based on local if ($strLocal eq 'db') @@ -346,12 +696,12 @@ sub BackRestTestCommon_ConfigCreate } elsif ($strLocal eq 'backup' && !defined($strRemote)) { - rename($strFile, BackRestTestCommon_BackupPathGet() . '/pg_backrest.conf') - or die "unable to move ${strFile} to " . BackRestTestCommon_BackupPathGet() . '/pg_backrest.conf path'; + rename($strFile, BackRestTestCommon_RepoPathGet() . '/pg_backrest.conf') + or die "unable to move ${strFile} to " . BackRestTestCommon_RepoPathGet() . '/pg_backrest.conf path'; } else { - BackRestTestCommon_Execute("mv ${strFile} " . BackRestTestCommon_BackupPathGet() . '/pg_backrest.conf', true); + BackRestTestCommon_Execute("mv ${strFile} " . BackRestTestCommon_RepoPathGet() . '/pg_backrest.conf', true); } } @@ -408,14 +758,14 @@ sub BackRestTestCommon_DataPathGet return $strCommonDataPath; } -sub BackRestTestCommon_BackupPathGet +sub BackRestTestCommon_RepoPathGet { - return $strCommonBackupPath; + return $strCommonRepoPath; } -sub BackRestTestCommon_ArchivePathGet +sub BackRestTestCommon_LocalPathGet { - return $strCommonArchivePath; + return $strCommonLocalPath; } sub BackRestTestCommon_DbPathGet @@ -425,7 +775,17 @@ sub BackRestTestCommon_DbPathGet sub BackRestTestCommon_DbCommonPathGet { - return $strCommonDbCommonPath; + my $iIndex = shift; + + return $strCommonDbCommonPath . (defined($iIndex) ? "-${iIndex}" : ''); +} + +sub BackRestTestCommon_DbTablespacePathGet +{ + my $iTablespace = shift; + my $iIndex = shift; + + return $strCommonDbTablespacePath . (defined($iTablespace) ? "/ts${iTablespace}" . (defined($iIndex) ? 
"-${iIndex}" : '') : ''); } sub BackRestTestCommon_DbPortGet diff --git a/test/lib/BackRestTest/ConfigTest.pm b/test/lib/BackRestTest/ConfigTest.pm new file mode 100755 index 000000000..0454387ed --- /dev/null +++ b/test/lib/BackRestTest/ConfigTest.pm @@ -0,0 +1,816 @@ +#!/usr/bin/perl +#################################################################################################################################### +# ConfigTest.pl - Unit Tests for BackRest::Param and BackRest::Config +#################################################################################################################################### +package BackRestTest::ConfigTest; + +#################################################################################################################################### +# Perl includes +#################################################################################################################################### +use strict; +use warnings FATAL => qw(all); +use Carp qw(confess); + +use File::Basename qw(dirname); +use Cwd qw(abs_path); +use Scalar::Util 'blessed'; +#use Data::Dumper qw(Dumper); +#use Scalar::Util qw(blessed); +# use Test::More qw(no_plan); +# use Test::Deep; + +use lib dirname($0) . '/../lib'; +use BackRest::Exception; +use BackRest::Utility; +use BackRest::Config; + +use BackRestTest::CommonTest; + +use Exporter qw(import); +our @EXPORT = qw(BackRestTestConfig_Test); + +sub optionSetTest +{ + my $oOption = shift; + my $strKey = shift; + my $strValue = shift; + + $$oOption{option}{$strKey} = $strValue; +} + +sub optionSetBoolTest +{ + my $oOption = shift; + my $strKey = shift; + my $bValue = shift; + + $$oOption{boolean}{$strKey} = defined($bValue) ? $bValue : true; +} + +sub operationSetTest +{ + my $oOption = shift; + my $strOperation = shift; + + $$oOption{operation} = $strOperation; +} + +sub optionRemoveTest +{ + my $oOption = shift; + my $strKey = shift; + + delete($$oOption{option}{$strKey}); + delete($$oOption{boolean}{$strKey}); +} + +sub argvWriteTest +{ + my $oOption = shift; + + @ARGV = (); + + if (defined($$oOption{boolean})) + { + foreach my $strKey (keys $$oOption{boolean}) + { + if ($$oOption{boolean}{$strKey}) + { + $ARGV[@ARGV] = "--${strKey}"; + } + else + { + $ARGV[@ARGV] = "--no-${strKey}"; + } + } + } + + if (defined($$oOption{option})) + { + foreach my $strKey (keys $$oOption{option}) + { + $ARGV[@ARGV] = "--${strKey}=$$oOption{option}{$strKey}"; + } + } + + $ARGV[@ARGV] = $$oOption{operation}; + + &log(INFO, " command line: " . join(" ", @ARGV)); + + %$oOption = (); +} + +sub configLoadExpect +{ + my $oOption = shift; + my $strOperation = shift; + my $iExpectedError = shift; + my $strErrorParam1 = shift; + my $strErrorParam2 = shift; + my $strErrorParam3 = shift; + + my $oOptionRuleExpected = optionRuleGet(); + + operationSetTest($oOption, $strOperation); + argvWriteTest($oOption); + + eval + { + configLoad(); + }; + + if ($@) + { + if (!defined($iExpectedError)) + { + confess $@; + } + + my $oMessage = $@; + + if (blessed($oMessage) && $oMessage->isa('BackRest::Exception')) + { + if ($oMessage->code() != $iExpectedError) + { + confess "expected error ${iExpectedError} from configLoad but got " . $oMessage->code() . + " '" . $oMessage->message() . 
"'"; + } + + my $strError; + + if ($iExpectedError == ERROR_OPTION_REQUIRED) + { + $strError = "backup operation requires option: ${strErrorParam1}"; + } + elsif ($iExpectedError == ERROR_OPERATION_REQUIRED) + { + $strError = "operation must be specified"; + } + elsif ($iExpectedError == ERROR_OPTION_INVALID) + { + $strError = "option '${strErrorParam1}' not valid without option '${strErrorParam2}'"; + + if (defined($strErrorParam3)) + { + $strError .= @{$strErrorParam3} == 1 ? " = '$$strErrorParam3[0]'" : + " in ('" . join("', '",@{ $strErrorParam3}) . "')"; + } + } + elsif ($iExpectedError == ERROR_OPTION_INVALID_VALUE) + { + $strError = "'${strErrorParam1}' is not valid for '${strErrorParam2}' option"; + } + elsif ($iExpectedError == ERROR_OPTION_INVALID_RANGE) + { + $strError = "'${strErrorParam1}' is not valid for '${strErrorParam2}' option"; + } + elsif ($iExpectedError == ERROR_OPTION_INVALID_PAIR) + { + $strError = "'${strErrorParam1}' not valid key/value for '${strErrorParam2}' option"; + } + elsif ($iExpectedError == ERROR_OPTION_NEGATE) + { + $strError = "option '${strErrorParam1}' cannot be both set and negated"; + } + elsif ($iExpectedError == ERROR_FILE_INVALID) + { + $strError = "'${strErrorParam1}' is not a file"; + } + else + { + confess "must construct message for error ${iExpectedError}, use this as an example: '" . $oMessage->message() . "'"; + } + + if ($oMessage->message() ne $strError) + { + confess "expected error message \"${strError}\" from configLoad but got \"" . $oMessage->message() . "\""; + } + } + else + { + confess "configLoad should throw BackRest::Exception:\n$oMessage"; + } + } + else + { + if (defined($iExpectedError)) + { + confess "expected error ${iExpectedError} from configLoad but got success"; + } + } + + # cmp_deeply(OPTION_rule_get(), $oOptionRuleExpected, 'compare original and new rule hashes') + # or die 'comparison failed'; +} + +sub optionTestExpect +{ + my $strOption = shift; + my $strExpectedValue = shift; + my $strExpectedKey = shift; + + if (defined($strExpectedValue)) + { + my $strActualValue = optionGet($strOption); + + if (defined($strExpectedKey)) + { + # use Data::Dumper; + # &log(INFO, Dumper($strActualValue)); + # exit 0; + + $strActualValue = $$strActualValue{$strExpectedKey}; + } + + if (!defined($strActualValue)) + { + confess "expected option ${strOption} to have value ${strExpectedValue} but [undef] found instead"; + } + + $strActualValue eq $strExpectedValue + or confess "expected option ${strOption} to have value ${strExpectedValue} but ${strActualValue} found instead"; + } + elsif (optionTest($strOption)) + { + confess "expected option ${strOption} to be [undef], but " . optionGet($strOption) . ' found instead'; + } +} + +#################################################################################################################################### +# BackRestTestConfig_Test +#################################################################################################################################### +sub BackRestTestConfig_Test +{ + my $strTest = shift; + + # Setup test variables + my $iRun; + my $bCreate; + my $strStanza = 'main'; + my $oOption = {}; + my $oConfig = {}; + my @oyArray; + my $strConfigFile = BackRestTestCommon_TestPathGet() . 
'/pg_backrest.conf'; + + use constant BOGUS => 'bogus'; + + # Print test banner + &log(INFO, 'CONFIG MODULE ******************************************************************'); + BackRestTestCommon_Drop(); + + #------------------------------------------------------------------------------------------------------------------------------- + # Test command-line options + #------------------------------------------------------------------------------------------------------------------------------- + if ($strTest eq 'all' || $strTest eq 'option') + { + $iRun = 0; + &log(INFO, "Option module\n"); + + if (BackRestTestCommon_Run(++$iRun, 'backup with no stanza')) + { + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_REQUIRED, OPTION_STANZA); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup with boolean stanza')) + { + optionSetBoolTest($oOption, OPTION_STANZA); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPERATION_REQUIRED); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup type defaults to ' . BACKUP_TYPE_INCR)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_TYPE, BACKUP_TYPE_INCR); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup type set to ' . BACKUP_TYPE_FULL)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_TYPE, BACKUP_TYPE_FULL); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_TYPE, BACKUP_TYPE_FULL); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup type invalid')) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_TYPE, BOGUS); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, BOGUS, OPTION_TYPE); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup invalid force')) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetBoolTest($oOption, OPTION_FORCE); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID, OPTION_FORCE, OPTION_NO_START_STOP); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup valid force')) + { + # $oOption = {}; + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetBoolTest($oOption, OPTION_NO_START_STOP); + optionSetBoolTest($oOption, OPTION_FORCE); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_NO_START_STOP, true); + optionTestExpect(OPTION_FORCE, true); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup invalid value for ' . OPTION_TEST_DELAY)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetBoolTest($oOption, OPTION_TEST); + optionSetTest($oOption, OPTION_TEST_DELAY, BOGUS); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, BOGUS, OPTION_TEST_DELAY); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup invalid ' . OPTION_TEST_DELAY)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_TEST_DELAY, 5); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID, OPTION_TEST_DELAY, OPTION_TEST); + } + + if (BackRestTestCommon_Run(++$iRun, 'backup check ' . OPTION_TEST_DELAY . 
' undef')) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_TEST_DELAY); + } + + if (BackRestTestCommon_Run(++$iRun, 'restore invalid ' . OPTION_TARGET)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_TYPE, RECOVERY_TYPE_DEFAULT); + optionSetTest($oOption, OPTION_TARGET, BOGUS); + + @oyArray = (RECOVERY_TYPE_NAME, RECOVERY_TYPE_TIME, RECOVERY_TYPE_XID); + configLoadExpect($oOption, OP_RESTORE, ERROR_OPTION_INVALID, OPTION_TARGET, OPTION_TYPE, \@oyArray); + } + + if (BackRestTestCommon_Run(++$iRun, 'restore ' . OPTION_TARGET)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_TYPE, RECOVERY_TYPE_NAME); + optionSetTest($oOption, OPTION_TARGET, BOGUS); + + configLoadExpect($oOption, OP_RESTORE); + optionTestExpect(OPTION_TYPE, RECOVERY_TYPE_NAME); + optionTestExpect(OPTION_TARGET, BOGUS); + optionTestExpect(OPTION_TARGET_TIMELINE); + } + + if (BackRestTestCommon_Run(++$iRun, 'invalid string ' . OPTION_THREAD_MAX)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_THREAD_MAX, BOGUS); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, BOGUS, OPTION_THREAD_MAX); + } + + if (BackRestTestCommon_Run(++$iRun, 'invalid float ' . OPTION_THREAD_MAX)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_THREAD_MAX, '0.0'); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, '0.0', OPTION_THREAD_MAX); + } + + if (BackRestTestCommon_Run(++$iRun, 'valid ' . OPTION_THREAD_MAX)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_THREAD_MAX, '2'); + + configLoadExpect($oOption, OP_BACKUP); + } + + if (BackRestTestCommon_Run(++$iRun, 'valid float ' . OPTION_TEST_DELAY)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetBoolTest($oOption, OPTION_TEST); + optionSetTest($oOption, OPTION_TEST_DELAY, '0.25'); + + configLoadExpect($oOption, OP_BACKUP); + } + + if (BackRestTestCommon_Run(++$iRun, 'valid int ' . OPTION_TEST_DELAY)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetBoolTest($oOption, OPTION_TEST); + optionSetTest($oOption, OPTION_TEST_DELAY, 3); + + configLoadExpect($oOption, OP_BACKUP); + } + + if (BackRestTestCommon_Run(++$iRun, 'restore valid ' . OPTION_TARGET_TIMELINE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_TARGET_TIMELINE, 2); + + configLoadExpect($oOption, OP_RESTORE); + } + + if (BackRestTestCommon_Run(++$iRun, 'invalid ' . OPTION_BUFFER_SIZE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_BUFFER_SIZE, '512'); + + configLoadExpect($oOption, OP_RESTORE, ERROR_OPTION_INVALID_RANGE, '512', OPTION_BUFFER_SIZE); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' invalid option' . 
OPTION_RETENTION_ARCHIVE_TYPE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_RETENTION_ARCHIVE_TYPE, BOGUS); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID, OPTION_RETENTION_ARCHIVE_TYPE, OPTION_RETENTION_ARCHIVE); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' invalid value ' . OPTION_RETENTION_ARCHIVE_TYPE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_RETENTION_ARCHIVE, 3); + optionSetTest($oOption, OPTION_RETENTION_ARCHIVE_TYPE, BOGUS); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, BOGUS, OPTION_RETENTION_ARCHIVE_TYPE); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' valid value ' . OPTION_RETENTION_ARCHIVE_TYPE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_RETENTION_ARCHIVE, 1); + optionSetTest($oOption, OPTION_RETENTION_ARCHIVE_TYPE, BACKUP_TYPE_FULL); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_RETENTION_ARCHIVE, 1); + optionTestExpect(OPTION_RETENTION_ARCHIVE_TYPE, BACKUP_TYPE_FULL); + } + + if (BackRestTestCommon_Run(++$iRun, OP_RESTORE . ' invalid value ' . OPTION_RESTORE_RECOVERY_SETTING)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_RESTORE_RECOVERY_SETTING, '='); + + configLoadExpect($oOption, OP_RESTORE, ERROR_OPTION_INVALID_PAIR, '=', OPTION_RESTORE_RECOVERY_SETTING); + } + + if (BackRestTestCommon_Run(++$iRun, OP_RESTORE . ' invalid value ' . OPTION_RESTORE_RECOVERY_SETTING)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_RESTORE_RECOVERY_SETTING, '=' . BOGUS); + + configLoadExpect($oOption, OP_RESTORE, ERROR_OPTION_INVALID_PAIR, '=' . BOGUS, OPTION_RESTORE_RECOVERY_SETTING); + } + + if (BackRestTestCommon_Run(++$iRun, OP_RESTORE . ' invalid value ' . OPTION_RESTORE_RECOVERY_SETTING)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_RESTORE_RECOVERY_SETTING, BOGUS . '='); + + configLoadExpect($oOption, OP_RESTORE, ERROR_OPTION_INVALID_PAIR, BOGUS . '=', OPTION_RESTORE_RECOVERY_SETTING); + } + + if (BackRestTestCommon_Run(++$iRun, OP_RESTORE . ' valid value ' . OPTION_RESTORE_RECOVERY_SETTING)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_RESTORE_RECOVERY_SETTING, 'primary-conn-info=db.domain.net'); + + configLoadExpect($oOption, OP_RESTORE); + optionTestExpect(OPTION_RESTORE_RECOVERY_SETTING, 'db.domain.net', 'primary-conn-info'); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' valid value ' . OPTION_COMMAND_PSQL)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_COMMAND_PSQL, '/psql -X %option%'); + optionSetTest($oOption, OPTION_COMMAND_PSQL_OPTION, '--port 5432'); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_COMMAND_PSQL, '/psql -X --port 5432'); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' default value ' . 
OPTION_COMMAND_REMOTE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_COMMAND_PSQL, '/psql -X %option%'); + optionSetTest($oOption, OPTION_COMMAND_PSQL_OPTION, '--port 5432'); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_COMMAND_REMOTE, dirname(abs_path($0)) . '/pg_backrest_remote.pl'); + } + } + + #------------------------------------------------------------------------------------------------------------------------------- + # Test mixed command-line/config + #------------------------------------------------------------------------------------------------------------------------------- + if ($strTest eq 'all' || $strTest eq 'config') + { + $iRun = 0; + &log(INFO, "Config module\n"); + + BackRestTestCommon_Create(); + + if (BackRestTestCommon_Run(++$iRun, 'set and negate option ' . OPTION_CONFIG)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, '/dude/dude.conf'); + optionSetBoolTest($oOption, OPTION_CONFIG, false); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_NEGATE, OPTION_CONFIG); + } + + if (BackRestTestCommon_Run(++$iRun, 'option ' . OPTION_CONFIG)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetBoolTest($oOption, OPTION_CONFIG, false); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_CONFIG); + } + + if (BackRestTestCommon_Run(++$iRun, 'default option ' . OPTION_CONFIG)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_CONFIG, OPTION_DEFAULT_CONFIG); + } + + if (BackRestTestCommon_Run(++$iRun, 'config file is a path')) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, BackRestTestCommon_TestPathGet()); + + configLoadExpect($oOption, OP_BACKUP, ERROR_FILE_INVALID, BackRestTestCommon_TestPathGet()); + } + + if (BackRestTestCommon_Run(++$iRun, 'load from config stanza section - option ' . OPTION_THREAD_MAX)) + { + $oConfig = {}; + $$oConfig{"$strStanza:" . &OP_BACKUP}{&OPTION_THREAD_MAX} = 2; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_THREAD_MAX, 2); + } + + if (BackRestTestCommon_Run(++$iRun, 'load from config stanza inherited section - option ' . OPTION_THREAD_MAX)) + { + $oConfig = {}; + $$oConfig{"$strStanza:" . &CONFIG_SECTION_GENERAL}{&OPTION_THREAD_MAX} = 3; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_THREAD_MAX, 3); + } + + + if (BackRestTestCommon_Run(++$iRun, 'load from config global section - option ' . OPTION_THREAD_MAX)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . 
&OP_BACKUP}{&OPTION_THREAD_MAX} = 2; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_THREAD_MAX, 2); + } + + if (BackRestTestCommon_Run(++$iRun, 'load from config global inherited section - option ' . OPTION_THREAD_MAX)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_GENERAL}{&OPTION_THREAD_MAX} = 5; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_THREAD_MAX, 5); + } + + if (BackRestTestCommon_Run(++$iRun, 'default - option ' . OPTION_THREAD_MAX)) + { + $oConfig = {}; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_THREAD_MAX, 1); + } + + if (BackRestTestCommon_Run(++$iRun, 'command-line override - option ' . OPTION_THREAD_MAX)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_GENERAL}{&OPTION_THREAD_MAX} = 9; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_THREAD_MAX, 7); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_THREAD_MAX, 7); + } + + if (BackRestTestCommon_Run(++$iRun, 'invalid boolean - option ' . OPTION_HARDLINK)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &OP_BACKUP}{&OPTION_HARDLINK} = 'Y'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, 'Y', OPTION_HARDLINK); + } + + if (BackRestTestCommon_Run(++$iRun, 'invalid value - option ' . OPTION_LOG_LEVEL_CONSOLE)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_LOG}{&OPTION_LOG_LEVEL_CONSOLE} = BOGUS; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP, ERROR_OPTION_INVALID_VALUE, BOGUS, OPTION_LOG_LEVEL_CONSOLE); + } + + if (BackRestTestCommon_Run(++$iRun, 'valid value - option ' . OPTION_LOG_LEVEL_CONSOLE)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_LOG}{&OPTION_LOG_LEVEL_CONSOLE} = lc(INFO); + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_RESTORE); + } + + if (BackRestTestCommon_Run(++$iRun, 'archive-push - option ' . OPTION_LOG_LEVEL_CONSOLE)) + { + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_ARCHIVE_PUSH); + } + + if (BackRestTestCommon_Run(++$iRun, OP_EXPIRE . ' ' . 
OPTION_RETENTION_FULL)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_EXPIRE}{&OPTION_RETENTION_FULL} = 2; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_EXPIRE); + optionTestExpect(OPTION_RETENTION_FULL, 2); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' option ' . OPTION_COMPRESS)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_BACKUP}{&OPTION_COMPRESS} = 'n'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_COMPRESS, false); + } + + if (BackRestTestCommon_Run(++$iRun, OP_RESTORE . ' option ' . OPTION_RESTORE_RECOVERY_SETTING)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_RESTORE_RECOVERY_SETTING}{'archive-command'} = '/path/to/pg_backrest.pl'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_RESTORE); + optionTestExpect(OPTION_RESTORE_RECOVERY_SETTING, '/path/to/pg_backrest.pl', 'archive-command'); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' option ' . OPTION_DB_PATH)) + { + $oConfig = {}; + $$oConfig{$strStanza}{&OPTION_DB_PATH} = '/path/to/db'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_DB_PATH, '/path/to/db'); + } + + if (BackRestTestCommon_Run(++$iRun, OP_ARCHIVE_PUSH . ' option ' . OPTION_DB_PATH)) + { + $oConfig = {}; + $$oConfig{$strStanza}{&OPTION_DB_PATH} = '/path/to/db'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_ARCHIVE_PUSH); + optionTestExpect(OPTION_DB_PATH, '/path/to/db'); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' option ' . OPTION_REPO_PATH)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_GENERAL}{&OPTION_REPO_PATH} = '/repo'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_REPO_PATH, '/repo'); + } + + if (BackRestTestCommon_Run(++$iRun, OP_BACKUP . ' valid value ' . OPTION_COMMAND_PSQL)) + { + $oConfig = {}; + $$oConfig{&CONFIG_GLOBAL . ':' . &CONFIG_SECTION_COMMAND}{&OPTION_COMMAND_PSQL} = '/psql -X %option%'; + $$oConfig{&CONFIG_GLOBAL . ':' . 
&CONFIG_SECTION_COMMAND}{&OPTION_COMMAND_PSQL_OPTION} = '--port=5432'; + ini_save($strConfigFile, $oConfig); + + optionSetTest($oOption, OPTION_STANZA, $strStanza); + optionSetTest($oOption, OPTION_DB_PATH, '/db'); + optionSetTest($oOption, OPTION_CONFIG, $strConfigFile); + + configLoadExpect($oOption, OP_BACKUP); + optionTestExpect(OPTION_COMMAND_PSQL, '/psql -X --port=5432'); + } + + # Cleanup + if (BackRestTestCommon_Cleanup()) + { + &log(INFO, 'cleanup'); + BackRestTestCommon_Drop(true); + } + } +} + +1; diff --git a/test/lib/BackRestTest/FileTest.pm b/test/lib/BackRestTest/FileTest.pm index edda90af0..e5be572f7 100755 --- a/test/lib/BackRestTest/FileTest.pm +++ b/test/lib/BackRestTest/FileTest.pm @@ -8,17 +8,20 @@ package BackRestTest::FileTest; # Perl includes #################################################################################################################################### use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); use File::Basename; use Cwd 'abs_path'; use File::stat; use Fcntl ':mode'; use Scalar::Util 'blessed'; +use Time::HiRes qw(gettimeofday usleep); +use POSIX qw(ceil); use lib dirname($0) . '/../lib'; use BackRest::Utility; +use BackRest::Config; use BackRest::File; use BackRest::Remote; @@ -87,13 +90,26 @@ sub BackRestTestFile_Test &log(INFO, 'FILE MODULE ********************************************************************'); #------------------------------------------------------------------------------------------------------------------------------- - # Create remote + # Create remotes #------------------------------------------------------------------------------------------------------------------------------- my $oRemote = BackRest::Remote->new ( - strHost => $strHost, - strUser => $strUser, - strCommand => BackRestTestCommon_CommandRemoteGet() + $strHost, # Host + $strUser, # User + BackRestTestCommon_CommandRemoteGet(), # Command + OPTION_DEFAULT_BUFFER_SIZE, # Buffer size + OPTION_DEFAULT_COMPRESS_LEVEL, # Compress level + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK, # Compress network level + ); + + my $oLocal = new BackRest::Remote + ( + undef, # Host + undef, # User + undef, # Command + OPTION_DEFAULT_BUFFER_SIZE, # Buffer size + OPTION_DEFAULT_COMPRESS_LEVEL, # Compress level + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK, # Compress network level ); #------------------------------------------------------------------------------------------------------------------------------- @@ -109,25 +125,25 @@ sub BackRestTestFile_Test for (my $bRemote = 0; $bRemote <= 1; $bRemote++) { # Create the file object - my $oFile = (BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef - ))->clone(); + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? 
$oRemote : $oLocal + ); # Loop through error for (my $bError = 0; $bError <= 1; $bError++) { - # Loop through permission (permission will be set on true) - for (my $bPermission = 0; $bPermission <= 1; $bPermission++) + # Loop through mode (mode will be set on true) + for (my $bMode = 0; $bMode <= 1; $bMode++) { my $strPathType = PATH_BACKUP_CLUSTER; # Increment the run, log, and decide whether this unit test should be run if (!BackRestTestCommon_Run(++$iRun, - "rmt ${bRemote}, err ${bError}, prm ${bPermission}")) {next} + "rmt ${bRemote}, err ${bError}, mode ${bMode}")) {next} # Setup test directory BackRestTestFile_Setup($bError); @@ -136,12 +152,12 @@ sub BackRestTestFile_Test mkdir("${strTestPath}/backup/db") or confess 'Unable to create test/backup/db directory'; my $strPath = 'path'; - my $strPermission; + my $strMode; - # If permission then set one (other than the default) - if ($bPermission) + # If mode then set one (other than the default) + if ($bMode) { - $strPermission = '0700'; + $strMode = '0700'; } # If not exists then set the path to something bogus @@ -156,7 +172,7 @@ sub BackRestTestFile_Test eval { - $oFile->path_create($strPathType, $strPath, $strPermission); + $oFile->path_create($strPathType, $strPath, $strMode); }; # Check for errors @@ -184,7 +200,7 @@ sub BackRestTestFile_Test confess 'path was not created'; } - # Check that the permissions were set correctly + # Check that the mode was set correctly my $oStat = lstat($strPathCheck); if (!defined($oStat)) @@ -192,11 +208,11 @@ sub BackRestTestFile_Test confess "unable to stat ${strPathCheck}"; } - if ($bPermission) + if ($bMode) { - if ($strPermission ne sprintf('%04o', S_IMODE($oStat->mode))) + if ($strMode ne sprintf('%04o', S_IMODE($oStat->mode))) { - confess "permissions were not set to {$strPermission}"; + confess "mode were not set to {$strMode}"; } } } @@ -217,13 +233,13 @@ sub BackRestTestFile_Test for (my $bRemote = 0; $bRemote <= 0; $bRemote++) { # Create the file object - my $oFile = BackRest::File->new + my $oFile = (new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef - ); + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal + ))->clone(1); # Loop through source exists for (my $bSourceExists = 0; $bSourceExists <= 1; $bSourceExists++) @@ -316,12 +332,12 @@ sub BackRestTestFile_Test for (my $bRemote = 0; $bRemote <= 0; $bRemote++) { # Create the file object - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? 
$oRemote : $oLocal ); # Loop through exists @@ -337,6 +353,7 @@ sub BackRestTestFile_Test my $strFile = "${strTestPath}/test.txt"; my $strSourceHash; + my $iSourceSize; if ($bError) { @@ -345,7 +362,7 @@ sub BackRestTestFile_Test elsif ($bExists) { system("echo 'TESTDATA' > ${strFile}"); - $strSourceHash = $oFile->hash(PATH_BACKUP_ABSOLUTE, $strFile); + ($strSourceHash, $iSourceSize) = $oFile->hash_size(PATH_BACKUP_ABSOLUTE, $strFile); } # Execute in eval in case of error @@ -383,7 +400,7 @@ sub BackRestTestFile_Test system("gzip -d ${strDestinationFile}") == 0 or die "could not decompress ${strDestinationFile}"; - my $strDestinationHash = $oFile->hash(PATH_BACKUP_ABSOLUTE, $strFile); + my ($strDestinationHash, $iDestinationSize) = $oFile->hash_size(PATH_BACKUP_ABSOLUTE, $strFile); if ($strSourceHash ne $strDestinationHash) { @@ -394,6 +411,63 @@ sub BackRestTestFile_Test } } + #------------------------------------------------------------------------------------------------------------------------------- + # Test wait() + #------------------------------------------------------------------------------------------------------------------------------- + if ($strTest eq 'all' || $strTest eq 'wait') + { + $iRun = 0; + + &log(INFO, '--------------------------------------------------------------------------------'); + &log(INFO, "Test File->wait()\n"); + + for (my $bRemote = 0; $bRemote <= 1; $bRemote++) + { + # Create the file object + my $oFile = new BackRest::File + ( + $strStanza, + $strTestPath, + $bRemote ? 'db' : undef, + $bRemote ? $oRemote : $oLocal + ); + + my $lTimeBegin = gettimeofday(); + + if (!BackRestTestCommon_Run(++$iRun, + "rmt ${bRemote}, begin ${lTimeBegin}")) {next} + + # If there is not enough time to complete the test then sleep + if (ceil($lTimeBegin) - $lTimeBegin < .250) + { + my $lSleepMs = ceil(((int($lTimeBegin) + 1) - $lTimeBegin) * 1000); + + usleep($lSleepMs * 1000); + + &log(DEBUG, "slept ${lSleepMs}ms: begin ${lTimeBegin}, end " . gettimeofday()); + + $lTimeBegin = gettimeofday(); + } + + # Run the test + my $lTimeBeginCheck = $oFile->wait(PATH_DB_ABSOLUTE); + + &log(DEBUG, "begin ${lTimeBegin}, check ${lTimeBeginCheck}, end " . time()); + + # Current time should have advanced by 1 second + if (time() == int($lTimeBegin)) + { + confess "time was not advanced by 1 second"; + } + + # lTimeBegin and lTimeBeginCheck should be equal + if (int($lTimeBegin) != $lTimeBeginCheck) + { + confess 'time begin ' || int($lTimeBegin) || "and check ${lTimeBeginCheck} should be equal"; + } + } + } + #------------------------------------------------------------------------------------------------------------------------------- # Test manifest() #------------------------------------------------------------------------------------------------------------------------------- @@ -419,12 +493,12 @@ sub BackRestTestFile_Test for (my $bRemote = 0; $bRemote <= 1; $bRemote++) { # Create the file object - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal ); for (my $bError = 0; $bError <= 1; $bError++) @@ -527,8 +601,8 @@ sub BackRestTestFile_Test $oManifestHash{name}{"${strName}"}{user} : '') . ',' . (defined($oManifestHash{name}{"${strName}"}{group}) ? $oManifestHash{name}{"${strName}"}{group} : '') . ',' . 
- (defined($oManifestHash{name}{"${strName}"}{permission}) ? - $oManifestHash{name}{"${strName}"}{permission} : '') . ',' . + (defined($oManifestHash{name}{"${strName}"}{mode}) ? + $oManifestHash{name}{"${strName}"}{mode} : '') . ',' . (defined($oManifestHash{name}{"${strName}"}{modification_time}) ? $oManifestHash{name}{"${strName}"}{modification_time} : '') . ',' . (defined($oManifestHash{name}{"${strName}"}{inode}) ? @@ -561,12 +635,12 @@ sub BackRestTestFile_Test for (my $bRemote = false; $bRemote <= true; $bRemote++) { # Create the file object - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal ); for (my $bSort = false; $bSort <= true; $bSort++) @@ -687,12 +761,12 @@ sub BackRestTestFile_Test for (my $bRemote = 0; $bRemote <= 1; $bRemote++) { - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal ); # Loop through exists @@ -788,24 +862,27 @@ sub BackRestTestFile_Test &log(INFO, '--------------------------------------------------------------------------------'); &log(INFO, "test File->hash()\n"); - for (my $bRemote = 0; $bRemote <= 1; $bRemote++) + for (my $bRemote = false; $bRemote <= true; $bRemote++) { - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? $oRemote : $oLocal ); # Loop through error - for (my $bError = 0; $bError <= 1; $bError++) + for (my $bError = false; $bError <= true; $bError++) { # Loop through exists - for (my $bExists = 0; $bExists <= 1; $bExists++) + for (my $bExists = false; $bExists <= true; $bExists++) + { + # Loop through exists + for (my $bCompressed = false; $bCompressed <= true; $bCompressed++) { if (!BackRestTestCommon_Run(++$iRun, - "rmt ${bRemote}, err ${bError}, exists ${bExists}")) {next} + "rmt ${bRemote}, err ${bError}, exists ${bExists}, cmp ${bCompressed}")) {next} # Setup test directory BackRestTestFile_Setup($bError); @@ -823,15 +900,22 @@ sub BackRestTestFile_Test else { system("echo 'TESTDATA' > ${strFile}"); + + if ($bCompressed && !$bRemote) + { + $oFile->compress(PATH_BACKUP_ABSOLUTE, $strFile); + $strFile = $strFile . '.gz'; + } } # Execute in eval in case of error my $strHash; + my $iSize; my $bErrorExpected = !$bExists || $bError || $bRemote; eval { - $strHash = $oFile->hash(PATH_BACKUP_ABSOLUTE, $strFile) + ($strHash, $iSize) = $oFile->hash_size(PATH_BACKUP_ABSOLUTE, $strFile, $bCompressed) }; if ($@) @@ -855,6 +939,7 @@ sub BackRestTestFile_Test } } } + } } } @@ -870,12 +955,12 @@ sub BackRestTestFile_Test for (my $bRemote = 0; $bRemote <= 1; $bRemote++) { - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $bRemote ? 'backup' : undef, - oRemote => $bRemote ? $oRemote : undef + $strStanza, + $strTestPath, + $bRemote ? 'backup' : undef, + $bRemote ? 
$oRemote : $oLocal ); # Loop through exists @@ -952,6 +1037,9 @@ sub BackRestTestFile_Test { $iRun = 0; + # Loop through small/large + for (my $bLarge = false; $bLarge <= 2; $bLarge++) + { # Loop through backup local vs remote for (my $bBackupRemote = 0; $bBackupRemote <= 1; $bBackupRemote++) { @@ -968,34 +1056,34 @@ sub BackRestTestFile_Test my $strRemote = $bBackupRemote ? 'backup' : $bDbRemote ? 'db' : undef; # Create the file object - my $oFile = BackRest::File->new + my $oFile = new BackRest::File ( - strStanza => $strStanza, - strBackupPath => $strTestPath, - strRemote => $strRemote, - oRemote => defined($strRemote) ? $oRemote : undef + $strStanza, + $strTestPath, + $strRemote, + defined($strRemote) ? $oRemote : $oLocal ); - # Loop through source compression - for (my $bSourceCompressed = 0; $bSourceCompressed <= 1; $bSourceCompressed++) - { - # Loop through destination compression - for (my $bDestinationCompress = 0; $bDestinationCompress <= 1; $bDestinationCompress++) - { # Loop through source path types for (my $bSourcePathType = 0; $bSourcePathType <= 1; $bSourcePathType++) { # Loop through destination path types for (my $bDestinationPathType = 0; $bDestinationPathType <= 1; $bDestinationPathType++) { - # Loop through source ignore/require - for (my $bSourceIgnoreMissing = 0; $bSourceIgnoreMissing <= 1; $bSourceIgnoreMissing++) - { # Loop through source missing/present - for (my $bSourceMissing = 0; $bSourceMissing <= 1; $bSourceMissing++) + for (my $bSourceMissing = 0; $bSourceMissing <= !$bLarge; $bSourceMissing++) { - # Loop through small/large - for (my $bLarge = false; $bLarge <= defined($strRemote) && !$bSourceMissing; $bLarge++) + # Loop through source ignore/require + for (my $bSourceIgnoreMissing = 0; $bSourceIgnoreMissing <= !$bLarge; $bSourceIgnoreMissing++) + { + # Loop through checksum append + for (my $bChecksumAppend = 0; $bChecksumAppend <= !$bLarge; $bChecksumAppend++) + { + # Loop through source compression + for (my $bSourceCompressed = 0; $bSourceCompressed <= !$bSourceMissing; $bSourceCompressed++) + { + # Loop through destination compression + for (my $bDestinationCompress = 0; $bDestinationCompress <= !$bSourceMissing; $bDestinationCompress++) { my $strSourcePathType = $bSourcePathType ? PATH_DB_ABSOLUTE : PATH_BACKUP_ABSOLUTE; my $strSourcePath = $bSourcePathType ? 'db' : 'backup'; @@ -1004,16 +1092,16 @@ sub BackRestTestFile_Test my $strDestinationPath = $bDestinationPathType ? 'db' : 'backup'; if (!BackRestTestCommon_Run(++$iRun, - 'rmt ' . + "lrg ${bLarge}, rmt " . (defined($strRemote) && ($strRemote eq $strSourcePath || $strRemote eq $strDestinationPath) ? 1 : 0) . - ", lrg ${bLarge}, " . - 'srcpth ' . (defined($strRemote) && $strRemote eq $strSourcePath ? 'rmt' : 'lcl') . - ":${strSourcePath}, srccmp $bSourceCompressed, srcmiss ${bSourceMissing}, " . - "srcignmiss ${bSourceIgnoreMissing}, " . + ', srcpth ' . (defined($strRemote) && $strRemote eq $strSourcePath ? 'rmt' : 'lcl') . + ":${strSourcePath}, srcmiss ${bSourceMissing}, " . + "srcignmiss ${bSourceIgnoreMissing}, srccmp $bSourceCompressed, " . 'dstpth ' . (defined($strRemote) && $strRemote eq $strDestinationPath ? 'rmt' : 'lcl') . - ":${strDestinationPath}, dstcmp $bDestinationCompress")) {next} + ":${strDestinationPath}, chkapp ${bChecksumAppend}, " . 
+ "dstcmp $bDestinationCompress")) {next} # Setup test directory BackRestTestFile_Setup(false); @@ -1023,8 +1111,12 @@ sub BackRestTestFile_Test my $strSourceFile = "${strTestPath}/${strSourcePath}/test-source"; my $strDestinationFile = "${strTestPath}/${strDestinationPath}/test-destination"; + my $strCopyHash; + my $iCopySize; + # Create the compressed or uncompressed test file my $strSourceHash; + my $iSourceSize; if (!$bSourceMissing) { @@ -1033,7 +1125,7 @@ sub BackRestTestFile_Test $strSourceFile .= '.bin'; $strDestinationFile .= '.bin'; - BackRestTestCommon_Execute('cp ' . BackRestTestCommon_DataPathGet() . "/test.archive.bin ${strSourceFile}"); + BackRestTestCommon_Execute('cp ' . BackRestTestCommon_DataPathGet() . "/test.archive${bLarge}.bin ${strSourceFile}"); } else { @@ -1043,7 +1135,21 @@ sub BackRestTestFile_Test system("echo 'TESTDATA' > ${strSourceFile}"); } - $strSourceHash = $oFile->hash(PATH_ABSOLUTE, $strSourceFile); + if ($bLarge == 1) + { + $strSourceHash = 'c2e63b6a49d53a53d6df1aa6b70c7c16747ca099'; + $iSourceSize = 16777216; + } + elsif ($bLarge == 2) + { + $strSourceHash = '1c7e00fd09b9dd11fc2966590b3e3274645dd031'; + $iSourceSize = 16777216; + } + else + { + $strSourceHash = '06364afe79d801433188262478a76d19777ef351'; + $iSourceSize = 9; + } if ($bSourceCompressed) { @@ -1062,11 +1168,12 @@ sub BackRestTestFile_Test eval { - $bReturn = $oFile->copy($strSourcePathType, $strSourceFile, - $strDestinationPathType, $strDestinationFile, - $bSourceCompressed, $bDestinationCompress, - $bSourceIgnoreMissing, undef, - '0700'); + ($bReturn, $strCopyHash, $iCopySize) = + $oFile->copy($strSourcePathType, $strSourceFile, + $strDestinationPathType, $strDestinationFile, + $bSourceCompressed, $bDestinationCompress, + $bSourceIgnoreMissing, undef, '0700', false, undef, undef, + $bChecksumAppend); }; # Check for errors after copy @@ -1109,6 +1216,24 @@ sub BackRestTestFile_Test confess 'expected source file missing error'; } + if (!defined($strCopyHash)) + { + confess 'copy hash must be defined'; + } + + if ($bChecksumAppend) + { + if ($bDestinationCompress) + { + $strDestinationFile = + substr($strDestinationFile, 0, length($strDestinationFile) -3) . "-${strSourceHash}.gz"; + } + else + { + $strDestinationFile .= '-' . 
$strSourceHash; + } + } + unless (-e $strDestinationFile) { confess "could not find destination file ${strDestinationFile}"; @@ -1124,12 +1249,18 @@ sub BackRestTestFile_Test or die "could not decompress ${strDestinationFile}"; } - my $strDestinationHash = $oFile->hash(PATH_ABSOLUTE, $strDestinationTest); + my ($strDestinationHash, $iDestinationSize) = $oFile->hash_size(PATH_ABSOLUTE, $strDestinationTest); - if ($strSourceHash ne $strDestinationHash) + if ($strSourceHash ne $strDestinationHash || $strSourceHash ne $strCopyHash) { - confess "source ${strSourceHash} and destination ${strDestinationHash} file hashes do not match"; + confess "source ${strSourceHash}, copy ${strCopyHash} and destination ${strDestinationHash} file hashes do not match"; } + + if ($iSourceSize != $iDestinationSize || $iSourceSize != $iCopySize) + { + confess "source ${iSourceSize}, copy ${iCopySize} and destination ${iDestinationSize} sizes do not match"; + } + } } } } diff --git a/test/lib/BackRestTest/UtilityTest.pm b/test/lib/BackRestTest/UtilityTest.pm index 5aff36ebf..bdd1289a7 100755 --- a/test/lib/BackRestTest/UtilityTest.pm +++ b/test/lib/BackRestTest/UtilityTest.pm @@ -1,6 +1,6 @@ #!/usr/bin/perl #################################################################################################################################### -# BackupTest.pl - Unit Tests for BackRest::File +# UtilityTest.pl - Unit Tests for BackRest::Utility #################################################################################################################################### package BackRestTest::UtilityTest; @@ -8,13 +8,14 @@ package BackRestTest::UtilityTest; # Perl includes #################################################################################################################################### use strict; -use warnings; -use Carp; +use warnings FATAL => qw(all); +use Carp qw(confess); use File::Basename; use lib dirname($0) . '/../lib'; use BackRest::Utility; +use BackRest::Config; use BackRest::File; use BackRestTest::CommonTest; @@ -22,29 +23,6 @@ use BackRestTest::CommonTest; use Exporter qw(import); our @EXPORT = qw(BackRestTestUtility_Test); -#################################################################################################################################### -# BackRestTestUtility_Drop -#################################################################################################################################### -sub BackRestTestUtility_Drop -{ - # Remove the test directory - system('rm -rf ' . BackRestTestCommon_TestPathGet()) == 0 - or die 'unable to remove ' . BackRestTestCommon_TestPathGet() . 'path'; -} - -#################################################################################################################################### -# BackRestTestUtility_Create -#################################################################################################################################### -sub BackRestTestUtility_Create -{ - # Drop the old test directory - BackRestTestUtility_Drop(); - - # Create the test directory - mkdir(BackRestTestCommon_TestPathGet(), oct('0770')) - or confess 'Unable to create ' . BackRestTestCommon_TestPathGet() . 
' path'; -} - #################################################################################################################################### # BackRestTestUtility_Test #################################################################################################################################### @@ -60,6 +38,19 @@ sub BackRestTestUtility_Test # Print test banner &log(INFO, 'UTILITY MODULE ******************************************************************'); + #------------------------------------------------------------------------------------------------------------------------------- + # Create remote + #------------------------------------------------------------------------------------------------------------------------------- + my $oLocal = new BackRest::Remote + ( + undef, # Host + undef, # User + undef, # Command + OPTION_DEFAULT_BUFFER_SIZE, # Buffer size + OPTION_DEFAULT_COMPRESS_LEVEL, # Compress level + OPTION_DEFAULT_COMPRESS_LEVEL_NETWORK, # Compress network level + ); + #------------------------------------------------------------------------------------------------------------------------------- # Test config #------------------------------------------------------------------------------------------------------------------------------- @@ -67,7 +58,14 @@ sub BackRestTestUtility_Test { $iRun = 0; $bCreate = true; - my $oFile = BackRest::File->new(); + + my $oFile = new BackRest::File + ( + undef, + undef, + undef, + $oLocal + ); &log(INFO, "Test config\n"); @@ -77,7 +75,8 @@ sub BackRestTestUtility_Test # Create the test directory if ($bCreate) { - BackRestTestUtility_Create(); + BackRestTestCommon_Drop(); + BackRestTestCommon_Create(); $bCreate = false; } @@ -96,18 +95,18 @@ sub BackRestTestUtility_Test # Save the test config my $strFile = "${strTestPath}/config.cfg"; - config_save($strFile, \%oConfig); + ini_save($strFile, \%oConfig); my $strConfigHash = $oFile->hash(PATH_ABSOLUTE, $strFile); # Reload the test config my %oConfigTest; - config_load($strFile, \%oConfigTest); + ini_load($strFile, \%oConfigTest); # Resave the test config and compare hashes my $strFileTest = "${strTestPath}/config-test.cfg"; - config_save($strFileTest, \%oConfigTest); + ini_save($strFileTest, \%oConfigTest); my $strConfigTestHash = $oFile->hash(PATH_ABSOLUTE, $strFileTest); @@ -119,7 +118,7 @@ sub BackRestTestUtility_Test if (BackRestTestCommon_Cleanup()) { &log(INFO, 'cleanup'); - BackRestTestUtility_Drop(); + BackRestTestCommon_Drop(); } } } diff --git a/test/test.pl b/test/test.pl index 0512f5192..45d383be7 100755 --- a/test/test.pl +++ b/test/test.pl @@ -13,7 +13,8 @@ use Carp; use File::Basename; use Getopt::Long; use Cwd 'abs_path'; -use Cwd; +use Pod::Usage; +#use Test::More; use lib dirname($0) . '/../lib'; use BackRest::Utility; @@ -21,30 +22,89 @@ use BackRest::Utility; use lib dirname($0) . 
'/lib'; use BackRestTest::CommonTest; use BackRestTest::UtilityTest; +use BackRestTest::ConfigTest; use BackRestTest::FileTest; use BackRestTest::BackupTest; +#################################################################################################################################### +# Usage +#################################################################################################################################### + +=head1 NAME + +test.pl - Simple Postgres Backup and Restore Unit Tests + +=head1 SYNOPSIS + +test.pl [options] + + Test Options: + --module test module to execute: + --module-test execute the specified test in a module + --module-test-run execute only the specified test run + --thread-max max threads to run for backup/restore (default 4) + --dry-run show only the tests that would be executed but don't execute them + --no-cleanup don't cleaup after the last test is complete - useful for debugging + --infinite repeat selected tests forever + + Configuration Options: + --psql-bin path to the psql executables (e.g. /usr/lib/postgresql/9.3/bin/) + --test-path path where tests are executed (defaults to ./test) + --log-level log level to use for tests (defaults to INFO) + --quiet, -q equivalent to --log-level=off + + General Options: + --version display version and exit + --help display usage and exit +=cut + #################################################################################################################################### # Command line parameters #################################################################################################################################### -my $strLogLevel = 'off'; # Log level for tests +my $strLogLevel = 'info'; # Log level for tests my $strModule = 'all'; my $strModuleTest = 'all'; my $iModuleTestRun = undef; +my $iThreadMax = 1; my $bDryRun = false; my $bNoCleanup = false; my $strPgSqlBin; my $strTestPath; +my $bVersion = false; +my $bHelp = false; +my $bQuiet = false; +my $bInfinite = false; -GetOptions ('pgsql-bin=s' => \$strPgSqlBin, +GetOptions ('q|quiet' => \$bQuiet, + 'version' => \$bVersion, + 'help' => \$bHelp, + 'pgsql-bin=s' => \$strPgSqlBin, 'test-path=s' => \$strTestPath, 'log-level=s' => \$strLogLevel, 'module=s' => \$strModule, 'module-test=s' => \$strModuleTest, 'module-test-run=s' => \$iModuleTestRun, + 'thread-max=s' => \$iThreadMax, 'dry-run' => \$bDryRun, - 'no-cleanup' => \$bNoCleanup) - or die 'error in command line arguments'; + 'no-cleanup' => \$bNoCleanup, + 'infinite' => \$bInfinite) + or pod2usage(2); + +# Display version and exit if requested +if ($bVersion || $bHelp) +{ + print 'pg_backrest ' . version_get() . 
" unit test\n"; + + if ($bHelp) + { + print "\n"; + pod2usage(); + } + + exit 0; +} + +# Test::More->builder->output('/dev/null'); #################################################################################################################################### # Setup @@ -52,7 +112,12 @@ GetOptions ('pgsql-bin=s' => \$strPgSqlBin, # Set a neutral umask so tests work as expected umask(0); -# Set console log level to trace for testing +# Set console log level +if ($bQuiet) +{ + $strLogLevel = 'off'; +} + log_level_set(undef, uc($strLogLevel)); if ($strModuleTest ne 'all' && $strModule eq 'all') @@ -65,10 +130,36 @@ if (defined($iModuleTestRun) && $strModuleTest eq 'all') confess "--module-test must be provided for run \"${iModuleTestRun}\""; } -# Make sure PG bin has been defined +# Search for psql bin if (!defined($strPgSqlBin)) { - confess 'pgsql-bin was not defined'; + my @strySearchPath = ('/usr/lib/postgresql/VERSION/bin', '/Library/PostgreSQL/VERSION/bin'); + + foreach my $strSearchPath (@strySearchPath) + { + for (my $fVersion = 9; $fVersion >= 0; $fVersion -= 1) + { + my $strVersionPath = $strSearchPath; + $strVersionPath =~ s/VERSION/9\.$fVersion/g; + + if (-e "${strVersionPath}/initdb") + { + &log(INFO, "found pgsql-bin at ${strVersionPath}\n"); + $strPgSqlBin = ${strVersionPath}; + } + } + } + + if (!defined($strPgSqlBin)) + { + confess 'pgsql-bin was not defined and could not be located'; + } +} + +# Check thread total +if ($iThreadMax < 1 || $iThreadMax > 32) +{ + confess 'thread-max must be between 1 and 32'; } #################################################################################################################################### @@ -127,22 +218,39 @@ BackRestTestCommon_Setup($strTestPath, $strPgSqlBin, $iModuleTestRun, $bDryRun, # &log(INFO, "Testing with test_path = " . BackRestTestCommon_TestPathGet() . ", host = {strHost}, user = {strUser}, " . # "group = {strGroup}"); -if ($strModule eq 'all' || $strModule eq 'utility') -{ - BackRestTestUtility_Test($strModuleTest); -} +my $iRun = 0; -if ($strModule eq 'all' || $strModule eq 'file') +do { - BackRestTestFile_Test($strModuleTest); -} + if ($bInfinite) + { + $iRun++; + &log(INFO, "INFINITE - RUN ${iRun}\n"); + } -if ($strModule eq 'all' || $strModule eq 'backup') -{ - BackRestTestBackup_Test($strModuleTest); + if ($strModule eq 'all' || $strModule eq 'utility') + { + BackRestTestUtility_Test($strModuleTest); + } + + if ($strModule eq 'all' || $strModule eq 'config') + { + BackRestTestConfig_Test($strModuleTest); + } + + if ($strModule eq 'all' || $strModule eq 'file') + { + BackRestTestFile_Test($strModuleTest); + } + + if ($strModule eq 'all' || $strModule eq 'backup') + { + BackRestTestBackup_Test($strModuleTest, $iThreadMax); + } } +while ($bInfinite); if (!$bDryRun) { - &log(ASSERT, 'TESTS COMPLETED SUCCESSFULLY (DESPITE ANY ERROR MESSAGES YOU SAW)'); + &log(INFO, 'TESTS COMPLETED SUCCESSFULLY (DESPITE ANY ERROR MESSAGES YOU SAW)'); }