diff --git a/doc/xml/release.xml b/doc/xml/release.xml index 661b85b78..39cd9f0e5 100644 --- a/doc/xml/release.xml +++ b/doc/xml/release.xml @@ -13,6 +13,17 @@ + + + + + + + +

The backup command is implemented entirely in C.

+
+
+
diff --git a/doc/xml/user-guide.xml b/doc/xml/user-guide.xml index 037b5cdc9..b0e37ff01 100644 --- a/doc/xml/user-guide.xml +++ b/doc/xml/user-guide.xml @@ -2503,7 +2503,7 @@ Attempt a backup - + {[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup \: stop file exists for all stanzas @@ -2544,7 +2544,7 @@ Attempt a backup - + {[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} backup \: stop file exists for stanza demo diff --git a/lib/pgBackRest/Archive/Common.pm b/lib/pgBackRest/Archive/Common.pm index 2f596dccc..1b2a86325 100644 --- a/lib/pgBackRest/Archive/Common.pm +++ b/lib/pgBackRest/Archive/Common.pm @@ -43,238 +43,4 @@ use constant PG_WAL_SYSTEM_ID_OFFSET_LT_93 => 12; use constant PG_WAL_SEGMENT_SIZE => 16777216; push @EXPORT, qw(PG_WAL_SEGMENT_SIZE); -#################################################################################################################################### -# lsnNormalize -# -# Generates a normalized form from an LSN that can be used for comparison. -#################################################################################################################################### -sub lsnNormalize -{ - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strLsn, - ) = - logDebugParam - ( - __PACKAGE__ . 
'::lsnFile', \@_, - {name => 'strLsn', trace => true}, - ); - - # Split the LSN into major and minor parts - my @stryLsnSplit = split('/', $strLsn); - - if (@stryLsnSplit != 2) - { - confess &log(ASSERT, "invalid lsn ${strLsn}"); - } - - my $strLsnNormal = uc(sprintf("%08s%08s", $stryLsnSplit[0], $stryLsnSplit[1])); - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'strLsnNormal', value => $strLsnNormal, trace => true} - ); - -} - -push @EXPORT, qw(lsnNormalize); - -#################################################################################################################################### -# lsnFileRange -# -# Generates a range of WAL filenames given the start and stop LSN. For pre-9.3 databases, use bSkipFF to exclude the FF that -# prior versions did not generate. -#################################################################################################################################### -sub lsnFileRange -{ - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strLsnStart, - $strLsnStop, - $strDbVersion, - $iWalSegmentSize, - ) = - logDebugParam - ( - __PACKAGE__ . 
'::lsnFileRange', \@_, - {name => 'strLsnStart'}, - {name => 'strLsnStop'}, - {name => '$strDbVersion'}, - {name => '$iWalSegmentSize'}, - ); - - # Working variables - my @stryArchive; - my $iArchiveIdx = 0; - my $bSkipFF = $strDbVersion < PG_VERSION_93; - - # Iterate through all archive logs between start and stop - my @stryArchiveSplit = split('/', $strLsnStart); - my $iStartMajor = hex($stryArchiveSplit[0]); - my $iStartMinor = int(hex($stryArchiveSplit[1]) / $iWalSegmentSize); - - @stryArchiveSplit = split('/', $strLsnStop); - my $iStopMajor = hex($stryArchiveSplit[0]); - my $iStopMinor = int(hex($stryArchiveSplit[1]) / $iWalSegmentSize); - - $stryArchive[$iArchiveIdx] = uc(sprintf("%08x%08x", $iStartMajor, $iStartMinor)); - $iArchiveIdx += 1; - - while (!($iStartMajor == $iStopMajor && $iStartMinor == $iStopMinor)) - { - $iStartMinor += 1; - - if ($bSkipFF && $iStartMinor == 255 || !$bSkipFF && $iStartMinor > int(0xFFFFFFFF / $iWalSegmentSize)) - { - $iStartMajor += 1; - $iStartMinor = 0; - } - - $stryArchive[$iArchiveIdx] = uc(sprintf("%08x%08x", $iStartMajor, $iStartMinor)); - $iArchiveIdx += 1; - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'stryWalFileName', value => \@stryArchive} - ); -} - -push @EXPORT, qw(lsnFileRange); - -#################################################################################################################################### -# walSegmentFind -# -# Returns the filename of a WAL segment in the archive. Optionally, a wait time can be specified. In this case an error will be -# thrown when the WAL segment is not found. If the same WAL segment with multiple checksums is found then error. 
-#################################################################################################################################### -sub walSegmentFind -{ - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $oStorageRepo, - $strArchiveId, - $strWalSegment, - $iWaitSeconds, - ) = - logDebugParam - ( - __PACKAGE__ . '::walSegmentFind', \@_, - {name => 'oStorageRepo'}, - {name => 'strArchiveId'}, - {name => 'strWalSegment'}, - {name => 'iWaitSeconds', required => false}, - ); - - # Error if not a segment - if (!walIsSegment($strWalSegment)) - { - confess &log(ERROR, "${strWalSegment} is not a WAL segment", ERROR_ASSERT); - } - - # Loop and wait for file to appear - my $oWait = waitInit($iWaitSeconds); - my @stryWalFileName; - - do - { - # Get the name of the requested WAL segment (may have compression extension) - push(@stryWalFileName, $oStorageRepo->list( - STORAGE_REPO_ARCHIVE . "/${strArchiveId}/" . substr($strWalSegment, 0, 16), - {strExpression => - '^' . substr($strWalSegment, 0, 24) . (walIsPartial($strWalSegment) ? "\\.partial" : '') . - "-[0-f]{40}(\\." . COMPRESS_EXT . "){0,1}\$", - bIgnoreMissing => true})); - } - while (@stryWalFileName == 0 && waitMore($oWait)); - - # If there is more than one matching archive file then there is a serious issue - either a bug in the archiver or the user has - # copied files around or removed archive.info. - if (@stryWalFileName > 1) - { - confess &log(ERROR, - "duplicates found in archive for WAL segment ${strWalSegment}: " . join(', ', @stryWalFileName) . - "\nHINT: are multiple primaries archiving to this stanza?", - ERROR_ARCHIVE_DUPLICATE); - } - - # If waiting and no WAL segment was found then throw an error - if (@stryWalFileName == 0 && defined($iWaitSeconds)) - { - confess &log( - ERROR, - "could not find WAL segment ${strWalSegment} after ${iWaitSeconds} second(s)" . - "\nHINT: is archive_command configured correctly?" . 
- "\nHINT: use the check command to verify that PostgreSQL is archiving.", - ERROR_ARCHIVE_TIMEOUT); - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'strWalFileName', value => $stryWalFileName[0]} - ); -} - -push @EXPORT, qw(walSegmentFind); - -#################################################################################################################################### -# walIsSegment -# -# Is the file a segment or some other file (e.g. .history, .backup, etc). -#################################################################################################################################### -sub walIsSegment -{ - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strWalFile, - ) = - logDebugParam - ( - __PACKAGE__ . '::walIsSegment', \@_, - {name => 'strWalFile', trace => true}, - ); - - return $strWalFile =~ /^[0-F]{24}(\.partial){0,1}$/ ? true : false; -} - -push @EXPORT, qw(walIsSegment); - -#################################################################################################################################### -# walIsPartial -# -# Is the file a segment and partial. -#################################################################################################################################### -sub walIsPartial -{ - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strWalFile, - ) = - logDebugParam - ( - __PACKAGE__ . '::walIsPartial', \@_, - {name => 'strWalFile', trace => true}, - ); - - return walIsSegment($strWalFile) && $strWalFile =~ /\.partial$/ ? 
true : false; -} - -push @EXPORT, qw(walIsPartial); - 1; diff --git a/lib/pgBackRest/Backup/Backup.pm b/lib/pgBackRest/Backup/Backup.pm deleted file mode 100644 index 10d16e64d..000000000 --- a/lib/pgBackRest/Backup/Backup.pm +++ /dev/null @@ -1,1117 +0,0 @@ -#################################################################################################################################### -# BACKUP MODULE -#################################################################################################################################### -package pgBackRest::Backup::Backup; - -use strict; -use warnings FATAL => qw(all); -use Carp qw(confess); -use English '-no_match_vars'; - -use Exporter qw(import); -use File::Basename; - -use pgBackRest::Archive::Common; -use pgBackRest::Backup::Common; -use pgBackRest::Backup::File; -use pgBackRest::Backup::Info; -use pgBackRest::Common::Cipher; -use pgBackRest::Common::Exception; -use pgBackRest::Common::Ini; -use pgBackRest::Common::Log; -use pgBackRest::Common::Wait; -use pgBackRest::Common::String; -use pgBackRest::Config::Config; -use pgBackRest::Db; -use pgBackRest::DbVersion; -use pgBackRest::Manifest; -use pgBackRest::Protocol::Local::Process; -use pgBackRest::Protocol::Helper; -use pgBackRest::Protocol::Storage::Helper; -use pgBackRest::Common::Io::Handle; -use pgBackRest::Storage::Base; -use pgBackRest::Storage::Helper; -use pgBackRest::Version; - -#################################################################################################################################### -# new -#################################################################################################################################### -sub new -{ - my $class = shift; # Class name - - # Create the class hash - my $self = {}; - bless $self, $class; - - # Assign function parameters, defaults, and log debug info - my ($strOperation) = logDebugParam(__PACKAGE__ . 
'->new'); - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'self', value => $self} - ); -} - -#################################################################################################################################### -# resumeClean - cleans the directory from a previous failed backup so it can be reused -#################################################################################################################################### -sub resumeClean -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $oStorageRepo, - $strBackupLabel, - $oManifest, - $oAbortedManifest, - $bOnline, - $bDelta, - $strTimelineCurrent, - $strTimelineLast, - ) = - logDebugParam - ( - __PACKAGE__ . '->resumeClean', \@_, - {name => 'oStorageRepo'}, - {name => 'strBackupLabel'}, - {name => 'oManifest'}, - {name => 'oAbortedManifest'}, - {name => 'bOnline'}, - {name => 'bDelta'}, - {name => 'strTimelineCurrent', required => false}, - {name => 'strTimelineLast', required => false}, - ); - - &log(DETAIL, 'clean resumed backup path: ' . $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . "/${strBackupLabel}")); - - # Build manifest for aborted backup path - my $hFile = $oStorageRepo->manifest(STORAGE_REPO_BACKUP . 
"/${strBackupLabel}"); - - # Get compress flag - my $bCompressed = $oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS); - - if (!$bDelta) - { - # Check to see if delta checksum should be enabled - $bDelta = $oAbortedManifest->checkDelta( - 'resumed', $oAbortedManifest->boolTest(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ONLINE, undef, $bOnline), - $strTimelineCurrent, $strTimelineLast); - - # If delta is still false, check the files for anomalies - if (!$bDelta) - { - my @stryFileList = (); - - foreach my $strName (sort(keys(%{$hFile}))) - { - # Ignore files that will never be in the manifest but should be preserved - if ($strName eq FILE_MANIFEST_COPY || - $strName eq '.') - { - next; - } - - if ($hFile->{$strName}{type} eq 'f') - { - # If the original backup was compressed then remove the extension before checking the manifest - my $strFile = $strName; - - if ($bCompressed) - { - $strFile = substr($strFile, 0, length($strFile) - 3); - } - - # To be preserved the file must exist in the new manifest and not be a reference to a previous backup and must - # have a checksum - if ($oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile) && - !$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_REFERENCE) && - $oAbortedManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM)) - { - push(@stryFileList, $strFile); - } - } - } - - # If there are files in the list then check if delta should be enabled - if (@stryFileList) - { - $bDelta = $oManifest->checkDeltaFile(\@stryFileList, $oAbortedManifest, undef); - } - } - } - - # Find paths and files to delete - my @stryFile; - - foreach my $strName (sort(keys(%{$hFile}))) - { - # Ignore files that will never be in the manifest but should be preserved - if ($strName eq FILE_MANIFEST_COPY || - $strName eq '.') - { - next; - } - - # Get the file type (all links will be deleted since they are easy to recreate) - my $cType = $hFile->{$strName}{type}; - - # 
If a directory check if it exists in the new manifest - if ($cType eq 'd') - { - if ($oManifest->test(MANIFEST_SECTION_TARGET_PATH, $strName)) - { - next; - } - } - # Else if a file - elsif ($cType eq 'f') - { - # If the original backup was compressed then remove the extension before checking the manifest - my $strFile = $strName; - - if ($bCompressed) - { - $strFile = substr($strFile, 0, length($strFile) - 3); - } - - # To be preserved the file must exist in the new manifest and not be a reference to a previous backup - if ($oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile) && - !$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_REFERENCE)) - { - # To be preserved the checksum must be defined - my $strChecksum = $oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM, false); - - # If the size and timestamp match OR if the size matches and the delta option is set, then keep the file. - # In the latter case, if the timestamp had changed then rather than removing and recopying the file, the file - # will be tested in backupFile to see if the db/repo checksum still matches: if so, it is not necessary to recopy, - # else it will need to be copied to the new backup. 
- if (defined($strChecksum) && - $oManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_SIZE) == - $oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_SIZE) && - ($bDelta || - $oManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_TIMESTAMP) == - $oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_TIMESTAMP))) - { - $oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksum); - - # Also copy page checksum results if they exist - my $bChecksumPage = - $oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, false); - - if (defined($bChecksumPage)) - { - $oManifest->boolSet(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, $bChecksumPage); - - if (!$bChecksumPage && - $oAbortedManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR)) - { - $oManifest->set( - MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR, - $oAbortedManifest->get( - MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR)); - } - } - - next; - } - } - } - - # If a directory then remove it - if ($cType eq 'd') - { - logDebugMisc($strOperation, "remove path ${strName}"); - $oStorageRepo->pathRemove(STORAGE_REPO_BACKUP . "/${strBackupLabel}/${strName}", {bRecurse => true}); - } - # Else add the file/link to be deleted later - else - { - logDebugMisc($strOperation, "remove file ${strName}"); - push(@stryFile, STORAGE_REPO_BACKUP . 
"/${strBackupLabel}/${strName}"); - } - } - - # Delete files in batch for more efficiency - if (@stryFile > 0) - { - $oStorageRepo->remove(\@stryFile); - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'bDelta', value => $bDelta, trace => true}, - ); -} - -#################################################################################################################################### -# processManifest -# -# Process the file level backup. Uses the information in the manifest to determine which files need to be copied. Directories -# and tablespace links are only created when needed, except in the case of a full backup or if hardlinks are requested. -#################################################################################################################################### -sub processManifest -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strDbMasterPath, - $strDbCopyPath, - $strType, - $strDbVersion, - $bCompress, - $bHardLink, - $oBackupManifest, - $strBackupLabel, - $strLsnStart, - ) = - logDebugParam - ( - __PACKAGE__ . '->processManifest', \@_, - {name => 'strDbMasterPath'}, - {name => 'strDbCopyPath'}, - {name => 'strType'}, - {name => 'strDbVersion'}, - {name => 'bCompress'}, - {name => 'bHardLink'}, - {name => 'oBackupManifest'}, - {name => 'strBackupLabel'}, - {name => 'strLsnStart', required => false}, - ); - - # Get the master protocol for keep-alive - my $oProtocolMaster = - !isDbLocal({iRemoteIdx => $self->{iMasterRemoteIdx}}) ? 
- protocolGet(CFGOPTVAL_REMOTE_TYPE_DB, $self->{iMasterRemoteIdx}) : undef; - defined($oProtocolMaster) && $oProtocolMaster->noOp(); - - # Initialize the backup process - my $oBackupProcess = new pgBackRest::Protocol::Local::Process(CFGOPTVAL_LOCAL_TYPE_DB); - - if ($self->{iCopyRemoteIdx} != $self->{iMasterRemoteIdx}) - { - $oBackupProcess->hostAdd($self->{iMasterRemoteIdx}, 1); - } - - $oBackupProcess->hostAdd($self->{iCopyRemoteIdx}, cfgOption(CFGOPT_PROCESS_MAX)); - - # Variables used for parallel copy - my $lFileTotal = 0; - my $lSizeTotal = 0; - - # If this is a full backup or hard-linked then create all paths and tablespace links - if ($bHardLink || $strType eq CFGOPTVAL_BACKUP_TYPE_FULL) - { - # Create paths - foreach my $strPath ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_PATH)) - { - storageRepo()->pathCreate(STORAGE_REPO_BACKUP . "/${strBackupLabel}/${strPath}", {bIgnoreExists => true}); - } - - if (storageRepo()->capability(STORAGE_CAPABILITY_LINK)) - { - for my $strTarget ($oBackupManifest->keys(MANIFEST_SECTION_BACKUP_TARGET)) - { - if ($oBackupManifest->isTargetTablespace($strTarget)) - { - storageRepo()->linkCreate( - STORAGE_REPO_BACKUP . "/${strBackupLabel}/${strTarget}", - STORAGE_REPO_BACKUP . "/${strBackupLabel}/" . MANIFEST_TARGET_PGDATA . "/${strTarget}", - {bRelative => true}); - } - } - } - } - - # Iterate all files in the manifest - foreach my $strRepoFile ( - sort {sprintf("%016d-%s", $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $b, MANIFEST_SUBKEY_SIZE), $b) cmp - sprintf("%016d-%s", $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $a, MANIFEST_SUBKEY_SIZE), $a)} - ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_FILE, INI_SORT_NONE))) - { - # If the file has a reference it does not need to be copied since it can be retrieved from the referenced backup - unless - # the option to checksum all files is set. 
However, if hardlinking is enabled the link will need to be created - my $strReference = $oBackupManifest->get(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REFERENCE, false); - - if (defined($strReference)) - { - # If the delta option to checksum all files is not set or it is set and the file size of the referenced file is zero - # then skip checking/copying this file - if (!cfgOption(CFGOPT_DELTA) || - $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE) == 0) - { - # This file will not need to be copied - next; - } - } - - # By default put everything into a single queue - my $strQueueKey = MANIFEST_TARGET_PGDATA; - - # If the file belongs in a tablespace then put in a tablespace-specific queue - if (index($strRepoFile, DB_PATH_PGTBLSPC . '/') == 0) - { - $strQueueKey = DB_PATH_PGTBLSPC . '/' . (split('\/', $strRepoFile))[1]; - } - - # Create the file hash - my $bIgnoreMissing = true; - my $strDbFile = $oBackupManifest->dbPathGet($strDbCopyPath, $strRepoFile); - my $iHostConfigIdx = $self->{iCopyRemoteIdx}; - - # Certain files must be copied from the master - if ($oBackupManifest->boolGet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_MASTER)) - { - $strDbFile = $oBackupManifest->dbPathGet($strDbMasterPath, $strRepoFile); - $iHostConfigIdx = $self->{iMasterRemoteIdx}; - } - - # Make sure that pg_control is not removed during the backup - if ($strRepoFile eq MANIFEST_TARGET_PGDATA . '/' . 
DB_FILE_PGCONTROL) - { - $bIgnoreMissing = false; - } - - # Increment file total and size - my $lSize = $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE); - - $lFileTotal++; - $lSizeTotal += $lSize; - - # Queue for parallel backup - $oBackupProcess->queueJob( - $iHostConfigIdx, $strQueueKey, $strRepoFile, OP_BACKUP_FILE, - [$strDbFile, $bIgnoreMissing, $lSize, - $oBackupManifest->get(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, false), - cfgOption(CFGOPT_CHECKSUM_PAGE) ? isChecksumPage($strRepoFile) : false, - defined($strLsnStart) ? hex((split('/', $strLsnStart))[0]) : 0xFFFFFFFF, - defined($strLsnStart) ? hex((split('/', $strLsnStart))[1]) : 0xFFFFFFFF, - $strRepoFile, defined($strReference) ? true : false, $bCompress, cfgOption(CFGOPT_COMPRESS_LEVEL), - $strBackupLabel, cfgOption(CFGOPT_DELTA)], - {rParamSecure => $oBackupManifest->cipherPassSub() ? [$oBackupManifest->cipherPassSub()] : undef}); - - # Size and checksum will be removed and then verified later as a sanity check - $oBackupManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE); - $oBackupManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM); - } - - # pg_control should always be in the backup (unless this is an offline backup) - if (!$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, MANIFEST_FILE_PGCONTROL) && cfgOption(CFGOPT_ONLINE)) - { - confess &log(ERROR, DB_FILE_PGCONTROL . " must be present in all online backups\n" . - 'HINT: is something wrong with the clock or filesystem timestamps?', ERROR_FILE_MISSING); - } - - # If there are no files to backup then we'll exit with an error unless in test mode. The other way this could happen is if - # the database is down and backup is called with --no-online twice in a row. 
- if ($lFileTotal == 0) - { - confess &log(ERROR, "no files have changed since the last backup - this seems unlikely", ERROR_FILE_MISSING); - } - - # Running total of bytes copied - my $lSizeCurrent = 0; - - # Determine how often the manifest will be saved - my $lManifestSaveCurrent = 0; - my $lManifestSaveSize = int($lSizeTotal / 100); - - if (cfgOptionSource(CFGOPT_MANIFEST_SAVE_THRESHOLD) ne CFGDEF_SOURCE_DEFAULT || - $lManifestSaveSize < cfgOption(CFGOPT_MANIFEST_SAVE_THRESHOLD)) - { - $lManifestSaveSize = cfgOption(CFGOPT_MANIFEST_SAVE_THRESHOLD); - } - - # Run the backup jobs and process results - while (my $hyJob = $oBackupProcess->process()) - { - foreach my $hJob (@{$hyJob}) - { - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, cfgOption(cfgOptionIdFromIndex(CFGOPT_PG_HOST, $hJob->{iHostConfigIdx}), false), - $hJob->{iProcessId}, @{$hJob->{rParam}}[0], @{$hJob->{rParam}}[7], @{$hJob->{rParam}}[2], @{$hJob->{rParam}}[3], - @{$hJob->{rParam}}[4], @{$hJob->{rResult}}, $lSizeTotal, $lSizeCurrent, $lManifestSaveSize, - $lManifestSaveCurrent); - } - - # A keep-alive is required here because if there are a large number of resumed files that need to be checksummed - # then the remote might timeout while waiting for a command. - protocolKeepAlive(); - } - - foreach my $strFile ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_FILE)) - { - # If the file has a reference, then it was not copied since it can be retrieved from the referenced backup. However, if - # hardlinking is enabled the link will need to be created. - my $strReference = $oBackupManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_REFERENCE, false); - - if ($strReference) - { - # If hardlinking is enabled then create a hardlink for files that have not changed since the last backup - if ($bHardLink) - { - &log(DETAIL, "hardlink ${strFile} to ${strReference}"); - - storageRepo()->linkCreate( - STORAGE_REPO_BACKUP . "/${strReference}/${strFile}" . 
($bCompress ? qw{.} . COMPRESS_EXT : ''), - STORAGE_REPO_BACKUP . "/${strBackupLabel}/${strFile}" . ($bCompress ? qw{.} . COMPRESS_EXT : ''), - {bHard => true}); - } - # Else log the reference. With delta, it is possible that references may have been removed if a file needed to be - # recopied. - else - { - logDebugMisc($strOperation, "reference ${strFile} to ${strReference}"); - } - } - } - - # Validate the manifest - $oBackupManifest->validate(); - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'lSizeTotal', value => $lSizeTotal} - ); -} - -#################################################################################################################################### -# process -# -# Process the database backup. -#################################################################################################################################### -sub process -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my ($strOperation) = logDebugParam(__PACKAGE__ . 
'->process'); - - # Record timestamp start - my $lTimestampStart = time(); - - # Initialize the local file object - my $oStorageRepo = storageRepo(); - - # Store local type, compress, and hardlink options since they can be modified by the process - my $strType = cfgOption(CFGOPT_TYPE); - my $bCompress = cfgOption(CFGOPT_COMPRESS); - my $bHardLink = cfgOption(CFGOPT_REPO_HARDLINK); - - # Load the backup.info - my $oBackupInfo = new pgBackRest::Backup::Info($oStorageRepo->pathGet(STORAGE_REPO_BACKUP)); - - # Get passphrase to open manifest (undefined if repo not encrypted) and initialize passphrase variable for backup files - my $strCipherPassManifest = $oBackupInfo->cipherPassSub(); - my $strCipherPassBackupSet; - - # Initialize database objects - my $oDbMaster = undef; - my $oDbStandby = undef; - - # Get the database objects - ($oDbMaster, $self->{iMasterRemoteIdx}, $oDbStandby, $self->{iCopyRemoteIdx}) = dbObjectGet(); - - # If remote copy was not explicitly set then set it equal to master - if (!defined($self->{iCopyRemoteIdx})) - { - $self->{iCopyRemoteIdx} = $self->{iMasterRemoteIdx}; - } - - # If backup from standby option is set but a standby was not configured in the config file or on the command line, then turn off - # CFGOPT_BACKUP_STANDBY & warn that backups will be performed from the master. - if (!defined($oDbStandby) && cfgOption(CFGOPT_BACKUP_STANDBY)) - { - cfgOptionSet(CFGOPT_BACKUP_STANDBY, false); - &log(WARN, 'option backup-standby is enabled but standby is not properly configured - ' . 
- 'backups will be performed from the master'); - } - - # Initialize the master file object - my $oStorageDbMaster = storageDb({iRemoteIdx => $self->{iMasterRemoteIdx}}); - - # Determine the database paths - my $strDbMasterPath = cfgOption(cfgOptionIdFromIndex(CFGOPT_PG_PATH, $self->{iMasterRemoteIdx})); - my $strDbCopyPath = cfgOption(cfgOptionIdFromIndex(CFGOPT_PG_PATH, $self->{iCopyRemoteIdx})); - - # Database info - my ($strDbVersion, $iControlVersion, $iCatalogVersion, $ullDbSysId) = $oDbMaster->info(); - - my $iDbHistoryId = $oBackupInfo->check($strDbVersion, $iControlVersion, $iCatalogVersion, $ullDbSysId); - - # Find the previous backup based on the type - my $oLastManifest; - my $strBackupLastPath; - my $strTimelineLast; - - if ($strType ne CFGOPTVAL_BACKUP_TYPE_FULL) - { - $strBackupLastPath = $oBackupInfo->last( - $strType eq CFGOPTVAL_BACKUP_TYPE_DIFF ? CFGOPTVAL_BACKUP_TYPE_FULL : CFGOPTVAL_BACKUP_TYPE_INCR); - - # If there is a prior backup and it is for the current database, then use it as base - if (defined($strBackupLastPath) && $oBackupInfo->confirmDb($strBackupLastPath, $strDbVersion, $ullDbSysId)) - { - $oLastManifest = new pgBackRest::Manifest( - $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . "/${strBackupLastPath}/" . FILE_MANIFEST), - {strCipherPass => $strCipherPassManifest}); - - # If the repo is encrypted then use the passphrase in this manifest for the backup set - $strCipherPassBackupSet = $oLastManifest->cipherPassSub(); - - # Get archive segment timeline for determining if a timeline switch has occurred. Only defined for prior online backup. - if ($oLastManifest->test(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP)) - { - $strTimelineLast = substr($oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP), 0, 8); - } - - &log(INFO, 'last backup label = ' . $oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL) . - ', version = ' . 
$oLastManifest->get(INI_SECTION_BACKREST, INI_KEY_VERSION)); - - # If this is incr or diff warn if certain options have changed - my $strKey; - - # Warn if compress option changed - if (!$oLastManifest->boolTest(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, $bCompress)) - { - &log(WARN, "${strType} backup cannot alter compress option to '" . boolFormat($bCompress) . - "', reset to value in ${strBackupLastPath}"); - $bCompress = $oLastManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS); - } - - # Warn if hardlink option changed - if (!$oLastManifest->boolTest(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, $bHardLink)) - { - &log(WARN, "${strType} backup cannot alter hardlink option to '" . boolFormat($bHardLink) . - "', reset to value in ${strBackupLastPath}"); - $bHardLink = $oLastManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK); - } - } - else - { - &log(WARN, "no prior backup exists, ${strType} backup has been changed to full"); - $strType = CFGOPTVAL_BACKUP_TYPE_FULL; - $strBackupLastPath = undef; - } - } - - # Search cluster directory for an aborted backup - my $strBackupLabel; - my $oAbortedManifest; - my $strBackupPath; - my $strTimelineAborted; - - foreach my $strAbortedBackup ($oStorageRepo->list( - STORAGE_REPO_BACKUP, {strExpression => backupRegExpGet(true, true, true), strSortOrder => 'reverse'})) - { - # Aborted backups have a copy of the manifest but no main - if ($oStorageRepo->exists(STORAGE_REPO_BACKUP . "/${strAbortedBackup}/" . FILE_MANIFEST_COPY) && - !$oStorageRepo->exists(STORAGE_REPO_BACKUP . "/${strAbortedBackup}/" . FILE_MANIFEST)) - { - my $bUsable; - my $strReason = "resume is disabled"; - $strBackupPath = $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . "/${strAbortedBackup}"); - - # Attempt to read the manifest file in the aborted backup to see if it can be used. 
If any error at all occurs then the - # backup will be considered unusable and a resume will not be attempted. - if (cfgOption(CFGOPT_RESUME)) - { - $strReason = "unable to read ${strBackupPath}/" . FILE_MANIFEST; - - eval - { - # Load the aborted manifest - $oAbortedManifest = new pgBackRest::Manifest("${strBackupPath}/" . FILE_MANIFEST, - {strCipherPass => $strCipherPassManifest}); - - # Key and values that do not match - my $strKey; - my $strValueNew; - my $strValueAborted; - - # Check version - if ($oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_VERSION) ne PROJECT_VERSION) - { - $strKey = INI_KEY_VERSION; - $strValueNew = PROJECT_VERSION; - $strValueAborted = $oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_VERSION); - } - # Check format - elsif ($oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_FORMAT) ne REPOSITORY_FORMAT) - { - $strKey = INI_KEY_FORMAT; - $strValueNew = REPOSITORY_FORMAT; - $strValueAborted = $oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_FORMAT); - } - # Check backup type - elsif ($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE) ne $strType) - { - $strKey = MANIFEST_KEY_TYPE; - $strValueNew = $strType; - $strValueAborted = $oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE); - } - # Check prior label - elsif ($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, false, '') ne - (defined($strBackupLastPath) ? $strBackupLastPath : '')) - { - $strKey = MANIFEST_KEY_PRIOR; - $strValueNew = defined($strBackupLastPath) ? 
$strBackupLastPath : ''; - $strValueAborted = - $oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, false, ''); - } - # Check compression - elsif ($oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS) != - cfgOption(CFGOPT_COMPRESS)) - { - $strKey = MANIFEST_KEY_COMPRESS; - $strValueNew = cfgOption(CFGOPT_COMPRESS); - $strValueAborted = $oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS); - } - # Check hardlink - elsif ($oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK) != - cfgOption(CFGOPT_REPO_HARDLINK)) - { - $strKey = MANIFEST_KEY_HARDLINK; - $strValueNew = cfgOption(CFGOPT_REPO_HARDLINK); - $strValueAborted = $oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK); - } - - # If key is defined then something didn't match - if (defined($strKey)) - { - $strReason = "new ${strKey} '${strValueNew}' does not match aborted ${strKey} '${strValueAborted}'"; - } - # Else the backup can be resumed - else - { - $bUsable = true; - } - - return true; - } - or do - { - $bUsable = false; - } - } - - # If the backup is usable then set the backup label - if ($bUsable) - { - $strBackupLabel = $strAbortedBackup; - - # If the repo is encrypted, set the backup set passphrase from this manifest - if (defined($strCipherPassManifest)) - { - $strCipherPassBackupSet = $oAbortedManifest->cipherPassSub(); - } - - # Get the archive segment timeline for determining if a timeline switch has occurred. Only defined for prior online - # backup. 
- if ($oAbortedManifest->test(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP)) - { - $strTimelineAborted = substr($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP), 0, 8); - } - elsif ($oAbortedManifest->test(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START)) - { - $strTimelineAborted = substr($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START), 0, 8); - } - } - else - { - &log(WARN, "aborted backup ${strAbortedBackup} cannot be resumed: ${strReason}"); - - $oStorageRepo->pathRemove(STORAGE_REPO_BACKUP . "/${strAbortedBackup}", {bRecurse => true}); - undef($oAbortedManifest); - } - - last; - } - } - - # Generate a passphrase for the backup set if the repo is encrypted - if (defined($strCipherPassManifest) && !defined($strCipherPassBackupSet) && $strType eq CFGOPTVAL_BACKUP_TYPE_FULL) - { - $strCipherPassBackupSet = cipherPassGen(); - } - - # If backup label is not defined then create the label and path. - if (!defined($strBackupLabel)) - { - $strBackupLabel = backupLabel($oStorageRepo, $strType, $strBackupLastPath, $lTimestampStart); - $strBackupPath = $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . "/${strBackupLabel}"); - } - - # Declare the backup manifest. Since the manifest could be an aborted backup, don't load it from the file here. - # Instead just instantiate it. Pass the passphrases to open the manifest and one to encrypt the backup files if the repo is - # encrypted (undefined if not). - my $oBackupManifest = new pgBackRest::Manifest("$strBackupPath/" . FILE_MANIFEST, - {bLoad => false, strDbVersion => $strDbVersion, iDbCatalogVersion => $iCatalogVersion, - strCipherPass => defined($strCipherPassManifest) ? $strCipherPassManifest : undef, - strCipherPassSub => defined($strCipherPassManifest) ? 
$strCipherPassBackupSet : undef}); - - # Backup settings - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE, undef, $strType); - $oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_START, undef, $lTimestampStart); - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BACKUP_STANDBY, undef, cfgOption(CFGOPT_BACKUP_STANDBY)); - $oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BUFFER_SIZE, undef, cfgOption(CFGOPT_BUFFER_SIZE)); - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, $bCompress); - $oBackupManifest->numericSet( - MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS_LEVEL, undef, cfgOption(CFGOPT_COMPRESS_LEVEL)); - $oBackupManifest->numericSet( - MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS_LEVEL_NETWORK, undef, cfgOption(CFGOPT_COMPRESS_LEVEL_NETWORK)); - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, $bHardLink); - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ONLINE, undef, cfgOption(CFGOPT_ONLINE)); - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ARCHIVE_COPY, undef, - !cfgOption(CFGOPT_ONLINE) || - (cfgOption(CFGOPT_ARCHIVE_CHECK) && cfgOption(CFGOPT_ARCHIVE_COPY))); - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ARCHIVE_CHECK, undef, - cfgOption(CFGOPT_ONLINE) && cfgOption(CFGOPT_ARCHIVE_CHECK)); - $oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_PROCESS_MAX, undef, cfgOption(CFGOPT_PROCESS_MAX)); - - # Database settings - $oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_DB, MANIFEST_KEY_DB_ID, undef, $iDbHistoryId); - $oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_DB, MANIFEST_KEY_CONTROL, undef, $iControlVersion); - $oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_DB, MANIFEST_KEY_SYSTEM_ID, undef, $ullDbSysId); - - # Backup from standby can only be used on 
PostgreSQL >= 9.1 - if (cfgOption(CFGOPT_ONLINE) && cfgOption(CFGOPT_BACKUP_STANDBY) && $strDbVersion < PG_VERSION_BACKUP_STANDBY) - { - confess &log(ERROR, - 'option \'' . cfgOptionName(CFGOPT_BACKUP_STANDBY) . '\' not valid for PostgreSQL < ' . PG_VERSION_BACKUP_STANDBY, - ERROR_CONFIG); - } - - # Start backup (unless --no-online is set) - my $strArchiveStart = undef; - my $strLsnStart = undef; - my $iWalSegmentSize = undef; - my $hTablespaceMap = undef; - my $hDatabaseMap = undef; - my $strTimelineCurrent = undef; - - # If this is an offline backup - if (!cfgOption(CFGOPT_ONLINE)) - { - # If checksum-page is not explicitly enabled then disable it. Even if the version is high enough to have checksums we can't - # know if they are enabled without asking the database. When pg_control can be reliably parsed then this decision could be - # based on that. - if (!cfgOptionTest(CFGOPT_CHECKSUM_PAGE)) - { - cfgOptionSet(CFGOPT_CHECKSUM_PAGE, false); - } - - # Check if Postgres is running and if so only continue when forced - if ($oStorageDbMaster->exists($strDbMasterPath . '/' . DB_FILE_POSTMASTERPID)) - { - if (cfgOption(CFGOPT_FORCE)) - { - &log(WARN, '--no-online passed and ' . DB_FILE_POSTMASTERPID . ' exists but --force was passed so backup will ' . - 'continue though it looks like the postmaster is running and the backup will probably not be ' . - 'consistent'); - } - else - { - confess &log(ERROR, '--no-online passed but ' . DB_FILE_POSTMASTERPID . ' exists - looks like the postmaster is ' . - 'running. Shutdown the postmaster and try again, or use --force.', ERROR_POSTMASTER_RUNNING); - } - } - } - # Else start the backup normally - else - { - # Start the backup - ($strArchiveStart, $strLsnStart, $iWalSegmentSize) = - $oDbMaster->backupStart( - PROJECT_NAME . ' backup started at ' . 
timestampFormat(undef, $lTimestampStart), cfgOption(CFGOPT_START_FAST)); - - # Record the archive start location - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START, undef, $strArchiveStart); - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_START, undef, $strLsnStart); - &log(INFO, "backup start archive = ${strArchiveStart}, lsn = ${strLsnStart}"); - - # Get the timeline from the archive - $strTimelineCurrent = substr($strArchiveStart, 0, 8); - - # Get tablespace map - $hTablespaceMap = $oDbMaster->tablespaceMapGet(); - - # Get database map - $hDatabaseMap = $oDbMaster->databaseMapGet(); - - # Wait for replay on the standby to catch up - if (cfgOption(CFGOPT_BACKUP_STANDBY)) - { - my ($strStandbyDbVersion, $iStandbyControlVersion, $iStandbyCatalogVersion, $ullStandbyDbSysId) = $oDbStandby->info(); - $oBackupInfo->check($strStandbyDbVersion, $iStandbyControlVersion, $iStandbyCatalogVersion, $ullStandbyDbSysId); - - $oDbStandby->configValidate(); - - &log(INFO, "wait for replay on the standby to reach ${strLsnStart}"); - - my ($strReplayedLSN, $strCheckpointLSN) = $oDbStandby->replayWait($strLsnStart); - - &log( - INFO, - "replay on the standby reached ${strReplayedLSN}" . - (defined($strCheckpointLSN) ? ", checkpoint ${strCheckpointLSN}" : '')); - - # The standby db object won't be used anymore so undef it to catch any subsequent references - undef($oDbStandby); - protocolDestroy(CFGOPTVAL_REMOTE_TYPE_DB, $self->{iCopyRemoteIdx}, true); - } - } - - # Don't allow the checksum-page option to change in a diff or incr backup. This could be confusing as only certain files would - # be checksummed and the list could be incomplete during reporting. - if ($strType ne CFGOPTVAL_BACKUP_TYPE_FULL && defined($strBackupLastPath)) - { - # If not defined this backup was done in a version prior to page checksums being introduced. Just set checksum-page to - # false and move on without a warning. 
Page checksums will start on the next full backup. - if (!$oLastManifest->test(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE)) - { - cfgOptionSet(CFGOPT_CHECKSUM_PAGE, false); - } - else - { - my $bChecksumPageLast = - $oLastManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE); - - if ($bChecksumPageLast != cfgOption(CFGOPT_CHECKSUM_PAGE)) - { - &log(WARN, - "${strType} backup cannot alter '" . cfgOptionName(CFGOPT_CHECKSUM_PAGE) . "' option to '" . - boolFormat(cfgOption(CFGOPT_CHECKSUM_PAGE)) . "', reset to '" . boolFormat($bChecksumPageLast) . - "' from ${strBackupLastPath}"); - cfgOptionSet(CFGOPT_CHECKSUM_PAGE, $bChecksumPageLast); - } - } - } - - # Record checksum-page option in the manifest - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE, undef, cfgOption(CFGOPT_CHECKSUM_PAGE)); - - # Build the manifest. The delta option may have changed from false to true during the manifest build so set it to the result. - cfgOptionSet(CFGOPT_DELTA, $oBackupManifest->build( - $oStorageDbMaster, $strDbMasterPath, $oLastManifest, cfgOption(CFGOPT_ONLINE), cfgOption(CFGOPT_DELTA), $hTablespaceMap, - $hDatabaseMap, cfgOption(CFGOPT_EXCLUDE, false), $strTimelineCurrent, $strTimelineLast)); - - # If resuming from an aborted backup - if (defined($oAbortedManifest)) - { - &log(WARN, "aborted backup ${strBackupLabel} of same type exists, will be cleaned to remove invalid files and resumed"); - - # Clean the backup path before resuming. The delta option may have changed from false to true during the resume clean - # so set it to the result. 
- cfgOptionSet(CFGOPT_DELTA, $self->resumeClean($oStorageRepo, $strBackupLabel, $oBackupManifest, $oAbortedManifest, - cfgOption(CFGOPT_ONLINE), cfgOption(CFGOPT_DELTA), $strTimelineCurrent, $strTimelineAborted)); - } - # Else create the backup path - else - { - logDebugMisc($strOperation, "create backup path ${strBackupPath}"); - $oStorageRepo->pathCreate(STORAGE_REPO_BACKUP . "/${strBackupLabel}"); - } - - # Set the delta option in the manifest - $oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_DELTA, undef, cfgOption(CFGOPT_DELTA)); - - # Save the backup manifest - $oBackupManifest->saveCopy(); - - # Perform the backup - my $lBackupSizeTotal = - $self->processManifest( - $strDbMasterPath, $strDbCopyPath, $strType, $strDbVersion, $bCompress, $bHardLink, $oBackupManifest, $strBackupLabel, - $strLsnStart); - &log(INFO, "${strType} backup size = " . fileSizeFormat($lBackupSizeTotal)); - - # Master file object no longer needed - undef($oStorageDbMaster); - - # Stop backup (unless --no-online is set) - my $strArchiveStop = undef; - my $strLsnStop = undef; - - if (cfgOption(CFGOPT_ONLINE)) - { - ($strArchiveStop, $strLsnStop, my $strTimestampDbStop, my $oFileHash) = $oDbMaster->backupStop(); - - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP, undef, $strArchiveStop); - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_STOP, undef, $strLsnStop); - &log(INFO, "backup stop archive = ${strArchiveStop}, lsn = ${strLsnStop}"); - - # Write out files returned from stop backup - foreach my $strFile (sort(keys(%{$oFileHash}))) - { - # Only save the file if it has content - if (defined($oFileHash->{$strFile})) - { - my $rhyFilter = [{strClass => STORAGE_FILTER_SHA}]; - - # Add compression filter - if ($bCompress) - { - push( - @{$rhyFilter}, - {strClass => STORAGE_FILTER_GZIP, rxyParam => [STORAGE_COMPRESS, false, cfgOption(CFGOPT_COMPRESS_LEVEL)]}); - } - - # If the backups are encrypted, then the passphrase for the 
backup set from the manifest file is required to access - # the file in the repo - my $oDestinationFileIo = $oStorageRepo->openWrite( - STORAGE_REPO_BACKUP . "/${strBackupLabel}/${strFile}" . ($bCompress ? qw{.} . COMPRESS_EXT : ''), - {rhyFilter => $rhyFilter, - strCipherPass => defined($strCipherPassBackupSet) ? $strCipherPassBackupSet : undef}); - - # Write content out to a file - $oStorageRepo->put($oDestinationFileIo, $oFileHash->{$strFile}); - - # Add file to manifest - $oBackupManifest->fileAdd( - $strFile, time(), length($oFileHash->{$strFile}), $oDestinationFileIo->result(STORAGE_FILTER_SHA), true); - - &log(DETAIL, "wrote '${strFile}' file returned from pg_stop_backup()"); - } - } - } - - # Remotes no longer needed (destroy them here so they don't timeout) - undef($oDbMaster); - protocolDestroy(undef, undef, true); - - # If archive logs are required to complete the backup, then check them. This is the default, but can be overridden if the - # archive logs are going to a different server. Be careful of this option because there is no way to verify that the backup - # will be consistent - at least not here. - if (cfgOption(CFGOPT_ONLINE) && cfgOption(CFGOPT_ARCHIVE_CHECK)) - { - # Save the backup manifest before getting archive logs in case of failure - $oBackupManifest->saveCopy(); - - # Create the modification time for the archive logs - my $lModificationTime = time(); - - # After the backup has been stopped, need to make a copy of the archive logs to make the db consistent - logDebugMisc($strOperation, "retrieve archive logs ${strArchiveStart}:${strArchiveStop}"); - - my $oArchiveInfo = new pgBackRest::Archive::Info(storageRepo()->pathGet(STORAGE_REPO_ARCHIVE), true); - my $strArchiveId = $oArchiveInfo->archiveId(); - my @stryArchive = lsnFileRange($strLsnStart, $strLsnStop, $strDbVersion, $iWalSegmentSize); - - foreach my $strArchive (@stryArchive) - { - my $strArchiveFile = walSegmentFind( - $oStorageRepo, $strArchiveId, substr($strArchiveStop, 0, 8) . 
$strArchive, cfgOption(CFGOPT_ARCHIVE_TIMEOUT)); - - $strArchive = substr($strArchiveFile, 0, 24); - - if (cfgOption(CFGOPT_ARCHIVE_COPY)) - { - logDebugMisc($strOperation, "archive: ${strArchive} (${strArchiveFile})"); - - # Copy the log file from the archive repo to the backup - my $bArchiveCompressed = $strArchiveFile =~ ('^.*\.' . COMPRESS_EXT . '\$'); - - $oStorageRepo->copy( - $oStorageRepo->openRead(STORAGE_REPO_ARCHIVE . "/${strArchiveId}/${strArchiveFile}", - {strCipherPass => $oArchiveInfo->cipherPassSub()}), - $oStorageRepo->openWrite(STORAGE_REPO_BACKUP . "/${strBackupLabel}/" . MANIFEST_TARGET_PGDATA . qw{/} . - $oBackupManifest->walPath() . "/${strArchive}" . ($bCompress ? qw{.} . COMPRESS_EXT : ''), - {bPathCreate => true, strCipherPass => $strCipherPassBackupSet}) - ); - - # Add the archive file to the manifest so it can be part of the restore and checked in validation - my $strPathLog = MANIFEST_TARGET_PGDATA . qw{/} . $oBackupManifest->walPath(); - my $strFileLog = "${strPathLog}/${strArchive}"; - - # Add file to manifest - $oBackupManifest->fileAdd( - $strFileLog, $lModificationTime, PG_WAL_SEGMENT_SIZE, substr($strArchiveFile, 25, 40), true); - } - } - } - - # Record timestamp stop in the config - my $lTimestampStop = time(); - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_STOP, undef, $lTimestampStop + 0); - $oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL, undef, $strBackupLabel); - - # Sync backup path if supported - if ($oStorageRepo->capability(STORAGE_CAPABILITY_PATH_SYNC)) - { - # Sync all paths in the backup - $oStorageRepo->pathSync(STORAGE_REPO_BACKUP . "/${strBackupLabel}"); - - foreach my $strPath ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_PATH)) - { - my $strPathSync = $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . 
"/${strBackupLabel}/$strPath"); - - # Not all paths are created for diff/incr backups, so only sync if this is a full backup or the path exists - if ($strType eq CFGOPTVAL_BACKUP_TYPE_FULL || $oStorageRepo->pathExists($strPathSync)) - { - $oStorageRepo->pathSync($strPathSync); - } - } - } - - # Final save of the backup manifest - $oBackupManifest->save(); - - &log(INFO, "new backup label = ${strBackupLabel}"); - - # Copy a compressed version of the manifest to history. If the repo is encrypted then the passphrase to open the manifest is - # required. - my $strHistoryPath = $oStorageRepo->pathGet( - STORAGE_REPO_BACKUP . qw{/} . PATH_BACKUP_HISTORY . qw{/} . substr($strBackupLabel, 0, 4)); - - $oStorageRepo->copy( - $oStorageRepo->openRead(STORAGE_REPO_BACKUP . "/${strBackupLabel}/" . FILE_MANIFEST, - {'strCipherPass' => $strCipherPassManifest}), - $oStorageRepo->openWrite( - "${strHistoryPath}/${strBackupLabel}.manifest." . COMPRESS_EXT, - {rhyFilter => [{strClass => STORAGE_FILTER_GZIP, rxyParam => [STORAGE_COMPRESS, false, 9]}], - bPathCreate => true, bAtomic => true, - strCipherPass => defined($strCipherPassManifest) ? $strCipherPassManifest : undef})); - - # Sync history path if supported - if ($oStorageRepo->capability(STORAGE_CAPABILITY_PATH_SYNC)) - { - $oStorageRepo->pathSync(STORAGE_REPO_BACKUP . qw{/} . PATH_BACKUP_HISTORY); - $oStorageRepo->pathSync($strHistoryPath); - } - - # Create a link to the most recent backup - $oStorageRepo->remove(STORAGE_REPO_BACKUP . qw(/) . LINK_LATEST); - - if (storageRepo()->capability(STORAGE_CAPABILITY_LINK)) - { - $oStorageRepo->linkCreate( - STORAGE_REPO_BACKUP . "/${strBackupLabel}", STORAGE_REPO_BACKUP . qw{/} . 
LINK_LATEST, {bRelative => true}); - } - - # Save backup info - $oBackupInfo->add($oBackupManifest); - - # Sync backup root path if supported - if ($oStorageRepo->capability(STORAGE_CAPABILITY_PATH_SYNC)) - { - $oStorageRepo->pathSync(STORAGE_REPO_BACKUP); - } - - # Return from function and log return values if any - return logDebugReturn($strOperation); -} - -1; diff --git a/lib/pgBackRest/Backup/File.pm b/lib/pgBackRest/Backup/File.pm deleted file mode 100644 index e2325b37d..000000000 --- a/lib/pgBackRest/Backup/File.pm +++ /dev/null @@ -1,267 +0,0 @@ -#################################################################################################################################### -# BACKUP FILE MODULE -#################################################################################################################################### -package pgBackRest::Backup::File; - -use strict; -use warnings FATAL => qw(all); -use Carp qw(confess); - -use Exporter qw(import); - our @EXPORT = qw(); -use File::Basename qw(dirname); -use Storable qw(dclone); - -use pgBackRest::Common::Exception; -use pgBackRest::Common::Io::Handle; -use pgBackRest::Common::Log; -use pgBackRest::Common::String; -use pgBackRest::Config::Config; -use pgBackRest::DbVersion; -use pgBackRest::Manifest; -use pgBackRest::Protocol::Storage::Helper; -use pgBackRest::Storage::Base; -use pgBackRest::Storage::Helper; - -#################################################################################################################################### -# Result constants -#################################################################################################################################### -use constant BACKUP_FILE_CHECKSUM => 0; - push @EXPORT, qw(BACKUP_FILE_CHECKSUM); -use constant BACKUP_FILE_COPY => 1; - push @EXPORT, qw(BACKUP_FILE_COPY); -use constant BACKUP_FILE_RECOPY => 2; - push @EXPORT, qw(BACKUP_FILE_RECOPY); -use constant BACKUP_FILE_SKIP => 3; - push @EXPORT, 
qw(BACKUP_FILE_SKIP); -use constant BACKUP_FILE_NOOP => 4; - push @EXPORT, qw(BACKUP_FILE_NOOP); - -#################################################################################################################################### -# backupManifestUpdate -#################################################################################################################################### -sub backupManifestUpdate -{ - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $oManifest, - $strHost, - $iLocalId, - $strDbFile, - $strRepoFile, - $lSize, - $strChecksum, - $bChecksumPage, - $iCopyResult, - $lSizeCopy, - $lSizeRepo, - $strChecksumCopy, - $rExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent - ) = - logDebugParam - ( - __PACKAGE__ . '::backupManifestUpdate', \@_, - {name => 'oManifest', trace => true}, - {name => 'strHost', required => false, trace => true}, - {name => 'iLocalId', required => false, trace => true}, - - # Parameters to backupFile() - {name => 'strDbFile', trace => true}, - {name => 'strRepoFile', trace => true}, - {name => 'lSize', required => false, trace => true}, - {name => 'strChecksum', required => false, trace => true}, - {name => 'bChecksumPage', trace => true}, - - # Results from backupFile() - {name => 'iCopyResult', trace => true}, - {name => 'lSizeCopy', required => false, trace => true}, - {name => 'lSizeRepo', required => false, trace => true}, - {name => 'strChecksumCopy', required => false, trace => true}, - {name => 'rExtra', required => false, trace => true}, - - # Accumulators - {name => 'lSizeTotal', trace => true}, - {name => 'lSizeCurrent', trace => true}, - {name => 'lManifestSaveSize', trace => true}, - {name => 'lManifestSaveCurrent', trace => true} - ); - - # Increment current backup progress - $lSizeCurrent += $lSize; - - # If the file is in a prior backup and nothing changed, then nothing needs to be done - if ($iCopyResult == BACKUP_FILE_NOOP) - { - # 
File copy was not needed so just restore the size and checksum to the manifest - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE, $lSizeCopy); - $oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksumCopy); - - &log(DETAIL, - 'match file from prior backup ' . (defined($strHost) ? "${strHost}:" : '') . "${strDbFile} (" . - fileSizeFormat($lSizeCopy) . ', ' . int($lSizeCurrent * 100 / $lSizeTotal) . '%)' . - ($lSizeCopy != 0 ? " checksum ${strChecksumCopy}" : ''), - undef, undef, undef, $iLocalId); - } - # Else process the results - else - { - # Log invalid checksum - if ($iCopyResult == BACKUP_FILE_RECOPY) - { - &log( - WARN, - "resumed backup file ${strRepoFile} does not have expected checksum ${strChecksum}. The file will be recopied and" . - " backup will continue but this may be an issue unless the resumed backup path in the repository is known to be" . - " corrupted.\n" . - "NOTE: this does not indicate a problem with the PostgreSQL page checksums."); - } - - # If copy was successful store the checksum and size - if ($iCopyResult == BACKUP_FILE_COPY || $iCopyResult == BACKUP_FILE_RECOPY || $iCopyResult == BACKUP_FILE_CHECKSUM) - { - # Log copy or checksum - &log($iCopyResult == BACKUP_FILE_CHECKSUM ? DETAIL : INFO, - ($iCopyResult == BACKUP_FILE_CHECKSUM ? - 'checksum resumed file ' : 'backup file ' . (defined($strHost) ? "${strHost}:" : '')) . - "${strDbFile} (" . fileSizeFormat($lSizeCopy) . - ', ' . int($lSizeCurrent * 100 / $lSizeTotal) . '%)' . - ($lSizeCopy != 0 ? 
" checksum ${strChecksumCopy}" : ''), undef, undef, undef, $iLocalId); - - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE, $lSizeCopy); - - if ($lSizeRepo != $lSizeCopy) - { - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REPO_SIZE, $lSizeRepo); - } - - if ($lSizeCopy > 0) - { - $oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksumCopy); - } - - # If the file was copied, then remove any reference to the file's existence in a prior backup. - if ($iCopyResult == BACKUP_FILE_COPY || $iCopyResult == BACKUP_FILE_RECOPY) - { - $oManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REFERENCE); - } - - # If the file had page checksums calculated during the copy - if ($bChecksumPage) - { - # The valid flag should be set - if (defined($rExtra->{valid})) - { - # Store the valid flag - $oManifest->boolSet( - MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, $rExtra->{valid}); - - # If the page was not valid - if (!$rExtra->{valid}) - { - # Check for a page misalignment - if ($lSizeCopy % PG_PAGE_SIZE != 0) - { - # Make sure the align flag was set, otherwise there is a bug - if (!defined($rExtra->{align}) || $rExtra->{align}) - { - confess &log(ASSERT, 'align flag should have been set for misaligned page'); - } - - # Emit a warning so the user knows something is amiss - &log(WARN, - 'page misalignment in file ' . (defined($strHost) ? "${strHost}:" : '') . - "${strDbFile}: file size ${lSizeCopy} is not divisible by page size " . PG_PAGE_SIZE); - } - # Else process the page check errors - else - { - $oManifest->set( - MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR, - dclone($rExtra->{error})); - - # Build a pretty list of the page errors - my $strPageError; - my $iPageErrorTotal = 0; - - foreach my $iyPage (@{$rExtra->{error}}) - { - $strPageError .= (defined($strPageError) ? 
', ' : ''); - - # If a range of pages - if (ref($iyPage)) - { - $strPageError .= $$iyPage[0] . '-' . $$iyPage[1]; - $iPageErrorTotal += ($$iyPage[1] - $$iyPage[0]) + 1; - } - # Else a single page - else - { - $strPageError .= $iyPage; - $iPageErrorTotal += 1; - } - } - - # There should be at least one page in the error list - if ($iPageErrorTotal == 0) - { - confess &log(ASSERT, 'page checksum error list should have at least one entry'); - } - - # Emit a warning so the user knows something is amiss - &log(WARN, - 'invalid page checksum' . ($iPageErrorTotal > 1 ? 's' : '') . - ' found in file ' . (defined($strHost) ? "${strHost}:" : '') . "${strDbFile} at page" . - ($iPageErrorTotal > 1 ? 's' : '') . " ${strPageError}"); - } - } - } - # If it's not set that's a bug in the code - elsif (!$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE)) - { - confess &log(ASSERT, "${strDbFile} should have calculated page checksums"); - } - } - } - # Else the file was removed during backup so remove from manifest - elsif ($iCopyResult == BACKUP_FILE_SKIP) - { - &log(DETAIL, 'skip file removed by database ' . (defined($strHost) ? "${strHost}:" : '') . 
$strDbFile); - $oManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile); - } - } - - # Determine whether to save the manifest - $lManifestSaveCurrent += $lSize; - - if ($lManifestSaveCurrent >= $lManifestSaveSize) - { - $oManifest->saveCopy(); - - logDebugMisc - ( - $strOperation, 'save manifest', - {name => 'lManifestSaveSize', value => $lManifestSaveSize}, - {name => 'lManifestSaveCurrent', value => $lManifestSaveCurrent} - ); - - $lManifestSaveCurrent = 0; - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'lSizeCurrent', value => $lSizeCurrent, trace => true}, - {name => 'lManifestSaveCurrent', value => $lManifestSaveCurrent, trace => true}, - ); -} - -push @EXPORT, qw(backupManifestUpdate); - -1; diff --git a/lib/pgBackRest/Db.pm b/lib/pgBackRest/Db.pm index cb612a97d..368896fd2 100644 --- a/lib/pgBackRest/Db.pm +++ b/lib/pgBackRest/Db.pm @@ -25,13 +25,6 @@ use pgBackRest::Protocol::Helper; use pgBackRest::Protocol::Storage::Helper; use pgBackRest::Version; -#################################################################################################################################### -# PostgreSQL 8.3 WAL size -# -# WAL segment size in 8.3 cannot be determined from pg_control, so use this constant instead. 
-#################################################################################################################################### -use constant PG_WAL_SIZE_83 => 16777216; - #################################################################################################################################### # Backup advisory lock #################################################################################################################################### @@ -330,61 +323,6 @@ sub executeSqlOne ); } -#################################################################################################################################### -# tablespaceMapGet -# -# Get the mapping between oid and tablespace name. -#################################################################################################################################### -sub tablespaceMapGet -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my ($strOperation) = logDebugParam(__PACKAGE__ . '->tablespaceMapGet'); - - my $hTablespaceMap = {}; - - for my $strRow (@{$self->executeSql('select oid, spcname from pg_tablespace')}) - { - $hTablespaceMap->{@{$strRow}[0]} = @{$strRow}[1]; - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'hTablespaceMap', value => $hTablespaceMap} - ); -} - -#################################################################################################################################### -# databaseMapGet -# -# Get the mapping between oid and database name. -#################################################################################################################################### -sub databaseMapGet -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my ($strOperation) = logDebugParam(__PACKAGE__ . 
'->databaseMapGet'); - - my $hDatabaseMap = {}; - - for my $strRow (@{$self->executeSql('select datname, oid, datlastsysoid from pg_database')}) - { - $hDatabaseMap->{@{$strRow}[0]}{&MANIFEST_KEY_DB_ID} = @{$strRow}[1]; - $hDatabaseMap->{@{$strRow}[0]}{&MANIFEST_KEY_DB_LAST_SYSTEM_ID} = @{$strRow}[2]; - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'hDatabaseMap', value => $hDatabaseMap} - ); -} - #################################################################################################################################### # info #################################################################################################################################### @@ -527,157 +465,6 @@ sub versionGet ); } -#################################################################################################################################### -# backupStart -#################################################################################################################################### -sub backupStart -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strLabel, - $bStartFast - ) = - logDebugParam - ( - __PACKAGE__ . '->backupStart', \@_, - {name => 'strLabel'}, - {name => 'bStartFast'} - ); - - # Validate the database configuration - $self->configValidate(); - - # Only allow start-fast option for version >= 8.4 - if ($self->{strDbVersion} < PG_VERSION_84 && $bStartFast) - { - &log(WARN, cfgOptionName(CFGOPT_START_FAST) . ' option is only available in PostgreSQL >= ' . 
PG_VERSION_84); - $bStartFast = false; - } - - # Determine if page checksums can be enabled - my $bChecksumPage = - $self->executeSqlOne("select count(*) = 1 from pg_settings where name = 'data_checksums' and setting = 'on'"); - - # If checksum page option is not explicitly set then set it to whatever the database says - if (!cfgOptionTest(CFGOPT_CHECKSUM_PAGE)) - { - cfgOptionSet(CFGOPT_CHECKSUM_PAGE, $bChecksumPage); - } - # Else if enabled make sure they are in the database as well, else throw a warning - elsif (cfgOption(CFGOPT_CHECKSUM_PAGE) && !$bChecksumPage) - { - &log(WARN, 'unable to enable page checksums since they are not enabled in the database'); - cfgOptionSet(CFGOPT_CHECKSUM_PAGE, false); - } - - # Acquire the backup advisory lock to make sure that backups are not running from multiple backup servers against the same - # database cluster. This lock helps make the stop-auto option safe. - if (!$self->executeSqlOne('select pg_try_advisory_lock(' . DB_BACKUP_ADVISORY_LOCK . ')')) - { - confess &log(ERROR, 'unable to acquire ' . PROJECT_NAME . " advisory lock\n" . - 'HINT: is another ' . PROJECT_NAME . ' backup already running on this cluster?', ERROR_LOCK_ACQUIRE); - } - - # If stop-auto is enabled check for a running backup. This feature is not supported for PostgreSQL >= 9.6 since backups are - # run in non-exclusive mode. - if (cfgOption(CFGOPT_STOP_AUTO) && $self->{strDbVersion} < PG_VERSION_96) - { - # Running backups can only be detected in PostgreSQL >= 9.3 - if ($self->{strDbVersion} >= PG_VERSION_93) - { - # If a backup is currently in progress emit a warning and then stop it - if ($self->executeSqlOne('select pg_is_in_backup()')) - { - &log(WARN, 'the cluster is already in backup mode but no ' . PROJECT_NAME . ' backup process is running.' . - ' pg_stop_backup() will be called so a new backup can be started.'); - $self->backupStop(); - } - } - # Else emit a warning that the feature is not supported and continue. 
If a backup is running then an error will be - # generated later on. - else - { - &log(WARN, cfgOptionName(CFGOPT_STOP_AUTO) . ' option is only available in PostgreSQL >= ' . PG_VERSION_93); - } - } - - # Start the backup - &log(INFO, 'execute ' . ($self->{strDbVersion} >= PG_VERSION_96 ? 'non-' : '') . - "exclusive pg_start_backup() with label \"${strLabel}\": backup begins after " . - ($bStartFast ? "the requested immediate checkpoint" : "the next regular checkpoint") . " completes"); - - my ($strTimestampDbStart, $strArchiveStart, $strLsnStart, $iWalSegmentSize) = $self->executeSqlRow( - "select to_char(current_timestamp, 'YYYY-MM-DD HH24:MI:SS.US TZ'), pg_" . $self->walId() . "file_name(lsn), lsn::text," . - ($self->{strDbVersion} < PG_VERSION_84 ? PG_WAL_SIZE_83 : - " (select setting::int8 from pg_settings where name = 'wal_segment_size')" . - # In Pre-11 versions the wal_segment_sise was expressed in terms of blocks rather than total size - ($self->{strDbVersion} < PG_VERSION_11 ? - " * (select setting::int8 from pg_settings where name = 'wal_block_size')" : '')) . - " from pg_start_backup('${strLabel}'" . - ($bStartFast ? ', true' : $self->{strDbVersion} >= PG_VERSION_84 ? ', false' : '') . - ($self->{strDbVersion} >= PG_VERSION_96 ? ', false' : '') . 
') as lsn'); - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'strArchiveStart', value => $strArchiveStart}, - {name => 'strLsnStart', value => $strLsnStart}, - {name => 'iWalSegmentSize', value => $iWalSegmentSize}, - {name => 'strTimestampDbStart', value => $strTimestampDbStart} - ); -} - -#################################################################################################################################### -# backupStop -#################################################################################################################################### -sub backupStop -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my ($strOperation) = logDebugParam(__PACKAGE__ . '->backupStop'); - - # Stop the backup - &log(INFO, 'execute ' . ($self->{strDbVersion} >= PG_VERSION_96 ? 'non-' : '') . - 'exclusive pg_stop_backup() and wait for all WAL segments to archive'); - - my ($strTimestampDbStop, $strArchiveStop, $strLsnStop, $strLabel, $strTablespaceMap) = - $self->executeSqlRow( - "select to_char(clock_timestamp(), 'YYYY-MM-DD HH24:MI:SS.US TZ'), pg_" . - $self->walId() . "file_name(lsn), lsn::text, " . - ($self->{strDbVersion} >= PG_VERSION_96 ? - 'labelfile, ' . - 'case when length(trim(both \'\t\n \' from spcmapfile)) = 0 then null else spcmapfile end as spcmapfile' : - 'null as labelfile, null as spcmapfile') . - ' from pg_stop_backup(' . - # Add flag to use non-exclusive backup - ($self->{strDbVersion} >= PG_VERSION_96 ? 'false' : '') . - # Add flag to exit immediately after backup stop rather than waiting for WAL to archive (this is checked later) - ($self->{strDbVersion} >= PG_VERSION_10 ? ', false' : '') . 
') as lsn'); - - # Build a hash of the files that need to be written to the backup - my $oFileHash = - { - &MANIFEST_FILE_BACKUPLABEL => $strLabel, - &MANIFEST_FILE_TABLESPACEMAP => $strTablespaceMap - }; - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'strArchiveStop', value => $strArchiveStop}, - {name => 'strLsnStop', value => $strLsnStop}, - {name => 'strTimestampDbStop', value => $strTimestampDbStop}, - {name => 'oFileHash', value => $oFileHash} - ); -} - #################################################################################################################################### # configValidate # @@ -761,18 +548,6 @@ sub walId return $self->{strDbVersion} >= PG_VERSION_10 ? 'wal' : 'xlog'; } -#################################################################################################################################### -# lsnId -# -# Returns 'lsn' or 'location' depending on the version of PostgreSQL. -#################################################################################################################################### -sub lsnId -{ - my $self = shift; - - return $self->{strDbVersion} >= PG_VERSION_10 ? 'lsn' : 'location'; -} - #################################################################################################################################### # isStandby # @@ -807,119 +582,6 @@ sub isStandby ); } -#################################################################################################################################### -# replayWait -# -# Waits for replay on the standby to equal specified LSN -#################################################################################################################################### -sub replayWait -{ - my $self = shift; - - # Assign function parameters, defaults, and log debug info - my - ( - $strOperation, - $strTargetLSN, - ) = - logDebugParam - ( - __PACKAGE__ . 
'->replayWait', \@_, - {name => 'strTargetLSN'} - ); - - # Load ArchiveCommon Module - require pgBackRest::Archive::Common; - pgBackRest::Archive::Common->import(); - - # Initialize working variables - my $oWait = waitInit(cfgOption(CFGOPT_ARCHIVE_TIMEOUT)); - my $bTimeout = true; - my $strReplayedLSN = undef; - - # Monitor the replay location - do - { - my $strLastWalReplayLsnFunction = - 'pg_last_' . $self->walId() . '_replay_' . $self->lsnId() . '()'; - - # Get the replay location - my $strLastReplayedLSN = $self->executeSqlOne( - "select coalesce(${strLastWalReplayLsnFunction}::text, '')"); - - # Error if the replay location could not be retrieved - if ($strLastReplayedLSN eq '') - { - confess &log( - ERROR, - "unable to query replay lsn on the standby using ${strLastWalReplayLsnFunction}\n" . - "Hint: Is this a standby?", - ERROR_ARCHIVE_TIMEOUT); - } - - # Is the replay lsn > target lsn? It needs to be greater because the checkpoint record is directly after the LSN returned - # by pg_start_backup(). - if (lsnNormalize($strLastReplayedLSN) ge lsnNormalize($strTargetLSN)) - { - $bTimeout = false; - } - else - { - # Reset the timer if the LSN is advancing - if (defined($strReplayedLSN) && - lsnNormalize($strLastReplayedLSN) gt lsnNormalize($strReplayedLSN) && - !waitMore($oWait)) - { - $oWait = waitInit(cfgOption(CFGOPT_ARCHIVE_TIMEOUT)); - } - } - - # Assigned last replayed to replayed - $strReplayedLSN = $strLastReplayedLSN; - - } while ($bTimeout && waitMore($oWait)); - - # Error if a timeout occurred before the target lsn was reached - if ($bTimeout == true) - { - confess &log( - ERROR, "timeout before standby replayed ${strTargetLSN} - only reached ${strReplayedLSN}", ERROR_ARCHIVE_TIMEOUT); - } - - # Perform a checkpoint - $self->executeSql('checkpoint', undef, false); - - # On PostgreSQL >= 9.6 the checkpoint location can be verified - # - # ??? We have seen one instance where this check failed. 
Is there any chance that the replayed position could be ahead of the - # checkpoint recorded in pg_control? It seems possible, so in the C version of this add a loop to keep checking pg_control - # until the checkpoint has been recorded. - my $strCheckpointLSN = undef; - - if ($self->{strDbVersion} >= PG_VERSION_96) - { - $strCheckpointLSN = $self->executeSqlOne('select checkpoint_' . $self->lsnId() .'::text from pg_control_checkpoint()'); - - if (lsnNormalize($strCheckpointLSN) le lsnNormalize($strTargetLSN)) - { - confess &log( - ERROR, - "the checkpoint location ${strCheckpointLSN} is less than the target location ${strTargetLSN} even though the" . - " replay location is ${strReplayedLSN}\n" . - "Hint: This should not be possible and may indicate a bug in PostgreSQL.", - ERROR_ARCHIVE_TIMEOUT); - } - } - - # Return from function and log return values if any - return logDebugReturn - ( - $strOperation, - {name => 'strReplayedLSN', value => $strReplayedLSN}, - {name => 'strCheckpointLSN', value => $strCheckpointLSN}, - ); -} - #################################################################################################################################### # dbObjectGet # diff --git a/lib/pgBackRest/Main.pm b/lib/pgBackRest/Main.pm index 355cdded4..80cee4aa5 100644 --- a/lib/pgBackRest/Main.pm +++ b/lib/pgBackRest/Main.pm @@ -13,7 +13,6 @@ $SIG{__DIE__} = sub {Carp::confess @_}; use File::Basename qw(dirname); -use pgBackRest::Backup::Info; use pgBackRest::Common::Exception; use pgBackRest::Common::Lock; use pgBackRest::Common::Log; @@ -115,27 +114,6 @@ sub main logFileSet( storageLocal(), cfgOption(CFGOPT_LOG_PATH) . '/' . cfgOption(CFGOPT_STANZA) . '-' . lc(cfgCommandName(cfgCommandGet()))); - - # Check if processes have been stopped - lockStopTest(); - - # Check locality - if (!isRepoLocal()) - { - confess &log(ERROR, - cfgCommandName(cfgCommandGet()) . 
' command must be run on the repository host', ERROR_HOST_INVALID); - } - - # Process backup command - # ---------------------------------------------------------------------------------------------------------------------- - if (cfgCommandTest(CFGCMD_BACKUP)) - { - # Load module dynamically - require pgBackRest::Backup::Backup; - pgBackRest::Backup::Backup->import(); - - new pgBackRest::Backup::Backup()->process(); - } } return 1; diff --git a/src/Makefile.in b/src/Makefile.in index 885a66885..909432d3d 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -51,6 +51,7 @@ SRCS = \ command/archive/push/file.c \ command/archive/push/protocol.c \ command/archive/push/push.c \ + command/backup/backup.c \ command/backup/common.c \ command/backup/file.c \ command/backup/pageChecksum.c \ @@ -234,6 +235,9 @@ command/archive/push/protocol.o: command/archive/push/protocol.c build.auto.h co command/archive/push/push.o: command/archive/push/push.c build.auto.h command/archive/common.h command/archive/push/file.h command/archive/push/protocol.h command/command.h command/control/common.h common/assert.h common/crypto/common.h common/debug.h common/error.auto.h common/error.h common/fork.h common/ini.h common/io/filter/filter.h common/io/filter/group.h common/io/read.h common/io/write.h common/lock.h common/log.h common/logLevel.h common/memContext.h common/stackTrace.h common/time.h common/type/buffer.h common/type/convert.h common/type/keyValue.h common/type/list.h common/type/param.h common/type/string.h common/type/stringList.h common/type/stringz.h common/type/variant.h common/type/variantList.h common/wait.h config/config.auto.h config/config.h config/define.auto.h config/define.h config/exec.h info/info.h info/infoArchive.h info/infoPg.h postgres/interface.h protocol/client.h protocol/command.h protocol/helper.h protocol/parallel.h protocol/parallelJob.h protocol/server.h storage/helper.h storage/info.h storage/read.h storage/storage.h storage/write.h $(CC) $(CPPFLAGS) 
$(CFLAGS) $(CMAKE) -c command/archive/push/push.c -o command/archive/push/push.o +command/backup/backup.o: command/backup/backup.c build.auto.h command/archive/common.h command/backup/backup.h command/backup/common.h command/backup/file.h command/backup/protocol.h command/check/common.h command/control/common.h command/stanza/common.h common/assert.h common/compress/gzip/common.h common/compress/gzip/compress.h common/compress/gzip/decompress.h common/crypto/cipherBlock.h common/crypto/common.h common/crypto/hash.h common/debug.h common/error.auto.h common/error.h common/ini.h common/io/filter/filter.h common/io/filter/group.h common/io/filter/size.h common/io/read.h common/io/write.h common/lock.h common/log.h common/logLevel.h common/memContext.h common/stackTrace.h common/time.h common/type/buffer.h common/type/convert.h common/type/keyValue.h common/type/list.h common/type/param.h common/type/string.h common/type/stringList.h common/type/stringz.h common/type/variant.h common/type/variantList.h config/config.auto.h config/config.h config/define.auto.h config/define.h db/db.h db/helper.h info/info.h info/infoArchive.h info/infoBackup.h info/infoPg.h info/manifest.h postgres/client.h postgres/interface.h postgres/version.h protocol/client.h protocol/command.h protocol/helper.h protocol/parallel.h protocol/parallelJob.h protocol/server.h storage/helper.h storage/info.h storage/read.h storage/storage.h storage/write.h version.h + $(CC) $(CPPFLAGS) $(CFLAGS) $(CMAKE) -c command/backup/backup.c -o command/backup/backup.o + command/backup/common.o: command/backup/common.c build.auto.h command/backup/common.h common/assert.h common/debug.h common/error.auto.h common/error.h common/log.h common/logLevel.h common/memContext.h common/stackTrace.h common/type/buffer.h common/type/convert.h common/type/string.h common/type/stringz.h $(CC) $(CPPFLAGS) $(CFLAGS) $(CMAKE) -c command/backup/common.c -o command/backup/common.o @@ -498,7 +502,7 @@ info/infoPg.o: info/infoPg.c 
build.auto.h common/assert.h common/crypto/common.h info/manifest.o: info/manifest.c build.auto.h command/backup/common.h common/assert.h common/crypto/cipherBlock.h common/crypto/common.h common/crypto/hash.h common/debug.h common/error.auto.h common/error.h common/ini.h common/io/filter/filter.h common/io/filter/group.h common/io/read.h common/io/write.h common/log.h common/logLevel.h common/macro.h common/memContext.h common/object.h common/regExp.h common/stackTrace.h common/time.h common/type/buffer.h common/type/convert.h common/type/json.h common/type/keyValue.h common/type/list.h common/type/mcv.h common/type/param.h common/type/string.h common/type/stringList.h common/type/stringz.h common/type/variant.h common/type/variantList.h info/info.h info/manifest.h postgres/interface.h postgres/version.h storage/info.h storage/read.h storage/storage.h storage/write.h version.h $(CC) $(CPPFLAGS) $(CFLAGS) $(CMAKE) -c info/manifest.c -o info/manifest.o -main.o: main.c build.auto.h command/archive/get/get.h command/archive/push/push.h command/check/check.h command/command.h command/control/start.h command/control/stop.h command/expire/expire.h command/help/help.h command/info/info.h command/local/local.h command/remote/remote.h command/restore/restore.h command/stanza/create.h command/stanza/delete.h command/stanza/upgrade.h command/storage/list.h common/assert.h common/debug.h common/error.auto.h common/error.h common/exit.h common/io/filter/filter.h common/io/filter/group.h common/io/read.h common/io/write.h common/lock.h common/log.h common/logLevel.h common/memContext.h common/stackTrace.h common/time.h common/type/buffer.h common/type/convert.h common/type/keyValue.h common/type/list.h common/type/param.h common/type/string.h common/type/stringList.h common/type/stringz.h common/type/variant.h common/type/variantList.h config/config.auto.h config/config.h config/define.auto.h config/define.h config/load.h perl/exec.h postgres/interface.h storage/helper.h 
storage/info.h storage/read.h storage/storage.h storage/write.h version.h +main.o: main.c build.auto.h command/archive/get/get.h command/archive/push/push.h command/backup/backup.h command/check/check.h command/command.h command/control/start.h command/control/stop.h command/expire/expire.h command/help/help.h command/info/info.h command/local/local.h command/remote/remote.h command/restore/restore.h command/stanza/create.h command/stanza/delete.h command/stanza/upgrade.h command/storage/list.h common/assert.h common/debug.h common/error.auto.h common/error.h common/exit.h common/io/filter/filter.h common/io/filter/group.h common/io/read.h common/io/write.h common/lock.h common/log.h common/logLevel.h common/memContext.h common/stackTrace.h common/time.h common/type/buffer.h common/type/convert.h common/type/keyValue.h common/type/list.h common/type/param.h common/type/string.h common/type/stringList.h common/type/stringz.h common/type/variant.h common/type/variantList.h config/config.auto.h config/config.h config/define.auto.h config/define.h config/load.h perl/exec.h postgres/interface.h storage/helper.h storage/info.h storage/read.h storage/storage.h storage/write.h version.h $(CC) $(CPPFLAGS) $(CFLAGS) $(CMAKE) -c main.c -o main.o perl/config.o: perl/config.c build.auto.h common/assert.h common/debug.h common/error.auto.h common/error.h common/lock.h common/log.h common/logLevel.h common/memContext.h common/stackTrace.h common/time.h common/type/buffer.h common/type/convert.h common/type/json.h common/type/keyValue.h common/type/list.h common/type/param.h common/type/string.h common/type/stringList.h common/type/stringz.h common/type/variant.h common/type/variantList.h config/config.auto.h config/config.h config/define.auto.h config/define.h diff --git a/src/command/backup/backup.c b/src/command/backup/backup.c new file mode 100644 index 000000000..96ba3bca9 --- /dev/null +++ b/src/command/backup/backup.c @@ -0,0 +1,1986 @@ 
+/*********************************************************************************************************************************** +Backup Command +***********************************************************************************************************************************/ +#include "build.auto.h" + +#include +#include +#include +#include + +#include "command/archive/common.h" +#include "command/control/common.h" +#include "command/backup/backup.h" +#include "command/backup/common.h" +#include "command/backup/file.h" +#include "command/backup/protocol.h" +#include "command/check/common.h" +#include "command/stanza/common.h" +#include "common/crypto/cipherBlock.h" +#include "common/compress/gzip/common.h" +#include "common/compress/gzip/compress.h" +#include "common/compress/gzip/decompress.h" +#include "common/debug.h" +#include "common/io/filter/size.h" +#include "common/log.h" +#include "common/time.h" +#include "common/type/convert.h" +#include "config/config.h" +#include "db/helper.h" +#include "info/infoArchive.h" +#include "info/infoBackup.h" +#include "info/manifest.h" +#include "postgres/interface.h" +#include "postgres/version.h" +#include "protocol/helper.h" +#include "protocol/parallel.h" +#include "storage/helper.h" +#include "version.h" + +/*********************************************************************************************************************************** +Backup constants +***********************************************************************************************************************************/ +#define BACKUP_PATH_HISTORY "backup.history" +#define BACKUP_LINK_LATEST "latest" + +/********************************************************************************************************************************** +Generate a unique backup label that does not contain a timestamp from a previous backup 
+***********************************************************************************************************************************/ +// Helper to format the backup label +static String * +backupLabelFormat(BackupType type, const String *backupLabelPrior, time_t timestamp) +{ + FUNCTION_LOG_BEGIN(logLevelTrace); + FUNCTION_LOG_PARAM(ENUM, type); + FUNCTION_LOG_PARAM(STRING, backupLabelPrior); + FUNCTION_LOG_PARAM(TIME, timestamp); + FUNCTION_LOG_END(); + + ASSERT((type == backupTypeFull && backupLabelPrior == NULL) || (type != backupTypeFull && backupLabelPrior != NULL)); + ASSERT(timestamp > 0); + + // Format the timestamp + char buffer[16]; + THROW_ON_SYS_ERROR( + strftime(buffer, sizeof(buffer), "%Y%m%d-%H%M%S", localtime(×tamp)) == 0, AssertError, "unable to format time"); + + // If full label + String *result = NULL; + + if (type == backupTypeFull) + { + result = strNewFmt("%sF", buffer); + } + // Else diff or incr label + else + { + // Get the full backup portion of the prior backup label + result = strSubN(backupLabelPrior, 0, 16); + + // Append the diff/incr timestamp + strCatFmt(result, "_%s%s", buffer, type == backupTypeDiff ? 
"D" : "I"); + } + + FUNCTION_LOG_RETURN(STRING, result); +} + +static String * +backupLabelCreate(BackupType type, const String *backupLabelPrior, time_t timestamp) +{ + FUNCTION_LOG_BEGIN(logLevelTrace); + FUNCTION_LOG_PARAM(ENUM, type); + FUNCTION_LOG_PARAM(STRING, backupLabelPrior); + FUNCTION_LOG_PARAM(TIME, timestamp); + FUNCTION_LOG_END(); + + ASSERT((type == backupTypeFull && backupLabelPrior == NULL) || (type != backupTypeFull && backupLabelPrior != NULL)); + ASSERT(timestamp > 0); + + String *result = NULL; + + MEM_CONTEXT_TEMP_BEGIN() + { + const String *backupLabelLatest = NULL; + + // Get the newest backup + const StringList *backupList = strLstSort( + storageListP( + storageRepo(), STRDEF(STORAGE_REPO_BACKUP), + .expression = backupRegExpP(.full = true, .differential = true, .incremental = true)), + sortOrderDesc); + + if (strLstSize(backupList) > 0) + backupLabelLatest = strLstGet(backupList, 0); + + // Get the newest history + const StringList *historyYearList = strLstSort( + storageListP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY), .expression = STRDEF("^2[0-9]{3}$")), + sortOrderDesc); + + if (strLstSize(historyYearList) > 0) + { + const StringList *historyList = strLstSort( + storageListP( + storageRepo(), + strNewFmt(STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY "/%s", strPtr(strLstGet(historyYearList, 0))), + .expression = strNewFmt( + "%s\\.manifest\\." 
GZIP_EXT "$", + strPtr(backupRegExpP(.full = true, .differential = true, .incremental = true, .noAnchorEnd = true)))), + sortOrderDesc); + + if (strLstSize(historyList) > 0) + { + const String *historyLabelLatest = strLstGet(historyList, 0); + + if (backupLabelLatest == NULL || strCmp(historyLabelLatest, backupLabelLatest) > 0) + backupLabelLatest = historyLabelLatest; + } + } + + // Now that we have the latest label check if the provided timestamp will give us an even later label + result = backupLabelFormat(type, backupLabelPrior, timestamp); + + if (backupLabelLatest != NULL && strCmp(result, backupLabelLatest) <= 0) + { + // If that didn't give us a later label then add one second. It's possible that two backups (they would need to be + // offline or halted online) have run very close together. + result = backupLabelFormat(type, backupLabelPrior, timestamp + 1); + + // If the label is still not latest then error. There is probably a timezone change or massive clock skew. + if (strCmp(result, backupLabelLatest) <= 0) + { + THROW_FMT( + FormatError, + "new backup label '%s' is not later than latest backup label '%s'\n" + "HINT: has the timezone changed?\n" + "HINT: is there clock skew?", + strPtr(result), strPtr(backupLabelLatest)); + } + + // If adding a second worked then sleep the remainder of the current second so we don't start early + sleepMSec(MSEC_PER_SEC - (timeMSec() % MSEC_PER_SEC)); + } + + memContextSwitch(MEM_CONTEXT_OLD()); + result = strDup(result); + memContextSwitch(MEM_CONTEXT_TEMP()); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN(STRING, result); +} + +/*********************************************************************************************************************************** +Get the postgres database and storage objects +***********************************************************************************************************************************/ +#define FUNCTION_LOG_BACKUP_DATA_TYPE \ + BackupData * +#define 
FUNCTION_LOG_BACKUP_DATA_FORMAT(value, buffer, bufferSize) \ + objToLog(value, "BackupData", buffer, bufferSize) + +typedef struct BackupData +{ + unsigned int pgIdPrimary; // Configuration id of the primary + Db *dbPrimary; // Database connection to the primary + const Storage *storagePrimary; // Storage object for the primary + const String *hostPrimary; // Host name of the primary + + unsigned int pgIdStandby; // Configuration id of the standby + Db *dbStandby; // Database connection to the standby + const Storage *storageStandby; // Storage object for the standby + const String *hostStandby; // Host name of the standby + + unsigned int version; // PostgreSQL version + unsigned int pageSize; // PostgreSQL page size + unsigned int walSegmentSize; // PostgreSQL wal segment size +} BackupData; + +static BackupData * +backupInit(const InfoBackup *infoBackup) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup); + FUNCTION_LOG_END(); + + ASSERT(infoBackup != NULL); + + // Initialize for offline backup + BackupData *result = memNew(sizeof(BackupData)); + *result = (BackupData){.pgIdPrimary = 1}; + + // Check that the PostgreSQL version supports backup from standby. The check is done using the stanza info because pg_control + // cannot be loaded until a primary is found -- which will also lead to an error if the version does not support standby. If the + // pg_control version does not match the stanza version then there will be an error further down. 
+ InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup)); + + if (cfgOptionBool(cfgOptOnline) && cfgOptionBool(cfgOptBackupStandby) && infoPg.version < PG_VERSION_BACKUP_STANDBY) + { + THROW_FMT( + ConfigError, "option '" CFGOPT_BACKUP_STANDBY "' not valid for " PG_NAME " < %s", + strPtr(pgVersionToStr(PG_VERSION_BACKUP_STANDBY))); + } + + // Don't allow backup from standby when offline + if (!cfgOptionBool(cfgOptOnline) && cfgOptionBool(cfgOptBackupStandby)) + { + LOG_WARN( + "option " CFGOPT_BACKUP_STANDBY " is enabled but backup is offline - backups will be performed from the primary"); + cfgOptionSet(cfgOptBackupStandby, cfgSourceParam, BOOL_FALSE_VAR); + } + + // Get database info when online + if (cfgOptionBool(cfgOptOnline)) + { + bool backupStandby = cfgOptionBool(cfgOptBackupStandby); + DbGetResult dbInfo = dbGet(!backupStandby, true, backupStandby); + + result->pgIdPrimary = dbInfo.primaryId; + result->dbPrimary = dbInfo.primary; + + if (backupStandby) + { + ASSERT(dbInfo.standbyId != 0); + + result->pgIdStandby = dbInfo.standbyId; + result->dbStandby = dbInfo.standby; + result->storageStandby = storagePgId(result->pgIdStandby); + result->hostStandby = cfgOptionStr(cfgOptPgHost + result->pgIdStandby - 1); + } + } + + // Add primary info + result->storagePrimary = storagePgId(result->pgIdPrimary); + result->hostPrimary = cfgOptionStr(cfgOptPgHost + result->pgIdPrimary - 1); + + // Get pg_control info from the primary + PgControl pgControl = pgControlFromFile(result->storagePrimary); + + result->version = pgControl.version; + result->pageSize = pgControl.pageSize; + result->walSegmentSize = pgControl.walSegmentSize; + + // Validate pg_control info against the stanza + if (result->version != infoPg.version || pgControl.systemId != infoPg.systemId) + { + THROW_FMT( + BackupMismatchError, + PG_NAME " version %s, system-id %" PRIu64 " do not match stanza version %s, system-id %" PRIu64 "\n" + "HINT: is this the correct stanza?", 
strPtr(pgVersionToStr(pgControl.version)), pgControl.systemId, + strPtr(pgVersionToStr(infoPg.version)), infoPg.systemId); + } + + // Only allow stop auto in PostgreSQL >= 9.3 and <= 9.5 + if (cfgOptionBool(cfgOptStopAuto) && (result->version < PG_VERSION_93 || result->version > PG_VERSION_95)) + { + LOG_WARN( + CFGOPT_STOP_AUTO " option is only available in " PG_NAME " >= " PG_VERSION_93_STR " and <= " PG_VERSION_95_STR); + cfgOptionSet(cfgOptStopAuto, cfgSourceParam, BOOL_FALSE_VAR); + } + + // Only allow start-fast option for PostgreSQL >= 8.4 + if (cfgOptionBool(cfgOptStartFast) && result->version < PG_VERSION_84) + { + LOG_WARN(CFGOPT_START_FAST " option is only available in " PG_NAME " >= " PG_VERSION_84_STR); + cfgOptionSet(cfgOptStartFast, cfgSourceParam, BOOL_FALSE_VAR); + } + + // If checksum page is not explicity set then automatically enable it when checksums are available + if (!cfgOptionTest(cfgOptChecksumPage)) + { + // If online then use the value in pg_control to set checksum-page + if (cfgOptionBool(cfgOptOnline)) + { + cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, VARBOOL(pgControl.pageChecksum)); + } + // Else set to false. An offline cluster is likely to have false positives so better if the user enables manually. + else + cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR); + } + // Else if checksums have been explicitly enabled but are not available then warn and reset. ??? We should be able to make this + // determination when offline as well, but the integration tests don't write pg_control accurately enough to support it. 
+ else if (cfgOptionBool(cfgOptOnline) && !pgControl.pageChecksum && cfgOptionBool(cfgOptChecksumPage)) + { + LOG_WARN(CFGOPT_CHECKSUM_PAGE " option set to true but checksums are not enabled on the cluster, resetting to false"); + cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR); + } + + FUNCTION_LOG_RETURN(BACKUP_DATA, result); +} + +/********************************************************************************************************************************** +Get time from the database or locally depending on online +***********************************************************************************************************************************/ +static time_t +backupTime(BackupData *backupData, bool waitRemainder) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(BACKUP_DATA, backupData); + FUNCTION_LOG_PARAM(BOOL, waitRemainder); + FUNCTION_LOG_END(); + + // Offline backups will just grab the time from the local system since the value of copyStart is not important in this context. + // No worries about causing a delta backup since switching online will do that anyway. 
+ time_t result = time(NULL); + + // When online get the time from the database server + if (cfgOptionBool(cfgOptOnline)) + { + // Get time from the database + TimeMSec timeMSec = dbTimeMSec(backupData->dbPrimary); + result = (time_t)(timeMSec / MSEC_PER_SEC); + + // Sleep the remainder of the second when requested (this is so copyStart is not subject to one second resolution issues) + if (waitRemainder) + { + sleepMSec(MSEC_PER_SEC - (timeMSec % MSEC_PER_SEC)); + + // Check time again to be sure we slept long enough + if (result >= (time_t)(dbTimeMSec(backupData->dbPrimary) / MSEC_PER_SEC)) + THROW(AssertError, "invalid sleep for online backup time with wait remainder"); + } + } + + FUNCTION_LOG_RETURN(TIME, result); +} + +/*********************************************************************************************************************************** +Create an incremental backup if type is not full and a compatible prior backup exists +***********************************************************************************************************************************/ +// Helper to find a compatible prior backup +static Manifest * +backupBuildIncrPrior(const InfoBackup *infoBackup) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup); + FUNCTION_LOG_END(); + + ASSERT(infoBackup != NULL); + + Manifest *result = NULL; + + // No incremental if backup type is full + BackupType type = backupType(cfgOptionStr(cfgOptType)); + + if (type != backupTypeFull) + { + MEM_CONTEXT_TEMP_BEGIN() + { + InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup)); + const String *backupLabelPrior = NULL; + unsigned int backupTotal = infoBackupDataTotal(infoBackup); + + for (unsigned int backupIdx = backupTotal - 1; backupIdx < backupTotal; backupIdx--) + { + InfoBackupData backupPrior = infoBackupData(infoBackup, backupIdx); + + // The prior backup for a diff must be full + if (type == backupTypeDiff && backupType(backupPrior.backupType) != 
backupTypeFull) + continue; + + // The backups must come from the same cluster ??? This should enable delta instead + if (infoPg.id != backupPrior.backupPgId) + continue; + + // This backup is a candidate for prior + backupLabelPrior = strDup(backupPrior.backupLabel); + break; + } + + // If there is a prior backup then check that options for the new backup are compatible + if (backupLabelPrior != NULL) + { + result = manifestLoadFile( + storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strPtr(backupLabelPrior)), + cipherType(cfgOptionStr(cfgOptRepoCipherType)), infoPgCipherPass(infoBackupPg(infoBackup))); + const ManifestData *manifestPriorData = manifestData(result); + + LOG_INFO_FMT( + "last backup label = %s, version = %s", strPtr(manifestData(result)->backupLabel), + strPtr(manifestData(result)->backrestVersion)); + + // Warn if compress option changed + if (cfgOptionBool(cfgOptCompress) != manifestPriorData->backupOptionCompress) + { + LOG_WARN_FMT( + "%s backup cannot alter compress option to '%s', reset to value in %s", + strPtr(cfgOptionStr(cfgOptType)), cvtBoolToConstZ(cfgOptionBool(cfgOptCompress)), + strPtr(backupLabelPrior)); + cfgOptionSet(cfgOptCompress, cfgSourceParam, VARBOOL(manifestPriorData->backupOptionCompress)); + } + + // Warn if hardlink option changed ??? Doesn't seem like this is needed? Hardlinks are always to a directory that + // is guaranteed to contain a real file -- like references. Also annoying that if the full backup was not + // hardlinked then an diff/incr can't be used because we need more testing. 
+ if (cfgOptionBool(cfgOptRepoHardlink) != manifestPriorData->backupOptionHardLink) + { + LOG_WARN_FMT( + "%s backup cannot alter hardlink option to '%s', reset to value in %s", + strPtr(cfgOptionStr(cfgOptType)), cvtBoolToConstZ(cfgOptionBool(cfgOptRepoHardlink)), + strPtr(backupLabelPrior)); + cfgOptionSet(cfgOptRepoHardlink, cfgSourceParam, VARBOOL(manifestPriorData->backupOptionHardLink)); + } + + // If not defined this backup was done in a version prior to page checksums being introduced. Just set + // checksum-page to false and move on without a warning. Page checksums will start on the next full backup. + if (manifestData(result)->backupOptionChecksumPage == NULL) + { + cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, BOOL_FALSE_VAR); + } + // Don't allow the checksum-page option to change in a diff or incr backup. This could be confusing as only + // certain files would be checksummed and the list could be incomplete during reporting. + else + { + bool checksumPagePrior = varBool(manifestData(result)->backupOptionChecksumPage); + + // Warn if an incompatible setting was explicitly requested + if (checksumPagePrior != cfgOptionBool(cfgOptChecksumPage)) + { + LOG_WARN_FMT( + "%s backup cannot alter '" CFGOPT_CHECKSUM_PAGE "' option to '%s', reset to '%s' from %s", + strPtr(cfgOptionStr(cfgOptType)), cvtBoolToConstZ(cfgOptionBool(cfgOptChecksumPage)), + cvtBoolToConstZ(checksumPagePrior), strPtr(manifestData(result)->backupLabel)); + } + + cfgOptionSet(cfgOptChecksumPage, cfgSourceParam, VARBOOL(checksumPagePrior)); + } + + manifestMove(result, MEM_CONTEXT_OLD()); + } + else + { + LOG_WARN_FMT("no prior backup exists, %s backup has been changed to full", strPtr(cfgOptionStr(cfgOptType))); + cfgOptionSet(cfgOptType, cfgSourceParam, VARSTR(backupTypeStr(backupTypeFull))); + } + } + MEM_CONTEXT_TEMP_END(); + } + + FUNCTION_LOG_RETURN(MANIFEST, result); +} + +static bool +backupBuildIncr(const InfoBackup *infoBackup, Manifest *manifest, Manifest *manifestPrior, 
const String *archiveStart) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_LOG_PARAM(MANIFEST, manifestPrior); + FUNCTION_LOG_PARAM(STRING, archiveStart); + FUNCTION_LOG_END(); + + ASSERT(infoBackup != NULL); + ASSERT(manifest != NULL); + + bool result = false; + + // No incremental if no prior manifest + if (manifestPrior != NULL) + { + MEM_CONTEXT_TEMP_BEGIN() + { + // Move the manifest to this context so it will be freed when we are done + manifestMove(manifestPrior, MEM_CONTEXT_TEMP()); + + // Build incremental manifest + manifestBuildIncr(manifest, manifestPrior, backupType(cfgOptionStr(cfgOptType)), archiveStart); + + // Set the cipher subpass from prior manifest since we want a single subpass for the entire backup set + manifestCipherSubPassSet(manifest, manifestCipherSubPass(manifestPrior)); + + // Incremental was built + result = true; + } + MEM_CONTEXT_TEMP_END(); + } + + FUNCTION_LOG_RETURN(BOOL, result); +} + +/*********************************************************************************************************************************** +Check for a backup that can be resumed and merge into the manifest if found +***********************************************************************************************************************************/ +typedef struct BackupResumeData +{ + Manifest *manifest; // New manifest + const Manifest *manifestResume; // Resumed manifest + const bool compressed; // Is the backup compressed? + const bool delta; // Is this a delta backup? 
+ const String *backupPath; // Path to the current level of the backup being cleaned + const String *manifestParentName; // Parent manifest name used to construct manifest name +} BackupResumeData; + +// Callback to clean invalid paths/files/links out of the resumable backup path +void backupResumeCallback(void *data, const StorageInfo *info) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM_P(VOID, data); + FUNCTION_TEST_PARAM(STORAGE_INFO, *storageInfo); + FUNCTION_TEST_END(); + + ASSERT(data != NULL); + ASSERT(info != NULL); + + BackupResumeData *resumeData = data; + + // Skip all . paths because they have already been handled on the previous level of recursion + if (strEq(info->name, DOT_STR)) + { + FUNCTION_TEST_RETURN_VOID(); + return; + } + + // Skip backup.manifest.copy -- it must be preserved to allow resume again if this process throws an error before writing the + // manifest for the first time + if (resumeData->manifestParentName == NULL && strEqZ(info->name, BACKUP_MANIFEST_FILE INFO_COPY_EXT)) + { + FUNCTION_TEST_RETURN_VOID(); + return; + } + + // Build the name used to lookup files in the manifest + const String *manifestName = resumeData->manifestParentName != NULL ? 
+ strNewFmt("%s/%s", strPtr(resumeData->manifestParentName), strPtr(info->name)) : info->name; + + // Build the backup path used to remove files/links/paths that are invalid + const String *backupPath = strNewFmt("%s/%s", strPtr(resumeData->backupPath), strPtr(info->name)); + + // Process file types + switch (info->type) + { + // Check paths + // ------------------------------------------------------------------------------------------------------------------------- + case storageTypePath: + { + // If the path was not found in the new manifest then remove it + if (manifestPathFindDefault(resumeData->manifest, manifestName, NULL) == NULL) + { + LOG_DETAIL_FMT("remove path '%s' from resumed backup", strPtr(storagePathP(storageRepo(), backupPath))); + storagePathRemoveP(storageRepoWrite(), backupPath, .recurse = true); + } + // Else recurse into the path + { + BackupResumeData resumeDataSub = *resumeData; + resumeDataSub.manifestParentName = manifestName; + resumeDataSub.backupPath = backupPath; + + storageInfoListP( + storageRepo(), resumeDataSub.backupPath, backupResumeCallback, &resumeDataSub, .sortOrder = sortOrderAsc); + } + + break; + } + + // Check files + // ------------------------------------------------------------------------------------------------------------------------- + case storageTypeFile: + { + // If the backup is compressed then strip off the extension before doing the lookup + if (resumeData->compressed) + manifestName = strSubN(manifestName, 0, strSize(manifestName) - sizeof(GZIP_EXT)); + + // Find the file in both manifests + const ManifestFile *file = manifestFileFindDefault(resumeData->manifest, manifestName, NULL); + const ManifestFile *fileResume = manifestFileFindDefault(resumeData->manifestResume, manifestName, NULL); + + // Check if the file can be resumed or must be removed + const char *removeReason = NULL; + + if (file == NULL) + removeReason = "missing in manifest"; + else if (file->reference != NULL) + removeReason = "reference in 
manifest"; + else if (fileResume == NULL) + removeReason = "missing in resumed manifest"; + else if (fileResume->reference != NULL) + removeReason = "reference in resumed manifest"; + else if (fileResume->checksumSha1[0] == '\0') + removeReason = "no checksum in resumed manifest"; + else if (file->size != fileResume->size) + removeReason = "mismatched size"; + else if (!resumeData->delta && file->timestamp != fileResume->timestamp) + removeReason = "mismatched timestamp"; + else if (file->size == 0) + // ??? don't resume zero size files because Perl wouldn't -- this can be removed after the migration) + removeReason = "zero size"; + else + { + manifestFileUpdate( + resumeData->manifest, manifestName, file->size, fileResume->sizeRepo, fileResume->checksumSha1, NULL, + fileResume->checksumPage, fileResume->checksumPageError, fileResume->checksumPageErrorList); + } + + // Remove the file if it could not be resumed + if (removeReason != NULL) + { + LOG_DETAIL_FMT( + "remove file '%s' from resumed backup (%s)", strPtr(storagePathP(storageRepo(), backupPath)), removeReason); + storageRemoveP(storageRepoWrite(), backupPath); + } + + break; + } + + // Remove links. We could check that the link has not changed and preserve it but it doesn't seem worth the extra testing. + // The link will be recreated during the backup if needed. 
+ // ------------------------------------------------------------------------------------------------------------------------- + case storageTypeLink: + { + storageRemoveP(storageRepoWrite(), backupPath); + break; + } + + // Remove special files + // ------------------------------------------------------------------------------------------------------------------------- + case storageTypeSpecial: + { + LOG_WARN_FMT("remove special file '%s' from resumed backup", strPtr(storagePathP(storageRepo(), backupPath))); + storageRemoveP(storageRepoWrite(), backupPath); + break; + } + } + + FUNCTION_TEST_RETURN_VOID(); +} + +// Helper to find a resumable backup +static const Manifest * +backupResumeFind(const Manifest *manifest, const String *cipherPassBackup) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_TEST_PARAM(STRING, cipherPassBackup); + FUNCTION_LOG_END(); + + ASSERT(manifest != NULL); + + Manifest *result = NULL; + + MEM_CONTEXT_TEMP_BEGIN() + { + // Only the last backup can be resumed + const StringList *backupList = strLstSort( + storageListP( + storageRepo(), STRDEF(STORAGE_REPO_BACKUP), + .expression = backupRegExpP(.full = true, .differential = true, .incremental = true)), + sortOrderDesc); + + if (strLstSize(backupList) > 0) + { + const String *backupLabel = strLstGet(backupList, 0); + const String *manifestFile = strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strPtr(backupLabel)); + + // Resumable backups have a copy of the manifest but no main + if (storageExistsP(storageRepo(), strNewFmt("%s" INFO_COPY_EXT, strPtr(manifestFile))) && + !storageExistsP(storageRepo(), manifestFile)) + { + bool usable = false; + const String *reason = STRDEF("resume is disabled"); + Manifest *manifestResume = NULL; + + // Attempt to read the manifest file in the resumable backup to see if it can be used. If any error at all occurs + // then the backup will be considered unusable and a resume will not be attempted. 
+ if (cfgOptionBool(cfgOptResume)) + { + reason = strNewFmt("unable to read %s" INFO_COPY_EXT, strPtr(manifestFile)); + + TRY_BEGIN() + { + manifestResume = manifestLoadFile( + storageRepo(), manifestFile, cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherPassBackup); + const ManifestData *manifestResumeData = manifestData(manifestResume); + + // Check pgBackRest version. This allows the resume implementation to be changed with each version of + // pgBackRest at the expense of users losing a resumable back after an upgrade, which seems worth the cost. + if (!strEq(manifestResumeData->backrestVersion, manifestData(manifest)->backrestVersion)) + { + reason = strNewFmt( + "new " PROJECT_NAME " version '%s' does not match resumable " PROJECT_NAME " version '%s'", + strPtr(manifestData(manifest)->backrestVersion), strPtr(manifestResumeData->backrestVersion)); + } + // Check backup type because new backup label must be the same type as resume backup label + else if (manifestResumeData->backupType != backupType(cfgOptionStr(cfgOptType))) + { + reason = strNewFmt( + "new backup type '%s' does not match resumable backup type '%s'", strPtr(cfgOptionStr(cfgOptType)), + strPtr(backupTypeStr(manifestResumeData->backupType))); + } + // Check prior backup label ??? Do we really care about the prior backup label? + else if (!strEq(manifestResumeData->backupLabelPrior, manifestData(manifest)->backupLabelPrior)) + { + reason = strNewFmt( + "new prior backup label '%s' does not match resumable prior backup label '%s'", + manifestResumeData->backupLabelPrior ? strPtr(manifestResumeData->backupLabelPrior) : "", + manifestData(manifest)->backupLabelPrior ? + strPtr(manifestData(manifest)->backupLabelPrior) : ""); + } + // Check compression. Compression can't be changed between backups so resume won't work either. 
+ else if (manifestResumeData->backupOptionCompress != cfgOptionBool(cfgOptCompress)) + { + reason = strNewFmt( + "new compression '%s' does not match resumable compression '%s'", + cvtBoolToConstZ(cfgOptionBool(cfgOptCompress)), + cvtBoolToConstZ(manifestResumeData->backupOptionCompress)); + } + else + usable = true; + } + CATCH_ANY() + { + } + TRY_END(); + } + + // If the backup is usable then return the manifest + if (usable) + { + result = manifestMove(manifestResume, MEM_CONTEXT_OLD()); + } + // Else warn and remove the unusable backup + else + { + LOG_WARN_FMT("backup '%s' cannot be resumed: %s", strPtr(backupLabel), strPtr(reason)); + + storagePathRemoveP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s", strPtr(backupLabel)), .recurse = true); + } + } + } + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN(MANIFEST, result); +} + +static bool +backupResume(Manifest *manifest, const String *cipherPassBackup) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_TEST_PARAM(STRING, cipherPassBackup); + FUNCTION_LOG_END(); + + ASSERT(manifest != NULL); + + bool result = false; + + MEM_CONTEXT_TEMP_BEGIN() + { + const Manifest *manifestResume = backupResumeFind(manifest, cipherPassBackup); + + // If a resumable backup was found set the label and cipher subpass + if (manifestResume) + { + // Resuming + result = true; + + // Set the backup label to the resumed backup + manifestBackupLabelSet(manifest, manifestData(manifestResume)->backupLabel); + + LOG_WARN_FMT( + "resumable backup %s of same type exists -- remove invalid files and resume", + strPtr(manifestData(manifest)->backupLabel)); + + // If resuming a full backup then copy cipher subpass since it was used to encrypt the resumable files + if (manifestData(manifest)->backupType == backupTypeFull) + manifestCipherSubPassSet(manifest, manifestCipherSubPass(manifestResume)); + + // Clean resumed backup + BackupResumeData resumeData = + { + .manifest = manifest, + 
.manifestResume = manifestResume, + .compressed = cfgOptionBool(cfgOptCompress), + .delta = cfgOptionBool(cfgOptDelta), + .backupPath = strNewFmt(STORAGE_REPO_BACKUP "/%s", strPtr(manifestData(manifest)->backupLabel)), + }; + + storageInfoListP(storageRepo(), resumeData.backupPath, backupResumeCallback, &resumeData, .sortOrder = sortOrderAsc); + } + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN(BOOL, result); +} + +/*********************************************************************************************************************************** +Start the backup +***********************************************************************************************************************************/ +typedef struct BackupStartResult +{ + String *lsn; + String *walSegmentName; + VariantList *dbList; + VariantList *tablespaceList; +} BackupStartResult; + +#define FUNCTION_LOG_BACKUP_START_RESULT_TYPE \ + BackupStartResult +#define FUNCTION_LOG_BACKUP_START_RESULT_FORMAT(value, buffer, bufferSize) \ + objToLog(&value, "BackupStartResult", buffer, bufferSize) + +static BackupStartResult +backupStart(BackupData *backupData) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(BACKUP_DATA, backupData); + FUNCTION_LOG_END(); + + BackupStartResult result = {.lsn = NULL}; + + MEM_CONTEXT_TEMP_BEGIN() + { + // If this is an offline backup + if (!cfgOptionBool(cfgOptOnline)) + { + // Check if Postgres is running and if so only continue when forced + if (storageExistsP(backupData->storagePrimary, PG_FILE_POSTMASTERPID_STR)) + { + if (cfgOptionBool(cfgOptForce)) + { + LOG_WARN( + "--no-" CFGOPT_ONLINE " passed and " PG_FILE_POSTMASTERPID " exists but --" CFGOPT_FORCE " was passed so" + " backup will continue though it looks like the postmaster is running and the backup will probably not be" + " consistent"); + } + else + { + THROW( + PostmasterRunningError, + "--no-" CFGOPT_ONLINE " passed but " PG_FILE_POSTMASTERPID " exists - looks like the postmaster is running." 
+ " Shutdown the postmaster and try again, or use --force."); + } + } + } + // Else start the backup normally + else + { + // Check database configuration + checkDbConfig(backupData->version, backupData->pgIdPrimary, backupData->dbPrimary, false); + + // Start backup + LOG_INFO_FMT( + "execute %sexclusive pg_start_backup(): backup begins after the %s checkpoint completes", + backupData->version >= PG_VERSION_96 ? "non-" : "", + cfgOptionBool(cfgOptStartFast) ? "requested immediate" : "next regular"); + + DbBackupStartResult dbBackupStartResult = dbBackupStart( + backupData->dbPrimary, cfgOptionBool(cfgOptStartFast), cfgOptionBool(cfgOptStopAuto)); + + memContextSwitch(MEM_CONTEXT_OLD()); + result.lsn = strDup(dbBackupStartResult.lsn); + result.walSegmentName = strDup(dbBackupStartResult.walSegmentName); + result.dbList = dbList(backupData->dbPrimary); + result.tablespaceList = dbTablespaceList(backupData->dbPrimary); + memContextSwitch(MEM_CONTEXT_TEMP()); + + LOG_INFO_FMT("backup start archive = %s, lsn = %s", strPtr(result.walSegmentName), strPtr(result.lsn)); + + // Wait for replay on the standby to catch up + if (cfgOptionBool(cfgOptBackupStandby)) + { + LOG_INFO_FMT("wait for replay on the standby to reach %s", strPtr(result.lsn)); + dbReplayWait(backupData->dbStandby, result.lsn, (TimeMSec)(cfgOptionDbl(cfgOptArchiveTimeout) * MSEC_PER_SEC)); + LOG_INFO_FMT("replay on the standby reached %s", strPtr(result.lsn)); + + // The standby db object won't be used anymore so free it + dbFree(backupData->dbStandby); + + // The standby protocol connection won't be used anymore so free it + protocolRemoteFree(backupData->pgIdStandby); + } + } + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN(BACKUP_START_RESULT, result); +} + +/*********************************************************************************************************************************** +Stop the backup 
***********************************************************************************************************************************/
// Helper to write a file from a string to the repository and update the manifest (used for files returned by pg_stop_backup())
static void
backupFilePut(BackupData *backupData, Manifest *manifest, const String *name, time_t timestamp, const String *content)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
        FUNCTION_LOG_PARAM(MANIFEST, manifest);
        FUNCTION_LOG_PARAM(STRING, name);
        FUNCTION_LOG_PARAM(TIME, timestamp);
        FUNCTION_LOG_PARAM(STRING, content);
    FUNCTION_LOG_END();

    // Skip files with no content
    if (content != NULL)
    {
        MEM_CONTEXT_TEMP_BEGIN()
        {
            // Create file
            const String *manifestName = strNewFmt(MANIFEST_TARGET_PGDATA "/%s", strPtr(name));
            bool compress = cfgOptionBool(cfgOptCompress);

            StorageWrite *write = storageNewWriteP(
                storageRepoWrite(),
                strNewFmt(
                    STORAGE_REPO_BACKUP "/%s/%s%s", strPtr(manifestData(manifest)->backupLabel), strPtr(manifestName),
                    compress ? "." GZIP_EXT : ""),
                .compressible = true);

            IoFilterGroup *filterGroup = ioWriteFilterGroup(storageWriteIo(write));

            // Add SHA1 filter
            ioFilterGroupAdd(filterGroup, cryptoHashNew(HASH_TYPE_SHA1_STR));

            // Add compression
            if (compress)
            {
                ioFilterGroupAdd(
                    ioWriteFilterGroup(storageWriteIo(write)), gzipCompressNew((int)cfgOptionUInt(cfgOptCompressLevel), false));
            }

            // Add encryption filter if required
            cipherBlockFilterGroupAdd(
                filterGroup, cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherModeEncrypt, manifestCipherSubPass(manifest));

            // Add size filter last to calculate repo size
            ioFilterGroupAdd(filterGroup, ioSizeNew());

            // Write file
            storagePutP(write, BUFSTR(content));

            // Use base path to set ownership and mode
            const ManifestPath *basePath = manifestPathFind(manifest, MANIFEST_TARGET_PGDATA_STR);

            // Add to manifest
            ManifestFile file =
            {
                .name = manifestName,
                .primary = true,
                .mode = basePath->mode & (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH),
                .user = basePath->user,
                .group = basePath->group,
                .size = strSize(content),
                .sizeRepo = varUInt64Force(ioFilterGroupResult(filterGroup, SIZE_FILTER_TYPE_STR)),
                .timestamp = timestamp,
            };

            // Copy the hex SHA1 (including the terminating NUL) from the hash filter result
            memcpy(
                file.checksumSha1, strPtr(varStr(ioFilterGroupResult(filterGroup, CRYPTO_HASH_FILTER_TYPE_STR))),
                HASH_TYPE_SHA1_SIZE_HEX + 1);

            manifestFileAdd(manifest, &file);

            LOG_DETAIL_FMT("wrote '%s' file returned from pg_stop_backup()", strPtr(name));
        }
        MEM_CONTEXT_TEMP_END();
    }

    FUNCTION_LOG_RETURN_VOID();
}

/*--------------------------------------------------------------------------------------------------------------------------------*/
typedef struct BackupStopResult
{
    String *lsn;
    String *walSegmentName;
    time_t timestamp;
} BackupStopResult;

#define FUNCTION_LOG_BACKUP_STOP_RESULT_TYPE                                                                                       \
    BackupStopResult
#define FUNCTION_LOG_BACKUP_STOP_RESULT_FORMAT(value, buffer, bufferSize)                                                          \
    objToLog(&value, "BackupStopResult", buffer, bufferSize)

static BackupStopResult
backupStop(BackupData *backupData, Manifest *manifest)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(BACKUP_DATA, backupData);
        FUNCTION_LOG_PARAM(MANIFEST, manifest);
    FUNCTION_LOG_END();

    BackupStopResult result = {.lsn = NULL};

    if (cfgOptionBool(cfgOptOnline))
    {
        MEM_CONTEXT_TEMP_BEGIN()
        {
            // Stop the backup
            LOG_INFO_FMT(
                "execute %sexclusive pg_stop_backup() and wait for all WAL segments to archive",
                backupData->version >= PG_VERSION_96 ? "non-" : "");

            DbBackupStopResult dbBackupStopResult = dbBackupStop(backupData->dbPrimary);

            // Copy the results into the caller's context
            memContextSwitch(MEM_CONTEXT_OLD());
            result.timestamp = backupTime(backupData, false);
            result.lsn = strDup(dbBackupStopResult.lsn);
            result.walSegmentName = strDup(dbBackupStopResult.walSegmentName);
            memContextSwitch(MEM_CONTEXT_TEMP());

            LOG_INFO_FMT("backup stop archive = %s, lsn = %s", strPtr(result.walSegmentName), strPtr(result.lsn));

            // Save files returned by stop backup
            backupFilePut(backupData, manifest, STRDEF(PG_FILE_BACKUPLABEL), result.timestamp, dbBackupStopResult.backupLabel);
            backupFilePut(backupData, manifest, STRDEF(PG_FILE_TABLESPACEMAP), result.timestamp, dbBackupStopResult.tablespaceMap);
        }
        MEM_CONTEXT_TEMP_END();
    }
    else
        result.timestamp = backupTime(backupData, false);

    FUNCTION_LOG_RETURN(BACKUP_STOP_RESULT, result);
}

/***********************************************************************************************************************************
Log the results of a job and throw errors
***********************************************************************************************************************************/
static uint64_t
backupJobResult(
    Manifest *manifest, const String *host, const String *const fileName, ProtocolParallelJob *const job, const uint64_t sizeTotal,
    uint64_t sizeCopied, unsigned int pageSize)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(MANIFEST, manifest);
        FUNCTION_LOG_PARAM(STRING, host);
        FUNCTION_LOG_PARAM(STRING, fileName);
        FUNCTION_LOG_PARAM(PROTOCOL_PARALLEL_JOB, job);
        FUNCTION_LOG_PARAM(UINT64, sizeTotal);
        FUNCTION_LOG_PARAM(UINT64, sizeCopied);
        FUNCTION_LOG_PARAM(UINT, pageSize);
    FUNCTION_LOG_END();

    ASSERT(manifest != NULL);
    ASSERT(fileName != NULL);
    ASSERT(job != NULL);

    // The job was successful
    if (protocolParallelJobErrorCode(job) == 0)
    {
        MEM_CONTEXT_TEMP_BEGIN()
        {
            const ManifestFile *const file = manifestFileFind(manifest, varStr(protocolParallelJobKey(job)));
            const unsigned int processId = protocolParallelJobProcessId(job);

            // Unpack the job result: copy result code, copy/repo sizes, checksum, and optional page checksum data
            const VariantList *const jobResult = varVarLst(protocolParallelJobResult(job));
            const BackupCopyResult copyResult = (BackupCopyResult)varUIntForce(varLstGet(jobResult, 0));
            const uint64_t copySize = varUInt64(varLstGet(jobResult, 1));
            const uint64_t repoSize = varUInt64(varLstGet(jobResult, 2));
            const String *const copyChecksum = varStr(varLstGet(jobResult, 3));
            const KeyValue *const checksumPageResult = varKv(varLstGet(jobResult, 4));

            // Increment backup copy progress
            sizeCopied += copySize;

            // Create log file name
            const String *fileLog = host == NULL ? fileName : strNewFmt("%s:%s", strPtr(host), strPtr(fileName));

            // Format log strings
            const String *const logProgress =
                strNewFmt(
                    "%s, %" PRIu64 "%%", strPtr(strSizeFormat(copySize)), sizeTotal == 0 ? 100 : sizeCopied * 100 / sizeTotal);
            const String *const logChecksum = copySize != 0 ? strNewFmt(" checksum %s", strPtr(copyChecksum)) : EMPTY_STR;

            // If the file is in a prior backup and nothing changed, just log it
            if (copyResult == backupCopyResultNoOp)
            {
                LOG_DETAIL_PID_FMT(
                    processId, "match file from prior backup %s (%s)%s", strPtr(fileLog), strPtr(logProgress), strPtr(logChecksum));
            }
            // Else if the repo matched the expect checksum, just log it
            else if (copyResult == backupCopyResultChecksum)
            {
                LOG_DETAIL_PID_FMT(
                    processId, "checksum resumed file %s (%s)%s", strPtr(fileLog), strPtr(logProgress), strPtr(logChecksum));
            }
            // Else if the file was removed during backup then remove from manifest
            else if (copyResult == backupCopyResultSkip)
            {
                LOG_DETAIL_PID_FMT(processId, "skip file removed by database %s", strPtr(fileLog));
                manifestFileRemove(manifest, file->name);
            }
            // Else file was copied so update manifest
            else
            {
                // If the file had to be recopied then warn that there may be an issue with corruption in the repository
                // ??? This should really be below the message below for more context -- can be moved after the migration
                // ??? The name should be a pg path not manifest name -- can be fixed after the migration
                if (copyResult == backupCopyResultReCopy)
                {
                    LOG_WARN_FMT(
                        "resumed backup file %s does not have expected checksum %s. The file will be recopied and backup will"
                        " continue but this may be an issue unless the resumed backup path in the repository is known to be"
                        " corrupted.\n"
                        "NOTE: this does not indicate a problem with the PostgreSQL page checksums.",
                        strPtr(file->name), file->checksumSha1);
                }

                LOG_INFO_PID_FMT(
                    processId, "backup file %s (%s)%s", strPtr(fileLog), strPtr(logProgress), strPtr(logChecksum));

                // If the file had page checksums calculated during the copy
                ASSERT((!file->checksumPage && checksumPageResult == NULL) || (file->checksumPage && checksumPageResult != NULL));

                bool checksumPageError = false;
                const VariantList *checksumPageErrorList = NULL;

                if (checksumPageResult != NULL)
                {
                    // If the checksum was valid
                    if (!varBool(kvGet(checksumPageResult, VARSTRDEF("valid"))))
                    {
                        checksumPageError = true;

                        if (!varBool(kvGet(checksumPageResult, VARSTRDEF("align"))))
                        {
                            checksumPageErrorList = NULL;

                            // ??? Update formatting after migration
                            LOG_WARN_FMT(
                                "page misalignment in file %s: file size %" PRIu64 " is not divisible by page size %u",
                                strPtr(fileLog), copySize, pageSize);
                        }
                        else
                        {
                            // Format the page checksum errors
                            checksumPageErrorList = varVarLst(kvGet(checksumPageResult, VARSTRDEF("error")));
                            ASSERT(varLstSize(checksumPageErrorList) > 0);

                            String *error = strNew("");
                            unsigned int errorTotalMin = 0;

                            for (unsigned int errorIdx = 0; errorIdx < varLstSize(checksumPageErrorList); errorIdx++)
                            {
                                const Variant *const errorItem = varLstGet(checksumPageErrorList, errorIdx);

                                // Add a comma if this is not the first item
                                if (errorIdx != 0)
                                    strCat(error, ", ");

                                // If an error range
                                if (varType(errorItem) == varTypeVariantList)
                                {
                                    const VariantList *const errorItemList = varVarLst(errorItem);
                                    ASSERT(varLstSize(errorItemList) == 2);

                                    strCatFmt(
                                        error, "%" PRIu64 "-%" PRIu64, varUInt64(varLstGet(errorItemList, 0)),
                                        varUInt64(varLstGet(errorItemList, 1)));
                                    errorTotalMin += 2;
                                }
                                // Else a single error
                                else
                                {
                                    ASSERT(varType(errorItem) == varTypeUInt64);

                                    strCatFmt(error, "%" PRIu64, varUInt64(errorItem));
                                    errorTotalMin++;
                                }
                            }

                            // Make message plural when appropriate
                            const String *const plural = errorTotalMin > 1 ? STRDEF("s") : EMPTY_STR;

                            // ??? Update formatting after migration
                            LOG_WARN_FMT(
                                "invalid page checksum%s found in file %s at page%s %s", strPtr(plural), strPtr(fileLog),
                                strPtr(plural), strPtr(error));
                        }
                    }
                }

                // Update file info and remove any reference to the file's existence in a prior backup
                manifestFileUpdate(
                    manifest, file->name, copySize, repoSize, copySize > 0 ? strPtr(copyChecksum) : "", VARSTR(NULL),
                    file->checksumPage, checksumPageError, checksumPageErrorList);
            }
        }
        MEM_CONTEXT_TEMP_END();

        // Free the job
        protocolParallelJobFree(job);
    }
    // Else the job errored
    else
        THROW_CODE(protocolParallelJobErrorCode(job), strPtr(protocolParallelJobErrorMessage(job)));

    FUNCTION_LOG_RETURN(UINT64, sizeCopied);
}

/***********************************************************************************************************************************
Save a copy of the backup manifest during processing to preserve checksums for a possible resume
***********************************************************************************************************************************/
static void
backupManifestSaveCopy(Manifest *const manifest, const String *cipherPassBackup)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(MANIFEST, manifest);
        FUNCTION_TEST_PARAM(STRING, cipherPassBackup);
    FUNCTION_LOG_END();

    ASSERT(manifest != NULL);

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Open file for write
        IoWrite *write = storageWriteIo(
            storageNewWriteP(
                storageRepoWrite(),
                strNewFmt(
                    STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strPtr(manifestData(manifest)->backupLabel))));

        // Add encryption filter if required
cipherBlockFilterGroupAdd( + ioWriteFilterGroup(write), cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherModeEncrypt, cipherPassBackup); + + // Save file + manifestSave(manifest, write); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +Process the backup manifest +***********************************************************************************************************************************/ +// Comparator to order ManifestFile objects by size then name +static int +backupProcessQueueComparator(const void *item1, const void *item2) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM_P(VOID, item1); + FUNCTION_TEST_PARAM_P(VOID, item2); + FUNCTION_TEST_END(); + + ASSERT(item1 != NULL); + ASSERT(item2 != NULL); + + // If the size differs then that's enough to determine order + if ((*(ManifestFile **)item1)->size < (*(ManifestFile **)item2)->size) + FUNCTION_TEST_RETURN(-1); + else if ((*(ManifestFile **)item1)->size > (*(ManifestFile **)item2)->size) + FUNCTION_TEST_RETURN(1); + + // If size is the same then use name to generate a deterministic ordering (names must be unique) + FUNCTION_TEST_RETURN(strCmp((*(ManifestFile **)item1)->name, (*(ManifestFile **)item2)->name)); +} + +// Helper to generate the backup queues +static uint64_t +backupProcessQueue(Manifest *manifest, List **queueList) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_LOG_PARAM_P(LIST, queueList); + FUNCTION_LOG_END(); + + ASSERT(manifest != NULL); + + uint64_t result = 0; + + MEM_CONTEXT_TEMP_BEGIN() + { + // Create list of process queue + *queueList = lstNew(sizeof(List *)); + + // Generate the list of targets + StringList *targetList = strLstNew(); + strLstAdd(targetList, STRDEF(MANIFEST_TARGET_PGDATA "/")); + + for (unsigned int targetIdx = 0; targetIdx < manifestTargetTotal(manifest); 
targetIdx++) + { + const ManifestTarget *target = manifestTarget(manifest, targetIdx); + + if (target->tablespaceId != 0) + strLstAdd(targetList, strNewFmt("%s/", strPtr(target->name))); + } + + // Generate the processing queues (there is always at least one) + bool backupStandby = cfgOptionBool(cfgOptBackupStandby); + unsigned int queueOffset = backupStandby ? 1 : 0; + + MEM_CONTEXT_BEGIN(lstMemContext(*queueList)) + { + for (unsigned int queueIdx = 0; queueIdx < strLstSize(targetList) + queueOffset; queueIdx++) + { + List *queue = lstNewP(sizeof(ManifestFile *), .comparator = backupProcessQueueComparator); + lstAdd(*queueList, &queue); + } + } + MEM_CONTEXT_END(); + + // Now put all files into the processing queues + bool delta = cfgOptionBool(cfgOptDelta); + uint64_t fileTotal = 0; + bool pgControlFound = false; + + for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(manifest); fileIdx++) + { + const ManifestFile *file = manifestFile(manifest, fileIdx); + + // If the file is a reference it should only be backed up if delta and not zero size + if (file->reference != NULL && (!delta || file->size == 0)) + continue; + + // Is pg_control in the backup? 
+ if (strEq(file->name, STRDEF(MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL))) + pgControlFound = true; + + // Files that must be copied from the primary are always put in queue 0 when backup from standby + if (backupStandby && file->primary) + { + lstAdd(*(List **)lstGet(*queueList, 0), &file); + } + // Else find the correct queue by matching the file to a target + else + { + // Find the target that contains this file + unsigned int targetIdx = 0; + + do + { + // A target should always be found + CHECK(targetIdx < strLstSize(targetList)); + + if (strBeginsWith(file->name, strLstGet(targetList, targetIdx))) + break; + + targetIdx++; + } + while (1); + + // Add file to queue + lstAdd(*(List **)lstGet(*queueList, targetIdx + queueOffset), &file); + } + + // Add size to total + result += file->size; + + // Increment total files + fileTotal++; + } + + // pg_control should always be in an online backup + if (!pgControlFound && cfgOptionBool(cfgOptOnline)) + { + THROW( + FileMissingError, + PG_FILE_PGCONTROL " must be present in all online backups\n" + "HINT: is something wrong with the clock or filesystem timestamps?"); + } + + // If there are no files to backup then we'll exit with an error. This could happen if the database is down and backup is + // called with --no-online twice in a row. 
+ if (fileTotal == 0) + THROW(FileMissingError, "no files have changed since the last backup - this seems unlikely"); + + // Sort the queues + for (unsigned int targetIdx = 0; targetIdx < strLstSize(targetList); targetIdx++) + lstSort(*(List **)lstGet(*queueList, targetIdx), sortOrderDesc); + + // Move process queues to calling context + lstMove(*queueList, MEM_CONTEXT_OLD()); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN(UINT64, result); +} + +// Helper to caculate the next queue to scan based on the client index +static int +backupJobQueueNext(unsigned int clientIdx, int queueIdx, unsigned int queueTotal) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM(UINT, clientIdx); + FUNCTION_TEST_PARAM(INT, queueIdx); + FUNCTION_TEST_PARAM(UINT, queueTotal); + FUNCTION_TEST_END(); + + // Move (forward or back) to the next queue + queueIdx += clientIdx % 2 ? -1 : 1; + + // Deal with wrapping on either end + if (queueIdx < 0) + FUNCTION_TEST_RETURN((int)queueTotal - 1); + else if (queueIdx == (int)queueTotal) + FUNCTION_TEST_RETURN(0); + + FUNCTION_TEST_RETURN(queueIdx); +} + +// Callback to fetch backup jobs for the parallel executor +typedef struct BackupJobData +{ + const String *const backupLabel; // Backup label (defines the backup path) + const bool backupStandby; // Backup from standby + const String *const cipherSubPass; // Passphrase used to encrypt files in the backup + const bool compress; // Is the backup compressed? + const unsigned int compressLevel; // Compress level if backup is compressed + const bool delta; // Is this a checksum delta backup? 
+ const uint64_t lsnStart; // Starting lsn for the backup + + List *queueList; // List of processing queues +} BackupJobData; + +static ProtocolParallelJob *backupJobCallback(void *data, unsigned int clientIdx) +{ + FUNCTION_TEST_BEGIN(); + FUNCTION_TEST_PARAM_P(VOID, data); + FUNCTION_TEST_PARAM(UINT, clientIdx); + FUNCTION_TEST_END(); + + ASSERT(data != NULL); + + ProtocolParallelJob *result = NULL; + + MEM_CONTEXT_TEMP_BEGIN() + { + // Get a new job if there are any left + BackupJobData *jobData = data; + + // Determine where to begin scanning the queue (we'll stop when we get back here). When copying from the primary during + // backup from standby only queue 0 will be used. + unsigned int queueOffset = jobData->backupStandby && clientIdx > 0 ? 1 : 0; + int queueIdx = jobData->backupStandby && clientIdx == 0 ? + 0 : (int)(clientIdx % (lstSize(jobData->queueList) - queueOffset)); + int queueEnd = queueIdx; + + do + { + List *queue = *(List **)lstGet(jobData->queueList, (unsigned int)queueIdx + queueOffset); + + if (lstSize(queue) > 0) + { + const ManifestFile *file = *(ManifestFile **)lstGet(queue, 0); + + // Create backup job + ProtocolCommand *command = protocolCommandNew(PROTOCOL_COMMAND_BACKUP_FILE_STR); + + protocolCommandParamAdd(command, VARSTR(manifestPathPg(file->name))); + protocolCommandParamAdd( + command, VARBOOL(!strEq(file->name, STRDEF(MANIFEST_TARGET_PGDATA "/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL)))); + protocolCommandParamAdd(command, VARUINT64(file->size)); + protocolCommandParamAdd(command, file->checksumSha1[0] != 0 ? 
VARSTRZ(file->checksumSha1) : NULL); + protocolCommandParamAdd(command, VARBOOL(file->checksumPage)); + protocolCommandParamAdd(command, VARUINT64(jobData->lsnStart)); + protocolCommandParamAdd(command, VARSTR(file->name)); + protocolCommandParamAdd(command, VARBOOL(file->reference != NULL)); + protocolCommandParamAdd(command, VARBOOL(jobData->compress)); + protocolCommandParamAdd(command, VARUINT(jobData->compressLevel)); + protocolCommandParamAdd(command, VARSTR(jobData->backupLabel)); + protocolCommandParamAdd(command, VARBOOL(jobData->delta)); + protocolCommandParamAdd(command, VARSTR(jobData->cipherSubPass)); + + // Remove job from the queue + lstRemoveIdx(queue, 0); + + // Assign job to result + result = protocolParallelJobMove(protocolParallelJobNew(VARSTR(file->name), command), MEM_CONTEXT_OLD()); + + // Break out of the loop early since we found a job + break; + } + + // Don't get next queue when copying from primary during backup from standby since the primary only has one queue + if (!jobData->backupStandby || clientIdx > 0) + queueIdx = backupJobQueueNext(clientIdx, queueIdx, lstSize(jobData->queueList) - queueOffset); + } + while (queueIdx != queueEnd); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_TEST_RETURN(result); +} + +static void +backupProcess(BackupData *backupData, Manifest *manifest, const String *lsnStart, const String *cipherPassBackup) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(BACKUP_DATA, backupData); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_LOG_PARAM(STRING, lsnStart); + FUNCTION_TEST_PARAM(STRING, cipherPassBackup); + FUNCTION_LOG_END(); + + ASSERT(manifest != NULL); + + MEM_CONTEXT_TEMP_BEGIN() + { + // Get backup info + const BackupType backupType = manifestData(manifest)->backupType; + const String *const backupLabel = manifestData(manifest)->backupLabel; + const String *const backupPathExp = strNewFmt(STORAGE_REPO_BACKUP "/%s", strPtr(backupLabel)); + bool hardLink = cfgOptionBool(cfgOptRepoHardlink) && 
storageFeature(storageRepoWrite(), storageFeatureHardLink); + bool backupStandby = cfgOptionBool(cfgOptBackupStandby); + + // If this is a full backup or hard-linked and paths are supported then create all paths explicitly so that empty paths will + // exist in to repo. Also create tablspace symlinks when symlinks are available, This makes it possible for the user to + // make a copy of the backup path and get a valid cluster. + if (backupType == backupTypeFull || hardLink) + { + // Create paths when available + if (storageFeature(storageRepoWrite(), storageFeaturePath)) + { + for (unsigned int pathIdx = 0; pathIdx < manifestPathTotal(manifest); pathIdx++) + { + storagePathCreateP( + storageRepoWrite(), + strNewFmt("%s/%s", strPtr(backupPathExp), strPtr(manifestPath(manifest, pathIdx)->name))); + } + } + + // Create tablespace symlinks when available + if (storageFeature(storageRepoWrite(), storageFeatureSymLink)) + { + for (unsigned int targetIdx = 0; targetIdx < manifestTargetTotal(manifest); targetIdx++) + { + const ManifestTarget *const target = manifestTarget(manifest, targetIdx); + + if (target->tablespaceId != 0) + { + const String *const link = storagePathP( + storageRepo(), + strNewFmt("%s/" MANIFEST_TARGET_PGDATA "/%s", strPtr(backupPathExp), strPtr(target->name))); + const String *const linkDestination = strNewFmt( + "../../" MANIFEST_TARGET_PGTBLSPC "/%u", target->tablespaceId); + + THROW_ON_SYS_ERROR_FMT( + symlink(strPtr(linkDestination), strPtr(link)) == -1, FileOpenError, + "unable to create symlink '%s' to '%s'", strPtr(link), strPtr(linkDestination)); + } + } + } + } + + // Generate processing queues + BackupJobData jobData = + { + .backupLabel = backupLabel, + .backupStandby = backupStandby, + .compress = cfgOptionBool(cfgOptCompress), + .compressLevel = cfgOptionUInt(cfgOptCompressLevel), + .cipherSubPass = manifestCipherSubPass(manifest), + .delta = cfgOptionBool(cfgOptDelta), + .lsnStart = cfgOptionBool(cfgOptOnline) ? 
pgLsnFromStr(lsnStart) : 0xFFFFFFFFFFFFFFFF, + }; + + uint64_t sizeTotal = backupProcessQueue(manifest, &jobData.queueList); + + // Create the parallel executor + ProtocolParallel *parallelExec = protocolParallelNew( + (TimeMSec)(cfgOptionDbl(cfgOptProtocolTimeout) * MSEC_PER_SEC) / 2, backupJobCallback, &jobData); + + // First client is always on the primary + protocolParallelClientAdd(parallelExec, protocolLocalGet(protocolStorageTypePg, backupData->pgIdPrimary, 1)); + + // Create the rest of the clients on the primary or standby depending on the value of backup-standby. Note that standby + // backups don't count the primary client in process-max. + unsigned int processMax = cfgOptionUInt(cfgOptProcessMax) + (backupStandby ? 1 : 0); + unsigned int pgId = backupStandby ? backupData->pgIdStandby : backupData->pgIdPrimary; + + for (unsigned int processIdx = 2; processIdx <= processMax; processIdx++) + protocolParallelClientAdd(parallelExec, protocolLocalGet(protocolStorageTypePg, pgId, processIdx)); + + // Determine how often the manifest will be saved (every one percent or threshold size, whichever is greater) + uint64_t manifestSaveLast = 0; + uint64_t manifestSaveSize = sizeTotal / 100; + + if (manifestSaveSize < cfgOptionUInt64(cfgOptManifestSaveThreshold)) + manifestSaveSize = cfgOptionUInt64(cfgOptManifestSaveThreshold); + + // Process jobs + uint64_t sizeCopied = 0; + + MEM_CONTEXT_TEMP_RESET_BEGIN() + { + do + { + unsigned int completed = protocolParallelProcess(parallelExec); + + for (unsigned int jobIdx = 0; jobIdx < completed; jobIdx++) + { + ProtocolParallelJob *job = protocolParallelResult(parallelExec); + + sizeCopied = backupJobResult( + manifest, + backupStandby && protocolParallelJobProcessId(job) > 1 ? backupData->hostStandby : backupData->hostPrimary, + storagePathP( + protocolParallelJobProcessId(job) > 1 ? 
storagePgId(pgId) : backupData->storagePrimary, + manifestPathPg(manifestFileFind(manifest, varStr(protocolParallelJobKey(job)))->name)), + job, sizeTotal, sizeCopied, backupData->pageSize); + } + + // A keep-alive is required here for the remote holding open the backup connection + protocolKeepAlive(); + + // Save the manifest periodically to preserve checksums for resume + if (sizeCopied - manifestSaveLast >= manifestSaveSize) + { + backupManifestSaveCopy(manifest, cipherPassBackup); + manifestSaveLast = sizeCopied; + } + + // Reset the memory context occasionally so we don't use too much memory or slow down processing + MEM_CONTEXT_TEMP_RESET(1000); + } + while (!protocolParallelDone(parallelExec)); + } + MEM_CONTEXT_TEMP_END(); + + // Log references or create hardlinks for all files + const char *const compressExt = jobData.compress ? "." GZIP_EXT : ""; + + for (unsigned int fileIdx = 0; fileIdx < manifestFileTotal(manifest); fileIdx++) + { + const ManifestFile *const file = manifestFile(manifest, fileIdx); + + // If the file has a reference, then it was not copied since it can be retrieved from the referenced backup. However, + // if hardlinking is enabled the link will need to be created. 
+ if (file->reference != NULL) + { + // If hardlinking is enabled then create a hardlink for files that have not changed since the last backup + if (hardLink) + { + LOG_DETAIL_FMT("hardlink %s to %s", strPtr(file->name), strPtr(file->reference)); + + const String *const linkName = storagePathP( + storageRepo(), strNewFmt("%s/%s%s", strPtr(backupPathExp), strPtr(file->name), compressExt)); + const String *const linkDestination = storagePathP( + storageRepo(), + strNewFmt(STORAGE_REPO_BACKUP "/%s/%s%s", strPtr(file->reference), strPtr(file->name), compressExt)); + + THROW_ON_SYS_ERROR_FMT( + link(strPtr(linkDestination), strPtr(linkName)) == -1, FileOpenError, + "unable to create hardlink '%s' to '%s'", strPtr(linkName), strPtr(linkDestination)); + } + // Else log the reference. With delta, it is possible that references may have been removed if a file needed to be + // recopied. + else + LOG_DETAIL_FMT("reference %s to %s", strPtr(file->name), strPtr(file->reference)); + } + } + + // Sync backup paths if required + if (storageFeature(storageRepoWrite(), storageFeaturePathSync)) + { + for (unsigned int pathIdx = 0; pathIdx < manifestPathTotal(manifest); pathIdx++) + { + const String *const path = strNewFmt("%s/%s", strPtr(backupPathExp), strPtr(manifestPath(manifest, pathIdx)->name)); + + if (backupType == backupTypeFull || hardLink || storagePathExistsP(storageRepo(), path)) + storagePathSyncP(storageRepoWrite(), path); + } + } + + LOG_INFO_FMT("%s backup size = %s", strPtr(backupTypeStr(backupType)), strPtr(strSizeFormat(sizeTotal))); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +Check and copy WAL segments required to make the backup consistent +***********************************************************************************************************************************/ +static void 
+backupArchiveCheckCopy(Manifest *manifest, unsigned int walSegmentSize, const String *cipherPassBackup) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_LOG_PARAM(UINT, walSegmentSize); + FUNCTION_TEST_PARAM(STRING, cipherPassBackup); + FUNCTION_LOG_END(); + + ASSERT(manifest != NULL); + + // If archive logs are required to complete the backup, then check them. This is the default, but can be overridden if the + // archive logs are going to a different server. Be careful of disabling this option because there is no way to verify that the + // backup will be consistent - at least not here. + if (cfgOptionBool(cfgOptOnline) && cfgOptionBool(cfgOptArchiveCheck)) + { + MEM_CONTEXT_TEMP_BEGIN() + { + unsigned int timeline = cvtZToUInt(strPtr(strSubN(manifestData(manifest)->archiveStart, 0, 8))); + uint64_t lsnStart = pgLsnFromStr(manifestData(manifest)->lsnStart); + uint64_t lsnStop = pgLsnFromStr(manifestData(manifest)->lsnStop); + + LOG_INFO_FMT( + "check archive for segment(s) %s:%s", strPtr(pgLsnToWalSegment(timeline, lsnStart, walSegmentSize)), + strPtr(pgLsnToWalSegment(timeline, lsnStop, walSegmentSize))); + + // Save the backup manifest before getting archive logs in case of failure + backupManifestSaveCopy(manifest, cipherPassBackup); + + // Use base path to set ownership and mode + const ManifestPath *basePath = manifestPathFind(manifest, MANIFEST_TARGET_PGDATA_STR); + + // Loop through all the segments in the lsn range + InfoArchive *infoArchive = infoArchiveLoadFile( + storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), + cfgOptionStr(cfgOptRepoCipherPass)); + const String *archiveId = infoArchiveId(infoArchive); + + StringList *walSegmentList = pgLsnRangeToWalSegmentList( + manifestData(manifest)->pgVersion, timeline, lsnStart, lsnStop, walSegmentSize); + + for (unsigned int walSegmentIdx = 0; walSegmentIdx < strLstSize(walSegmentList); walSegmentIdx++) + { + const String 
*walSegment = strLstGet(walSegmentList, walSegmentIdx); + + // Find the actual wal segment file in the archive + const String *archiveFile = walSegmentFind( + storageRepo(), archiveId, walSegment, (TimeMSec)(cfgOptionDbl(cfgOptArchiveTimeout) * MSEC_PER_SEC)); + + if (cfgOptionBool(cfgOptArchiveCopy)) + { + // Is the archive file compressed? + bool archiveCompressed = strEndsWithZ(archiveFile, "." GZIP_EXT); + + // Open the archive file + StorageRead *read = storageNewReadP( + storageRepo(), strNewFmt(STORAGE_REPO_ARCHIVE "/%s/%s", strPtr(archiveId), strPtr(archiveFile))); + IoFilterGroup *filterGroup = ioReadFilterGroup(storageReadIo(read)); + + // Decrypt with archive key if encrypted + cipherBlockFilterGroupAdd( + filterGroup, cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherModeDecrypt, + infoArchiveCipherPass(infoArchive)); + + // Compress or decompress if archive and backup do not have the same compression settings + if (archiveCompressed != cfgOptionBool(cfgOptCompress)) + { + if (archiveCompressed) + ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), gzipDecompressNew(false)); + else + ioFilterGroupAdd(filterGroup, gzipCompressNew(cfgOptionInt(cfgOptCompressLevel), false)); + } + + // Encrypt with backup key if encrypted + cipherBlockFilterGroupAdd( + filterGroup, cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherModeEncrypt, + manifestCipherSubPass(manifest)); + + // Add size filter last to calculate repo size + ioFilterGroupAdd(filterGroup, ioSizeNew()); + + // Copy the file + const String *manifestName = strNewFmt( + MANIFEST_TARGET_PGDATA "/%s/%s", strPtr(pgWalPath(manifestData(manifest)->pgVersion)), strPtr(walSegment)); + + storageCopyP( + read, + storageNewWriteP( + storageRepoWrite(), + strNewFmt( + STORAGE_REPO_BACKUP "/%s/%s%s", strPtr(manifestData(manifest)->backupLabel), strPtr(manifestName), + cfgOptionBool(cfgOptCompress) ? "." 
GZIP_EXT : ""))); + + // Add to manifest + ManifestFile file = + { + .name = manifestName, + .primary = true, + .mode = basePath->mode & (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH), + .user = basePath->user, + .group = basePath->group, + .size = walSegmentSize, + .sizeRepo = varUInt64Force(ioFilterGroupResult(filterGroup, SIZE_FILTER_TYPE_STR)), + .timestamp = manifestData(manifest)->backupTimestampStop, + }; + + memcpy(file.checksumSha1, strPtr(strSubN(archiveFile, 25, 40)), HASH_TYPE_SHA1_SIZE_HEX + 1); + + manifestFileAdd(manifest, &file); + } + } + } + MEM_CONTEXT_TEMP_END(); + } + + FUNCTION_LOG_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +Save and update all files required to complete the backup +***********************************************************************************************************************************/ +static void +backupComplete(InfoBackup *const infoBackup, Manifest *const manifest) +{ + FUNCTION_LOG_BEGIN(logLevelDebug); + FUNCTION_LOG_PARAM(INFO_BACKUP, infoBackup); + FUNCTION_LOG_PARAM(MANIFEST, manifest); + FUNCTION_LOG_END(); + + ASSERT(manifest != NULL); + + MEM_CONTEXT_TEMP_BEGIN() + { + const String *const backupLabel = manifestData(manifest)->backupLabel; + + // Final save of the backup manifest + // ------------------------------------------------------------------------------------------------------------------------- + backupManifestSaveCopy(manifest, infoPgCipherPass(infoBackupPg(infoBackup))); + + storageCopy( + storageNewReadP( + storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strPtr(backupLabel))), + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strPtr(backupLabel)))); + + // Copy a compressed version of the manifest to history. If the repo is encrypted then the passphrase to open the manifest + // is required. 
We can't just do a straight copy since the destination needs to be compressed and that must happen before + // encryption in order to be efficient. + // ------------------------------------------------------------------------------------------------------------------------- + StorageRead *manifestRead = storageNewReadP( + storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE, strPtr(backupLabel))); + + cipherBlockFilterGroupAdd( + ioReadFilterGroup(storageReadIo(manifestRead)), cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherModeDecrypt, + infoPgCipherPass(infoBackupPg(infoBackup))); + + StorageWrite *manifestWrite = storageNewWriteP( + storageRepoWrite(), + strNewFmt( + STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY "/%s/%s.manifest." GZIP_EXT, strPtr(strSubN(backupLabel, 0, 4)), + strPtr(backupLabel))); + + ioFilterGroupAdd(ioWriteFilterGroup(storageWriteIo(manifestWrite)), gzipCompressNew(9, false)); + + cipherBlockFilterGroupAdd( + ioWriteFilterGroup(storageWriteIo(manifestWrite)), cipherType(cfgOptionStr(cfgOptRepoCipherType)), cipherModeEncrypt, + infoPgCipherPass(infoBackupPg(infoBackup))); + + storageCopyP(manifestRead, manifestWrite); + + // Sync history path if required + if (storageFeature(storageRepoWrite(), storageFeaturePathSync)) + storagePathSyncP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_PATH_HISTORY)); + + // Create a symlink to the most recent backup if supported. This link is purely informational for the user and is never + // used by us since symlinks are not supported on all storage types. 
+ // ------------------------------------------------------------------------------------------------------------------------- + const String *const latestLink = storagePathP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/" BACKUP_LINK_LATEST)); + + // Remove an existing latest link/file in case symlink capabilities have changed + storageRemoveP(storageRepoWrite(), latestLink); + + if (storageFeature(storageRepoWrite(), storageFeatureSymLink)) + { + THROW_ON_SYS_ERROR_FMT( + symlink(strPtr(backupLabel), strPtr(latestLink)) == -1, FileOpenError, + "unable to create symlink '%s' to '%s'", strPtr(latestLink), strPtr(backupLabel)); + } + + // Sync backup path if required + if (storageFeature(storageRepoWrite(), storageFeaturePathSync)) + storagePathSyncP(storageRepoWrite(), STORAGE_REPO_BACKUP_STR); + + // Add manifest and save backup.info (infoBackupSaveFile() is responsible for proper syncing) + // ------------------------------------------------------------------------------------------------------------------------- + infoBackupDataAdd(infoBackup, manifest); + + infoBackupSaveFile( + infoBackup, storageRepoWrite(), INFO_BACKUP_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), + cfgOptionStr(cfgOptRepoCipherPass)); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN_VOID(); +} + +/*********************************************************************************************************************************** +Make a backup +***********************************************************************************************************************************/ +void +cmdBackup(void) +{ + FUNCTION_LOG_VOID(logLevelDebug); + + // Verify the repo is local + repoIsLocalVerify(); + + // Test for stop file + lockStopTest(); + + MEM_CONTEXT_TEMP_BEGIN() + { + // Load backup.info + InfoBackup *infoBackup = infoBackupLoadFileReconstruct( + storageRepo(), INFO_BACKUP_PATH_FILE_STR, cipherType(cfgOptionStr(cfgOptRepoCipherType)), + cfgOptionStr(cfgOptRepoCipherPass)); 
+ InfoPgData infoPg = infoPgDataCurrent(infoBackupPg(infoBackup)); + const String *cipherPassBackup = infoPgCipherPass(infoBackupPg(infoBackup)); + + // Get pg storage and database objects + BackupData *backupData = backupInit(infoBackup); + + // Get the start timestamp which will later be written into the manifest to track total backup time + time_t timestampStart = backupTime(backupData, false); + + // Check if there is a prior manifest when backup type is diff/incr + Manifest *manifestPrior = backupBuildIncrPrior(infoBackup); + + // Start the backup + BackupStartResult backupStartResult = backupStart(backupData); + + // Build the manifest + Manifest *manifest = manifestNewBuild( + backupData->storagePrimary, infoPg.version, cfgOptionBool(cfgOptOnline), cfgOptionBool(cfgOptChecksumPage), + strLstNewVarLst(cfgOptionLst(cfgOptExclude)), backupStartResult.tablespaceList); + + // Validate the manifest using the copy start time + manifestBuildValidate(manifest, cfgOptionBool(cfgOptDelta), backupTime(backupData, true), cfgOptionBool(cfgOptCompress)); + + // Build an incremental backup if type is not full (manifestPrior will be freed in this call) + if (!backupBuildIncr(infoBackup, manifest, manifestPrior, backupStartResult.walSegmentName)) + manifestCipherSubPassSet(manifest, cipherPassGen(cipherType(cfgOptionStr(cfgOptRepoCipherType)))); + + // Set delta if it is not already set and the manifest requires it + if (!cfgOptionBool(cfgOptDelta) && varBool(manifestData(manifest)->backupOptionDelta)) + cfgOptionSet(cfgOptDelta, cfgSourceParam, BOOL_TRUE_VAR); + + // Resume a backup when possible + if (!backupResume(manifest, cipherPassBackup)) + { + manifestBackupLabelSet( + manifest, + backupLabelCreate(backupType(cfgOptionStr(cfgOptType)), manifestData(manifest)->backupLabelPrior, timestampStart)); + } + + // Save the manifest before processing starts + backupManifestSaveCopy(manifest, cipherPassBackup); + + // Process the backup manifest + backupProcess(backupData, 
manifest, backupStartResult.lsn, cipherPassBackup); + + // Stop the backup + BackupStopResult backupStopResult = backupStop(backupData, manifest); + + // Complete manifest + manifestBuildComplete( + manifest, timestampStart, backupStartResult.lsn, backupStartResult.walSegmentName, backupStopResult.timestamp, + backupStopResult.lsn, backupStopResult.walSegmentName, infoPg.id, infoPg.systemId, backupStartResult.dbList, + cfgOptionBool(cfgOptOnline) && cfgOptionBool(cfgOptArchiveCheck), + !cfgOptionBool(cfgOptOnline) || (cfgOptionBool(cfgOptArchiveCheck) && cfgOptionBool(cfgOptArchiveCopy)), + cfgOptionUInt(cfgOptBufferSize), cfgOptionUInt(cfgOptCompressLevel), cfgOptionUInt(cfgOptCompressLevelNetwork), + cfgOptionBool(cfgOptRepoHardlink), cfgOptionUInt(cfgOptProcessMax), cfgOptionBool(cfgOptBackupStandby)); + + // The primary db object won't be used anymore so free it + dbFree(backupData->dbPrimary); + + // The primary protocol connection won't be used anymore so free it. Any further access to the primary storage object may + // result in an error (likely eof). 
+ protocolRemoteFree(backupData->pgIdPrimary); + + // Check and copy WAL segments required to make the backup consistent + backupArchiveCheckCopy(manifest, backupData->walSegmentSize, cipherPassBackup); + + // Complete the backup + LOG_INFO_FMT("new backup label = %s", strPtr(manifestData(manifest)->backupLabel)); + backupComplete(infoBackup, manifest); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_LOG_RETURN_VOID(); +} diff --git a/src/command/backup/backup.h b/src/command/backup/backup.h new file mode 100644 index 000000000..ca847a061 --- /dev/null +++ b/src/command/backup/backup.h @@ -0,0 +1,12 @@ +/*********************************************************************************************************************************** +Backup Command +***********************************************************************************************************************************/ +#ifndef COMMAND_BACKUP_BACKUP_H +#define COMMAND_BACKUP_BACKUP_H + +/*********************************************************************************************************************************** +Functions +***********************************************************************************************************************************/ +void cmdBackup(void); + +#endif diff --git a/src/command/backup/protocol.c b/src/command/backup/protocol.c index 1ac910f57..7631056c5 100644 --- a/src/command/backup/protocol.c +++ b/src/command/backup/protocol.c @@ -40,13 +40,12 @@ backupProtocol(const String *command, const VariantList *paramList, ProtocolServ { // Backup the file BackupFileResult result = backupFile( - varStr(varLstGet(paramList, 0)), varBoolForce(varLstGet(paramList, 1)), varUInt64(varLstGet(paramList, 2)), - varStr(varLstGet(paramList, 3)), varBoolForce(varLstGet(paramList, 4)), - varUInt64(varLstGet(paramList, 5)) << 32 | varUInt64(varLstGet(paramList, 6)), varStr(varLstGet(paramList, 7)), - varBoolForce(varLstGet(paramList, 8)), varBoolForce(varLstGet(paramList, 9)), - 
varUIntForce(varLstGet(paramList, 10)), varStr(varLstGet(paramList, 11)), varBoolForce(varLstGet(paramList, 12)), - varLstSize(paramList) == 14 ? cipherTypeAes256Cbc : cipherTypeNone, - varLstSize(paramList) == 14 ? varStr(varLstGet(paramList, 13)) : NULL); + varStr(varLstGet(paramList, 0)), varBool(varLstGet(paramList, 1)), varUInt64(varLstGet(paramList, 2)), + varStr(varLstGet(paramList, 3)), varBool(varLstGet(paramList, 4)), + varUInt64(varLstGet(paramList, 5)), varStr(varLstGet(paramList, 6)), + varBool(varLstGet(paramList, 7)), varBool(varLstGet(paramList, 8)), varUIntForce(varLstGet(paramList, 9)), + varStr(varLstGet(paramList, 10)), varBool(varLstGet(paramList, 11)), + varStr(varLstGet(paramList, 12)) == NULL ? cipherTypeNone : cipherTypeAes256Cbc, varStr(varLstGet(paramList, 12))); // Return backup result VariantList *resultList = varLstNew(); diff --git a/src/main.c b/src/main.c index 4ba54cdd2..e79321055 100644 --- a/src/main.c +++ b/src/main.c @@ -9,6 +9,7 @@ Main #include "command/archive/get/get.h" #include "command/archive/push/push.h" +#include "command/backup/backup.h" #include "command/check/check.h" #include "command/command.h" #include "command/control/start.h" @@ -106,16 +107,8 @@ main(int argListSize, const char *argList[]) // ----------------------------------------------------------------------------------------------------------------- case cfgCmdBackup: { -#ifdef DEBUG - // Check pg_control during testing so errors are more obvious. Otherwise errors only happen in - // archive-get/archive-push and end up in the PostgreSQL log which is not output in CI. This can be removed - // once backup is written in C. 
- if (cfgOptionBool(cfgOptOnline) && !cfgOptionBool(cfgOptBackupStandby) && !cfgOptionTest(cfgOptPgHost)) - pgControlFromFile(storagePg()); -#endif - // Run backup - perlExec(); + cmdBackup(); // Switch to expire command cmdEnd(0, NULL); diff --git a/src/perl/embed.auto.c b/src/perl/embed.auto.c index f9ceed4f8..6df43ea33 100644 --- a/src/perl/embed.auto.c +++ b/src/perl/embed.auto.c @@ -45,202 +45,6 @@ static const EmbeddedModule embeddedModule[] = "\n\n\n\n" "use constant PG_WAL_SEGMENT_SIZE => 16777216;\n" "push @EXPORT, qw(PG_WAL_SEGMENT_SIZE);\n" - "\n\n\n\n\n\n" - "sub lsnNormalize\n" - "{\n" - "\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strLsn,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . '::lsnFile', \\@_,\n" - "{name => 'strLsn', trace => true},\n" - ");\n" - "\n\n" - "my @stryLsnSplit = split('/', $strLsn);\n" - "\n" - "if (@stryLsnSplit != 2)\n" - "{\n" - "confess &log(ASSERT, \"invalid lsn ${strLsn}\");\n" - "}\n" - "\n" - "my $strLsnNormal = uc(sprintf(\"%08s%08s\", $stryLsnSplit[0], $stryLsnSplit[1]));\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'strLsnNormal', value => $strLsnNormal, trace => true}\n" - ");\n" - "\n" - "}\n" - "\n" - "push @EXPORT, qw(lsnNormalize);\n" - "\n\n\n\n\n\n\n" - "sub lsnFileRange\n" - "{\n" - "\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strLsnStart,\n" - "$strLsnStop,\n" - "$strDbVersion,\n" - "$iWalSegmentSize,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . 
'::lsnFileRange', \\@_,\n" - "{name => 'strLsnStart'},\n" - "{name => 'strLsnStop'},\n" - "{name => '$strDbVersion'},\n" - "{name => '$iWalSegmentSize'},\n" - ");\n" - "\n\n" - "my @stryArchive;\n" - "my $iArchiveIdx = 0;\n" - "my $bSkipFF = $strDbVersion < PG_VERSION_93;\n" - "\n\n" - "my @stryArchiveSplit = split('/', $strLsnStart);\n" - "my $iStartMajor = hex($stryArchiveSplit[0]);\n" - "my $iStartMinor = int(hex($stryArchiveSplit[1]) / $iWalSegmentSize);\n" - "\n" - "@stryArchiveSplit = split('/', $strLsnStop);\n" - "my $iStopMajor = hex($stryArchiveSplit[0]);\n" - "my $iStopMinor = int(hex($stryArchiveSplit[1]) / $iWalSegmentSize);\n" - "\n" - "$stryArchive[$iArchiveIdx] = uc(sprintf(\"%08x%08x\", $iStartMajor, $iStartMinor));\n" - "$iArchiveIdx += 1;\n" - "\n" - "while (!($iStartMajor == $iStopMajor && $iStartMinor == $iStopMinor))\n" - "{\n" - "$iStartMinor += 1;\n" - "\n" - "if ($bSkipFF && $iStartMinor == 255 || !$bSkipFF && $iStartMinor > int(0xFFFFFFFF / $iWalSegmentSize))\n" - "{\n" - "$iStartMajor += 1;\n" - "$iStartMinor = 0;\n" - "}\n" - "\n" - "$stryArchive[$iArchiveIdx] = uc(sprintf(\"%08x%08x\", $iStartMajor, $iStartMinor));\n" - "$iArchiveIdx += 1;\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'stryWalFileName', value => \\@stryArchive}\n" - ");\n" - "}\n" - "\n" - "push @EXPORT, qw(lsnFileRange);\n" - "\n\n\n\n\n\n\n" - "sub walSegmentFind\n" - "{\n" - "\n" - "my\n" - "(\n" - "$strOperation,\n" - "$oStorageRepo,\n" - "$strArchiveId,\n" - "$strWalSegment,\n" - "$iWaitSeconds,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . 
'::walSegmentFind', \\@_,\n" - "{name => 'oStorageRepo'},\n" - "{name => 'strArchiveId'},\n" - "{name => 'strWalSegment'},\n" - "{name => 'iWaitSeconds', required => false},\n" - ");\n" - "\n\n" - "if (!walIsSegment($strWalSegment))\n" - "{\n" - "confess &log(ERROR, \"${strWalSegment} is not a WAL segment\", ERROR_ASSERT);\n" - "}\n" - "\n\n" - "my $oWait = waitInit($iWaitSeconds);\n" - "my @stryWalFileName;\n" - "\n" - "do\n" - "{\n" - "\n" - "push(@stryWalFileName, $oStorageRepo->list(\n" - "STORAGE_REPO_ARCHIVE . \"/${strArchiveId}/\" . substr($strWalSegment, 0, 16),\n" - "{strExpression =>\n" - "'^' . substr($strWalSegment, 0, 24) . (walIsPartial($strWalSegment) ? \"\\\\.partial\" : '') .\n" - "\"-[0-f]{40}(\\\\.\" . COMPRESS_EXT . \"){0,1}\\$\",\n" - "bIgnoreMissing => true}));\n" - "}\n" - "while (@stryWalFileName == 0 && waitMore($oWait));\n" - "\n\n\n" - "if (@stryWalFileName > 1)\n" - "{\n" - "confess &log(ERROR,\n" - "\"duplicates found in archive for WAL segment ${strWalSegment}: \" . join(', ', @stryWalFileName) .\n" - "\"\\nHINT: are multiple primaries archiving to this stanza?\",\n" - "ERROR_ARCHIVE_DUPLICATE);\n" - "}\n" - "\n\n" - "if (@stryWalFileName == 0 && defined($iWaitSeconds))\n" - "{\n" - "confess &log(\n" - "ERROR,\n" - "\"could not find WAL segment ${strWalSegment} after ${iWaitSeconds} second(s)\" .\n" - "\"\\nHINT: is archive_command configured correctly?\" .\n" - "\"\\nHINT: use the check command to verify that PostgreSQL is archiving.\",\n" - "ERROR_ARCHIVE_TIMEOUT);\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'strWalFileName', value => $stryWalFileName[0]}\n" - ");\n" - "}\n" - "\n" - "push @EXPORT, qw(walSegmentFind);\n" - "\n\n\n\n\n\n" - "sub walIsSegment\n" - "{\n" - "\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strWalFile,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . 
'::walIsSegment', \\@_,\n" - "{name => 'strWalFile', trace => true},\n" - ");\n" - "\n" - "return $strWalFile =~ /^[0-F]{24}(\\.partial){0,1}$/ ? true : false;\n" - "}\n" - "\n" - "push @EXPORT, qw(walIsSegment);\n" - "\n\n\n\n\n\n" - "sub walIsPartial\n" - "{\n" - "\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strWalFile,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . '::walIsPartial', \\@_,\n" - "{name => 'strWalFile', trace => true},\n" - ");\n" - "\n" - "return walIsSegment($strWalFile) && $strWalFile =~ /\\.partial$/ ? true : false;\n" - "}\n" - "\n" - "push @EXPORT, qw(walIsPartial);\n" "\n" "1;\n" }, @@ -748,969 +552,6 @@ static const EmbeddedModule embeddedModule[] = "\n" "1;\n" }, - { - .name = "pgBackRest/Backup/Backup.pm", - .data = - "\n\n\n" - "package pgBackRest::Backup::Backup;\n" - "\n" - "use strict;\n" - "use warnings FATAL => qw(all);\n" - "use Carp qw(confess);\n" - "use English '-no_match_vars';\n" - "\n" - "use Exporter qw(import);\n" - "use File::Basename;\n" - "\n" - "use pgBackRest::Archive::Common;\n" - "use pgBackRest::Backup::Common;\n" - "use pgBackRest::Backup::File;\n" - "use pgBackRest::Backup::Info;\n" - "use pgBackRest::Common::Cipher;\n" - "use pgBackRest::Common::Exception;\n" - "use pgBackRest::Common::Ini;\n" - "use pgBackRest::Common::Log;\n" - "use pgBackRest::Common::Wait;\n" - "use pgBackRest::Common::String;\n" - "use pgBackRest::Config::Config;\n" - "use pgBackRest::Db;\n" - "use pgBackRest::DbVersion;\n" - "use pgBackRest::Manifest;\n" - "use pgBackRest::Protocol::Local::Process;\n" - "use pgBackRest::Protocol::Helper;\n" - "use pgBackRest::Protocol::Storage::Helper;\n" - "use pgBackRest::Common::Io::Handle;\n" - "use pgBackRest::Storage::Base;\n" - "use pgBackRest::Storage::Helper;\n" - "use pgBackRest::Version;\n" - "\n\n\n\n" - "sub new\n" - "{\n" - "my $class = shift;\n" - "\n\n" - "my $self = {};\n" - "bless $self, $class;\n" - "\n\n" - "my ($strOperation) = logDebugParam(__PACKAGE__ . 
'->new');\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'self', value => $self}\n" - ");\n" - "}\n" - "\n\n\n\n" - "sub resumeClean\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my\n" - "(\n" - "$strOperation,\n" - "$oStorageRepo,\n" - "$strBackupLabel,\n" - "$oManifest,\n" - "$oAbortedManifest,\n" - "$bOnline,\n" - "$bDelta,\n" - "$strTimelineCurrent,\n" - "$strTimelineLast,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . '->resumeClean', \\@_,\n" - "{name => 'oStorageRepo'},\n" - "{name => 'strBackupLabel'},\n" - "{name => 'oManifest'},\n" - "{name => 'oAbortedManifest'},\n" - "{name => 'bOnline'},\n" - "{name => 'bDelta'},\n" - "{name => 'strTimelineCurrent', required => false},\n" - "{name => 'strTimelineLast', required => false},\n" - ");\n" - "\n" - "&log(DETAIL, 'clean resumed backup path: ' . $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . \"/${strBackupLabel}\"));\n" - "\n\n" - "my $hFile = $oStorageRepo->manifest(STORAGE_REPO_BACKUP . 
\"/${strBackupLabel}\");\n" - "\n\n" - "my $bCompressed = $oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS);\n" - "\n" - "if (!$bDelta)\n" - "{\n" - "\n" - "$bDelta = $oAbortedManifest->checkDelta(\n" - "'resumed', $oAbortedManifest->boolTest(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ONLINE, undef, $bOnline),\n" - "$strTimelineCurrent, $strTimelineLast);\n" - "\n\n" - "if (!$bDelta)\n" - "{\n" - "my @stryFileList = ();\n" - "\n" - "foreach my $strName (sort(keys(%{$hFile})))\n" - "{\n" - "\n" - "if ($strName eq FILE_MANIFEST_COPY ||\n" - "$strName eq '.')\n" - "{\n" - "next;\n" - "}\n" - "\n" - "if ($hFile->{$strName}{type} eq 'f')\n" - "{\n" - "\n" - "my $strFile = $strName;\n" - "\n" - "if ($bCompressed)\n" - "{\n" - "$strFile = substr($strFile, 0, length($strFile) - 3);\n" - "}\n" - "\n\n\n" - "if ($oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile) &&\n" - "!$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_REFERENCE) &&\n" - "$oAbortedManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM))\n" - "{\n" - "push(@stryFileList, $strFile);\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "if (@stryFileList)\n" - "{\n" - "$bDelta = $oManifest->checkDeltaFile(\\@stryFileList, $oAbortedManifest, undef);\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "my @stryFile;\n" - "\n" - "foreach my $strName (sort(keys(%{$hFile})))\n" - "{\n" - "\n" - "if ($strName eq FILE_MANIFEST_COPY ||\n" - "$strName eq '.')\n" - "{\n" - "next;\n" - "}\n" - "\n\n" - "my $cType = $hFile->{$strName}{type};\n" - "\n\n" - "if ($cType eq 'd')\n" - "{\n" - "if ($oManifest->test(MANIFEST_SECTION_TARGET_PATH, $strName))\n" - "{\n" - "next;\n" - "}\n" - "}\n" - "\n" - "elsif ($cType eq 'f')\n" - "{\n" - "\n" - "my $strFile = $strName;\n" - "\n" - "if ($bCompressed)\n" - "{\n" - "$strFile = substr($strFile, 0, length($strFile) - 3);\n" - "}\n" - "\n\n" - "if ($oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile) &&\n" - 
"!$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_REFERENCE))\n" - "{\n" - "\n" - "my $strChecksum = $oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM, false);\n" - "\n\n\n\n\n" - "if (defined($strChecksum) &&\n" - "$oManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_SIZE) ==\n" - "$oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_SIZE) &&\n" - "($bDelta ||\n" - "$oManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_TIMESTAMP) ==\n" - "$oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_TIMESTAMP)))\n" - "{\n" - "$oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksum);\n" - "\n\n" - "my $bChecksumPage =\n" - "$oAbortedManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, false);\n" - "\n" - "if (defined($bChecksumPage))\n" - "{\n" - "$oManifest->boolSet(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, $bChecksumPage);\n" - "\n" - "if (!$bChecksumPage &&\n" - "$oAbortedManifest->test(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR))\n" - "{\n" - "$oManifest->set(\n" - "MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR,\n" - "$oAbortedManifest->get(\n" - "MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR));\n" - "}\n" - "}\n" - "\n" - "next;\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "if ($cType eq 'd')\n" - "{\n" - "logDebugMisc($strOperation, \"remove path ${strName}\");\n" - "$oStorageRepo->pathRemove(STORAGE_REPO_BACKUP . \"/${strBackupLabel}/${strName}\", {bRecurse => true});\n" - "}\n" - "\n" - "else\n" - "{\n" - "logDebugMisc($strOperation, \"remove file ${strName}\");\n" - "push(@stryFile, STORAGE_REPO_BACKUP . 
\"/${strBackupLabel}/${strName}\");\n" - "}\n" - "}\n" - "\n\n" - "if (@stryFile > 0)\n" - "{\n" - "$oStorageRepo->remove(\\@stryFile);\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'bDelta', value => $bDelta, trace => true},\n" - ");\n" - "}\n" - "\n\n\n\n\n\n\n" - "sub processManifest\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strDbMasterPath,\n" - "$strDbCopyPath,\n" - "$strType,\n" - "$strDbVersion,\n" - "$bCompress,\n" - "$bHardLink,\n" - "$oBackupManifest,\n" - "$strBackupLabel,\n" - "$strLsnStart,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . '->processManifest', \\@_,\n" - "{name => 'strDbMasterPath'},\n" - "{name => 'strDbCopyPath'},\n" - "{name => 'strType'},\n" - "{name => 'strDbVersion'},\n" - "{name => 'bCompress'},\n" - "{name => 'bHardLink'},\n" - "{name => 'oBackupManifest'},\n" - "{name => 'strBackupLabel'},\n" - "{name => 'strLsnStart', required => false},\n" - ");\n" - "\n\n" - "my $oProtocolMaster =\n" - "!isDbLocal({iRemoteIdx => $self->{iMasterRemoteIdx}}) ?\n" - "protocolGet(CFGOPTVAL_REMOTE_TYPE_DB, $self->{iMasterRemoteIdx}) : undef;\n" - "defined($oProtocolMaster) && $oProtocolMaster->noOp();\n" - "\n\n" - "my $oBackupProcess = new pgBackRest::Protocol::Local::Process(CFGOPTVAL_LOCAL_TYPE_DB);\n" - "\n" - "if ($self->{iCopyRemoteIdx} != $self->{iMasterRemoteIdx})\n" - "{\n" - "$oBackupProcess->hostAdd($self->{iMasterRemoteIdx}, 1);\n" - "}\n" - "\n" - "$oBackupProcess->hostAdd($self->{iCopyRemoteIdx}, cfgOption(CFGOPT_PROCESS_MAX));\n" - "\n\n" - "my $lFileTotal = 0;\n" - "my $lSizeTotal = 0;\n" - "\n\n" - "if ($bHardLink || $strType eq CFGOPTVAL_BACKUP_TYPE_FULL)\n" - "{\n" - "\n" - "foreach my $strPath ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_PATH))\n" - "{\n" - "storageRepo()->pathCreate(STORAGE_REPO_BACKUP . 
\"/${strBackupLabel}/${strPath}\", {bIgnoreExists => true});\n" - "}\n" - "\n" - "if (storageRepo()->capability(STORAGE_CAPABILITY_LINK))\n" - "{\n" - "for my $strTarget ($oBackupManifest->keys(MANIFEST_SECTION_BACKUP_TARGET))\n" - "{\n" - "if ($oBackupManifest->isTargetTablespace($strTarget))\n" - "{\n" - "storageRepo()->linkCreate(\n" - "STORAGE_REPO_BACKUP . \"/${strBackupLabel}/${strTarget}\",\n" - "STORAGE_REPO_BACKUP . \"/${strBackupLabel}/\" . MANIFEST_TARGET_PGDATA . \"/${strTarget}\",\n" - "{bRelative => true});\n" - "}\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "foreach my $strRepoFile (\n" - "sort {sprintf(\"%016d-%s\", $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $b, MANIFEST_SUBKEY_SIZE), $b) cmp\n" - "sprintf(\"%016d-%s\", $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $a, MANIFEST_SUBKEY_SIZE), $a)}\n" - "($oBackupManifest->keys(MANIFEST_SECTION_TARGET_FILE, INI_SORT_NONE)))\n" - "{\n" - "\n\n" - "my $strReference = $oBackupManifest->get(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REFERENCE, false);\n" - "\n" - "if (defined($strReference))\n" - "{\n" - "\n\n" - "if (!cfgOption(CFGOPT_DELTA) ||\n" - "$oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE) == 0)\n" - "{\n" - "\n" - "next;\n" - "}\n" - "}\n" - "\n\n" - "my $strQueueKey = MANIFEST_TARGET_PGDATA;\n" - "\n\n" - "if (index($strRepoFile, DB_PATH_PGTBLSPC . '/') == 0)\n" - "{\n" - "$strQueueKey = DB_PATH_PGTBLSPC . '/' . 
(split('\\/', $strRepoFile))[1];\n" - "}\n" - "\n\n" - "my $bIgnoreMissing = true;\n" - "my $strDbFile = $oBackupManifest->dbPathGet($strDbCopyPath, $strRepoFile);\n" - "my $iHostConfigIdx = $self->{iCopyRemoteIdx};\n" - "\n\n" - "if ($oBackupManifest->boolGet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_MASTER))\n" - "{\n" - "$strDbFile = $oBackupManifest->dbPathGet($strDbMasterPath, $strRepoFile);\n" - "$iHostConfigIdx = $self->{iMasterRemoteIdx};\n" - "}\n" - "\n\n" - "if ($strRepoFile eq MANIFEST_TARGET_PGDATA . '/' . DB_FILE_PGCONTROL)\n" - "{\n" - "$bIgnoreMissing = false;\n" - "}\n" - "\n\n" - "my $lSize = $oBackupManifest->numericGet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE);\n" - "\n" - "$lFileTotal++;\n" - "$lSizeTotal += $lSize;\n" - "\n\n" - "$oBackupProcess->queueJob(\n" - "$iHostConfigIdx, $strQueueKey, $strRepoFile, OP_BACKUP_FILE,\n" - "[$strDbFile, $bIgnoreMissing, $lSize,\n" - "$oBackupManifest->get(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, false),\n" - "cfgOption(CFGOPT_CHECKSUM_PAGE) ? isChecksumPage($strRepoFile) : false,\n" - "defined($strLsnStart) ? hex((split('/', $strLsnStart))[0]) : 0xFFFFFFFF,\n" - "defined($strLsnStart) ? hex((split('/', $strLsnStart))[1]) : 0xFFFFFFFF,\n" - "$strRepoFile, defined($strReference) ? true : false, $bCompress, cfgOption(CFGOPT_COMPRESS_LEVEL),\n" - "$strBackupLabel, cfgOption(CFGOPT_DELTA)],\n" - "{rParamSecure => $oBackupManifest->cipherPassSub() ? [$oBackupManifest->cipherPassSub()] : undef});\n" - "\n\n" - "$oBackupManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE);\n" - "$oBackupManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM);\n" - "}\n" - "\n\n" - "if (!$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, MANIFEST_FILE_PGCONTROL) && cfgOption(CFGOPT_ONLINE))\n" - "{\n" - "confess &log(ERROR, DB_FILE_PGCONTROL . 
\" must be present in all online backups\\n\" .\n" - "'HINT: is something wrong with the clock or filesystem timestamps?', ERROR_FILE_MISSING);\n" - "}\n" - "\n\n\n" - "if ($lFileTotal == 0)\n" - "{\n" - "confess &log(ERROR, \"no files have changed since the last backup - this seems unlikely\", ERROR_FILE_MISSING);\n" - "}\n" - "\n\n" - "my $lSizeCurrent = 0;\n" - "\n\n" - "my $lManifestSaveCurrent = 0;\n" - "my $lManifestSaveSize = int($lSizeTotal / 100);\n" - "\n" - "if (cfgOptionSource(CFGOPT_MANIFEST_SAVE_THRESHOLD) ne CFGDEF_SOURCE_DEFAULT ||\n" - "$lManifestSaveSize < cfgOption(CFGOPT_MANIFEST_SAVE_THRESHOLD))\n" - "{\n" - "$lManifestSaveSize = cfgOption(CFGOPT_MANIFEST_SAVE_THRESHOLD);\n" - "}\n" - "\n\n" - "while (my $hyJob = $oBackupProcess->process())\n" - "{\n" - "foreach my $hJob (@{$hyJob})\n" - "{\n" - "($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate(\n" - "$oBackupManifest, cfgOption(cfgOptionIdFromIndex(CFGOPT_PG_HOST, $hJob->{iHostConfigIdx}), false),\n" - "$hJob->{iProcessId}, @{$hJob->{rParam}}[0], @{$hJob->{rParam}}[7], @{$hJob->{rParam}}[2], @{$hJob->{rParam}}[3],\n" - "@{$hJob->{rParam}}[4], @{$hJob->{rResult}}, $lSizeTotal, $lSizeCurrent, $lManifestSaveSize,\n" - "$lManifestSaveCurrent);\n" - "}\n" - "\n\n\n" - "protocolKeepAlive();\n" - "}\n" - "\n" - "foreach my $strFile ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_FILE))\n" - "{\n" - "\n\n" - "my $strReference = $oBackupManifest->get(MANIFEST_SECTION_TARGET_FILE, $strFile, MANIFEST_SUBKEY_REFERENCE, false);\n" - "\n" - "if ($strReference)\n" - "{\n" - "\n" - "if ($bHardLink)\n" - "{\n" - "&log(DETAIL, \"hardlink ${strFile} to ${strReference}\");\n" - "\n" - "storageRepo()->linkCreate(\n" - "STORAGE_REPO_BACKUP . \"/${strReference}/${strFile}\" . ($bCompress ? qw{.} . COMPRESS_EXT : ''),\n" - "STORAGE_REPO_BACKUP . \"/${strBackupLabel}/${strFile}\" . ($bCompress ? qw{.} . 
COMPRESS_EXT : ''),\n" - "{bHard => true});\n" - "}\n" - "\n\n" - "else\n" - "{\n" - "logDebugMisc($strOperation, \"reference ${strFile} to ${strReference}\");\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "$oBackupManifest->validate();\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'lSizeTotal', value => $lSizeTotal}\n" - ");\n" - "}\n" - "\n\n\n\n\n\n" - "sub process\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my ($strOperation) = logDebugParam(__PACKAGE__ . '->process');\n" - "\n\n" - "my $lTimestampStart = time();\n" - "\n\n" - "my $oStorageRepo = storageRepo();\n" - "\n\n" - "my $strType = cfgOption(CFGOPT_TYPE);\n" - "my $bCompress = cfgOption(CFGOPT_COMPRESS);\n" - "my $bHardLink = cfgOption(CFGOPT_REPO_HARDLINK);\n" - "\n\n" - "my $oBackupInfo = new pgBackRest::Backup::Info($oStorageRepo->pathGet(STORAGE_REPO_BACKUP));\n" - "\n\n" - "my $strCipherPassManifest = $oBackupInfo->cipherPassSub();\n" - "my $strCipherPassBackupSet;\n" - "\n\n" - "my $oDbMaster = undef;\n" - "my $oDbStandby = undef;\n" - "\n\n" - "($oDbMaster, $self->{iMasterRemoteIdx}, $oDbStandby, $self->{iCopyRemoteIdx}) = dbObjectGet();\n" - "\n\n" - "if (!defined($self->{iCopyRemoteIdx}))\n" - "{\n" - "$self->{iCopyRemoteIdx} = $self->{iMasterRemoteIdx};\n" - "}\n" - "\n\n\n" - "if (!defined($oDbStandby) && cfgOption(CFGOPT_BACKUP_STANDBY))\n" - "{\n" - "cfgOptionSet(CFGOPT_BACKUP_STANDBY, false);\n" - "&log(WARN, 'option backup-standby is enabled but standby is not properly configured - ' .\n" - "'backups will be performed from the master');\n" - "}\n" - "\n\n" - "my $oStorageDbMaster = storageDb({iRemoteIdx => $self->{iMasterRemoteIdx}});\n" - "\n\n" - "my $strDbMasterPath = cfgOption(cfgOptionIdFromIndex(CFGOPT_PG_PATH, $self->{iMasterRemoteIdx}));\n" - "my $strDbCopyPath = cfgOption(cfgOptionIdFromIndex(CFGOPT_PG_PATH, $self->{iCopyRemoteIdx}));\n" - "\n\n" - "my ($strDbVersion, $iControlVersion, $iCatalogVersion, $ullDbSysId) = $oDbMaster->info();\n" - "\n" - 
"my $iDbHistoryId = $oBackupInfo->check($strDbVersion, $iControlVersion, $iCatalogVersion, $ullDbSysId);\n" - "\n\n" - "my $oLastManifest;\n" - "my $strBackupLastPath;\n" - "my $strTimelineLast;\n" - "\n" - "if ($strType ne CFGOPTVAL_BACKUP_TYPE_FULL)\n" - "{\n" - "$strBackupLastPath = $oBackupInfo->last(\n" - "$strType eq CFGOPTVAL_BACKUP_TYPE_DIFF ? CFGOPTVAL_BACKUP_TYPE_FULL : CFGOPTVAL_BACKUP_TYPE_INCR);\n" - "\n\n" - "if (defined($strBackupLastPath) && $oBackupInfo->confirmDb($strBackupLastPath, $strDbVersion, $ullDbSysId))\n" - "{\n" - "$oLastManifest = new pgBackRest::Manifest(\n" - "$oStorageRepo->pathGet(STORAGE_REPO_BACKUP . \"/${strBackupLastPath}/\" . FILE_MANIFEST),\n" - "{strCipherPass => $strCipherPassManifest});\n" - "\n\n" - "$strCipherPassBackupSet = $oLastManifest->cipherPassSub();\n" - "\n\n" - "if ($oLastManifest->test(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP))\n" - "{\n" - "$strTimelineLast = substr($oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP), 0, 8);\n" - "}\n" - "\n" - "&log(INFO, 'last backup label = ' . $oLastManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL) .\n" - "', version = ' . $oLastManifest->get(INI_SECTION_BACKREST, INI_KEY_VERSION));\n" - "\n\n" - "my $strKey;\n" - "\n\n" - "if (!$oLastManifest->boolTest(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, $bCompress))\n" - "{\n" - "&log(WARN, \"${strType} backup cannot alter compress option to '\" . boolFormat($bCompress) .\n" - "\"', reset to value in ${strBackupLastPath}\");\n" - "$bCompress = $oLastManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS);\n" - "}\n" - "\n\n" - "if (!$oLastManifest->boolTest(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, $bHardLink))\n" - "{\n" - "&log(WARN, \"${strType} backup cannot alter hardlink option to '\" . 
boolFormat($bHardLink) .\n" - "\"', reset to value in ${strBackupLastPath}\");\n" - "$bHardLink = $oLastManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK);\n" - "}\n" - "}\n" - "else\n" - "{\n" - "&log(WARN, \"no prior backup exists, ${strType} backup has been changed to full\");\n" - "$strType = CFGOPTVAL_BACKUP_TYPE_FULL;\n" - "$strBackupLastPath = undef;\n" - "}\n" - "}\n" - "\n\n" - "my $strBackupLabel;\n" - "my $oAbortedManifest;\n" - "my $strBackupPath;\n" - "my $strTimelineAborted;\n" - "\n" - "foreach my $strAbortedBackup ($oStorageRepo->list(\n" - "STORAGE_REPO_BACKUP, {strExpression => backupRegExpGet(true, true, true), strSortOrder => 'reverse'}))\n" - "{\n" - "\n" - "if ($oStorageRepo->exists(STORAGE_REPO_BACKUP . \"/${strAbortedBackup}/\" . FILE_MANIFEST_COPY) &&\n" - "!$oStorageRepo->exists(STORAGE_REPO_BACKUP . \"/${strAbortedBackup}/\" . FILE_MANIFEST))\n" - "{\n" - "my $bUsable;\n" - "my $strReason = \"resume is disabled\";\n" - "$strBackupPath = $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . \"/${strAbortedBackup}\");\n" - "\n\n\n" - "if (cfgOption(CFGOPT_RESUME))\n" - "{\n" - "$strReason = \"unable to read ${strBackupPath}/\" . FILE_MANIFEST;\n" - "\n" - "eval\n" - "{\n" - "\n" - "$oAbortedManifest = new pgBackRest::Manifest(\"${strBackupPath}/\" . 
FILE_MANIFEST,\n" - "{strCipherPass => $strCipherPassManifest});\n" - "\n\n" - "my $strKey;\n" - "my $strValueNew;\n" - "my $strValueAborted;\n" - "\n\n" - "if ($oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_VERSION) ne PROJECT_VERSION)\n" - "{\n" - "$strKey = INI_KEY_VERSION;\n" - "$strValueNew = PROJECT_VERSION;\n" - "$strValueAborted = $oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_VERSION);\n" - "}\n" - "\n" - "elsif ($oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_FORMAT) ne REPOSITORY_FORMAT)\n" - "{\n" - "$strKey = INI_KEY_FORMAT;\n" - "$strValueNew = REPOSITORY_FORMAT;\n" - "$strValueAborted = $oAbortedManifest->get(INI_SECTION_BACKREST, INI_KEY_FORMAT);\n" - "}\n" - "\n" - "elsif ($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE) ne $strType)\n" - "{\n" - "$strKey = MANIFEST_KEY_TYPE;\n" - "$strValueNew = $strType;\n" - "$strValueAborted = $oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE);\n" - "}\n" - "\n" - "elsif ($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, false, '') ne\n" - "(defined($strBackupLastPath) ? $strBackupLastPath : ''))\n" - "{\n" - "$strKey = MANIFEST_KEY_PRIOR;\n" - "$strValueNew = defined($strBackupLastPath) ? 
$strBackupLastPath : '';\n" - "$strValueAborted =\n" - "$oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_PRIOR, undef, false, '');\n" - "}\n" - "\n" - "elsif ($oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS) !=\n" - "cfgOption(CFGOPT_COMPRESS))\n" - "{\n" - "$strKey = MANIFEST_KEY_COMPRESS;\n" - "$strValueNew = cfgOption(CFGOPT_COMPRESS);\n" - "$strValueAborted = $oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS);\n" - "}\n" - "\n" - "elsif ($oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK) !=\n" - "cfgOption(CFGOPT_REPO_HARDLINK))\n" - "{\n" - "$strKey = MANIFEST_KEY_HARDLINK;\n" - "$strValueNew = cfgOption(CFGOPT_REPO_HARDLINK);\n" - "$strValueAborted = $oAbortedManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK);\n" - "}\n" - "\n\n" - "if (defined($strKey))\n" - "{\n" - "$strReason = \"new ${strKey} '${strValueNew}' does not match aborted ${strKey} '${strValueAborted}'\";\n" - "}\n" - "\n" - "else\n" - "{\n" - "$bUsable = true;\n" - "}\n" - "\n" - "return true;\n" - "}\n" - "or do\n" - "{\n" - "$bUsable = false;\n" - "}\n" - "}\n" - "\n\n" - "if ($bUsable)\n" - "{\n" - "$strBackupLabel = $strAbortedBackup;\n" - "\n\n" - "if (defined($strCipherPassManifest))\n" - "{\n" - "$strCipherPassBackupSet = $oAbortedManifest->cipherPassSub();\n" - "}\n" - "\n\n\n" - "if ($oAbortedManifest->test(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP))\n" - "{\n" - "$strTimelineAborted = substr($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP), 0, 8);\n" - "}\n" - "elsif ($oAbortedManifest->test(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START))\n" - "{\n" - "$strTimelineAborted = substr($oAbortedManifest->get(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START), 0, 8);\n" - "}\n" - "}\n" - "else\n" - "{\n" - "&log(WARN, \"aborted backup ${strAbortedBackup} cannot be resumed: ${strReason}\");\n" - "\n" - 
"$oStorageRepo->pathRemove(STORAGE_REPO_BACKUP . \"/${strAbortedBackup}\", {bRecurse => true});\n" - "undef($oAbortedManifest);\n" - "}\n" - "\n" - "last;\n" - "}\n" - "}\n" - "\n\n" - "if (defined($strCipherPassManifest) && !defined($strCipherPassBackupSet) && $strType eq CFGOPTVAL_BACKUP_TYPE_FULL)\n" - "{\n" - "$strCipherPassBackupSet = cipherPassGen();\n" - "}\n" - "\n\n" - "if (!defined($strBackupLabel))\n" - "{\n" - "$strBackupLabel = backupLabel($oStorageRepo, $strType, $strBackupLastPath, $lTimestampStart);\n" - "$strBackupPath = $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . \"/${strBackupLabel}\");\n" - "}\n" - "\n\n\n\n" - "my $oBackupManifest = new pgBackRest::Manifest(\"$strBackupPath/\" . FILE_MANIFEST,\n" - "{bLoad => false, strDbVersion => $strDbVersion, iDbCatalogVersion => $iCatalogVersion,\n" - "strCipherPass => defined($strCipherPassManifest) ? $strCipherPassManifest : undef,\n" - "strCipherPassSub => defined($strCipherPassManifest) ? $strCipherPassBackupSet : undef});\n" - "\n\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TYPE, undef, $strType);\n" - "$oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_START, undef, $lTimestampStart);\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BACKUP_STANDBY, undef, cfgOption(CFGOPT_BACKUP_STANDBY));\n" - "$oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_BUFFER_SIZE, undef, cfgOption(CFGOPT_BUFFER_SIZE));\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, $bCompress);\n" - "$oBackupManifest->numericSet(\n" - "MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS_LEVEL, undef, cfgOption(CFGOPT_COMPRESS_LEVEL));\n" - "$oBackupManifest->numericSet(\n" - "MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS_LEVEL_NETWORK, undef, cfgOption(CFGOPT_COMPRESS_LEVEL_NETWORK));\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_HARDLINK, undef, 
$bHardLink);\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ONLINE, undef, cfgOption(CFGOPT_ONLINE));\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ARCHIVE_COPY, undef,\n" - "!cfgOption(CFGOPT_ONLINE) ||\n" - "(cfgOption(CFGOPT_ARCHIVE_CHECK) && cfgOption(CFGOPT_ARCHIVE_COPY)));\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ARCHIVE_CHECK, undef,\n" - "cfgOption(CFGOPT_ONLINE) && cfgOption(CFGOPT_ARCHIVE_CHECK));\n" - "$oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_PROCESS_MAX, undef, cfgOption(CFGOPT_PROCESS_MAX));\n" - "\n\n" - "$oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_DB, MANIFEST_KEY_DB_ID, undef, $iDbHistoryId);\n" - "$oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_DB, MANIFEST_KEY_CONTROL, undef, $iControlVersion);\n" - "$oBackupManifest->numericSet(MANIFEST_SECTION_BACKUP_DB, MANIFEST_KEY_SYSTEM_ID, undef, $ullDbSysId);\n" - "\n\n" - "if (cfgOption(CFGOPT_ONLINE) && cfgOption(CFGOPT_BACKUP_STANDBY) && $strDbVersion < PG_VERSION_BACKUP_STANDBY)\n" - "{\n" - "confess &log(ERROR,\n" - "'option \\'' . cfgOptionName(CFGOPT_BACKUP_STANDBY) . '\\' not valid for PostgreSQL < ' . PG_VERSION_BACKUP_STANDBY,\n" - "ERROR_CONFIG);\n" - "}\n" - "\n\n" - "my $strArchiveStart = undef;\n" - "my $strLsnStart = undef;\n" - "my $iWalSegmentSize = undef;\n" - "my $hTablespaceMap = undef;\n" - "my $hDatabaseMap = undef;\n" - "my $strTimelineCurrent = undef;\n" - "\n\n" - "if (!cfgOption(CFGOPT_ONLINE))\n" - "{\n" - "\n\n\n" - "if (!cfgOptionTest(CFGOPT_CHECKSUM_PAGE))\n" - "{\n" - "cfgOptionSet(CFGOPT_CHECKSUM_PAGE, false);\n" - "}\n" - "\n\n" - "if ($oStorageDbMaster->exists($strDbMasterPath . '/' . DB_FILE_POSTMASTERPID))\n" - "{\n" - "if (cfgOption(CFGOPT_FORCE))\n" - "{\n" - "&log(WARN, '--no-online passed and ' . DB_FILE_POSTMASTERPID . 
' exists but --force was passed so backup will ' .\n" - "'continue though it looks like the postmaster is running and the backup will probably not be ' .\n" - "'consistent');\n" - "}\n" - "else\n" - "{\n" - "confess &log(ERROR, '--no-online passed but ' . DB_FILE_POSTMASTERPID . ' exists - looks like the postmaster is ' .\n" - "'running. Shutdown the postmaster and try again, or use --force.', ERROR_POSTMASTER_RUNNING);\n" - "}\n" - "}\n" - "}\n" - "\n" - "else\n" - "{\n" - "\n" - "($strArchiveStart, $strLsnStart, $iWalSegmentSize) =\n" - "$oDbMaster->backupStart(\n" - "PROJECT_NAME . ' backup started at ' . timestampFormat(undef, $lTimestampStart), cfgOption(CFGOPT_START_FAST));\n" - "\n\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_START, undef, $strArchiveStart);\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_START, undef, $strLsnStart);\n" - "&log(INFO, \"backup start archive = ${strArchiveStart}, lsn = ${strLsnStart}\");\n" - "\n\n" - "$strTimelineCurrent = substr($strArchiveStart, 0, 8);\n" - "\n\n" - "$hTablespaceMap = $oDbMaster->tablespaceMapGet();\n" - "\n\n" - "$hDatabaseMap = $oDbMaster->databaseMapGet();\n" - "\n\n" - "if (cfgOption(CFGOPT_BACKUP_STANDBY))\n" - "{\n" - "my ($strStandbyDbVersion, $iStandbyControlVersion, $iStandbyCatalogVersion, $ullStandbyDbSysId) = $oDbStandby->info();\n" - "$oBackupInfo->check($strStandbyDbVersion, $iStandbyControlVersion, $iStandbyCatalogVersion, $ullStandbyDbSysId);\n" - "\n" - "$oDbStandby->configValidate();\n" - "\n" - "&log(INFO, \"wait for replay on the standby to reach ${strLsnStart}\");\n" - "\n" - "my ($strReplayedLSN, $strCheckpointLSN) = $oDbStandby->replayWait($strLsnStart);\n" - "\n" - "&log(\n" - "INFO,\n" - "\"replay on the standby reached ${strReplayedLSN}\" .\n" - "(defined($strCheckpointLSN) ? 
\", checkpoint ${strCheckpointLSN}\" : ''));\n" - "\n\n" - "undef($oDbStandby);\n" - "protocolDestroy(CFGOPTVAL_REMOTE_TYPE_DB, $self->{iCopyRemoteIdx}, true);\n" - "}\n" - "}\n" - "\n\n\n" - "if ($strType ne CFGOPTVAL_BACKUP_TYPE_FULL && defined($strBackupLastPath))\n" - "{\n" - "\n\n" - "if (!$oLastManifest->test(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE))\n" - "{\n" - "cfgOptionSet(CFGOPT_CHECKSUM_PAGE, false);\n" - "}\n" - "else\n" - "{\n" - "my $bChecksumPageLast =\n" - "$oLastManifest->boolGet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE);\n" - "\n" - "if ($bChecksumPageLast != cfgOption(CFGOPT_CHECKSUM_PAGE))\n" - "{\n" - "&log(WARN,\n" - "\"${strType} backup cannot alter '\" . cfgOptionName(CFGOPT_CHECKSUM_PAGE) . \"' option to '\" .\n" - "boolFormat(cfgOption(CFGOPT_CHECKSUM_PAGE)) . \"', reset to '\" . boolFormat($bChecksumPageLast) .\n" - "\"' from ${strBackupLastPath}\");\n" - "cfgOptionSet(CFGOPT_CHECKSUM_PAGE, $bChecksumPageLast);\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_CHECKSUM_PAGE, undef, cfgOption(CFGOPT_CHECKSUM_PAGE));\n" - "\n\n" - "cfgOptionSet(CFGOPT_DELTA, $oBackupManifest->build(\n" - "$oStorageDbMaster, $strDbMasterPath, $oLastManifest, cfgOption(CFGOPT_ONLINE), cfgOption(CFGOPT_DELTA), $hTablespaceMap,\n" - "$hDatabaseMap, cfgOption(CFGOPT_EXCLUDE, false), $strTimelineCurrent, $strTimelineLast));\n" - "\n\n" - "if (defined($oAbortedManifest))\n" - "{\n" - "&log(WARN, \"aborted backup ${strBackupLabel} of same type exists, will be cleaned to remove invalid files and resumed\");\n" - "\n\n\n" - "cfgOptionSet(CFGOPT_DELTA, $self->resumeClean($oStorageRepo, $strBackupLabel, $oBackupManifest, $oAbortedManifest,\n" - "cfgOption(CFGOPT_ONLINE), cfgOption(CFGOPT_DELTA), $strTimelineCurrent, $strTimelineAborted));\n" - "}\n" - "\n" - "else\n" - "{\n" - "logDebugMisc($strOperation, \"create backup path ${strBackupPath}\");\n" - 
"$oStorageRepo->pathCreate(STORAGE_REPO_BACKUP . \"/${strBackupLabel}\");\n" - "}\n" - "\n\n" - "$oBackupManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_DELTA, undef, cfgOption(CFGOPT_DELTA));\n" - "\n\n" - "$oBackupManifest->saveCopy();\n" - "\n\n" - "my $lBackupSizeTotal =\n" - "$self->processManifest(\n" - "$strDbMasterPath, $strDbCopyPath, $strType, $strDbVersion, $bCompress, $bHardLink, $oBackupManifest, $strBackupLabel,\n" - "$strLsnStart);\n" - "&log(INFO, \"${strType} backup size = \" . fileSizeFormat($lBackupSizeTotal));\n" - "\n\n" - "undef($oStorageDbMaster);\n" - "\n\n" - "my $strArchiveStop = undef;\n" - "my $strLsnStop = undef;\n" - "\n" - "if (cfgOption(CFGOPT_ONLINE))\n" - "{\n" - "($strArchiveStop, $strLsnStop, my $strTimestampDbStop, my $oFileHash) = $oDbMaster->backupStop();\n" - "\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_ARCHIVE_STOP, undef, $strArchiveStop);\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LSN_STOP, undef, $strLsnStop);\n" - "&log(INFO, \"backup stop archive = ${strArchiveStop}, lsn = ${strLsnStop}\");\n" - "\n\n" - "foreach my $strFile (sort(keys(%{$oFileHash})))\n" - "{\n" - "\n" - "if (defined($oFileHash->{$strFile}))\n" - "{\n" - "my $rhyFilter = [{strClass => STORAGE_FILTER_SHA}];\n" - "\n\n" - "if ($bCompress)\n" - "{\n" - "push(\n" - "@{$rhyFilter},\n" - "{strClass => STORAGE_FILTER_GZIP, rxyParam => [STORAGE_COMPRESS, false, cfgOption(CFGOPT_COMPRESS_LEVEL)]});\n" - "}\n" - "\n\n\n" - "my $oDestinationFileIo = $oStorageRepo->openWrite(\n" - "STORAGE_REPO_BACKUP . \"/${strBackupLabel}/${strFile}\" . ($bCompress ? qw{.} . COMPRESS_EXT : ''),\n" - "{rhyFilter => $rhyFilter,\n" - "strCipherPass => defined($strCipherPassBackupSet) ? 
$strCipherPassBackupSet : undef});\n" - "\n\n" - "$oStorageRepo->put($oDestinationFileIo, $oFileHash->{$strFile});\n" - "\n\n" - "$oBackupManifest->fileAdd(\n" - "$strFile, time(), length($oFileHash->{$strFile}), $oDestinationFileIo->result(STORAGE_FILTER_SHA), true);\n" - "\n" - "&log(DETAIL, \"wrote '${strFile}' file returned from pg_stop_backup()\");\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "undef($oDbMaster);\n" - "protocolDestroy(undef, undef, true);\n" - "\n\n\n\n" - "if (cfgOption(CFGOPT_ONLINE) && cfgOption(CFGOPT_ARCHIVE_CHECK))\n" - "{\n" - "\n" - "$oBackupManifest->saveCopy();\n" - "\n\n" - "my $lModificationTime = time();\n" - "\n\n" - "logDebugMisc($strOperation, \"retrieve archive logs ${strArchiveStart}:${strArchiveStop}\");\n" - "\n" - "my $oArchiveInfo = new pgBackRest::Archive::Info(storageRepo()->pathGet(STORAGE_REPO_ARCHIVE), true);\n" - "my $strArchiveId = $oArchiveInfo->archiveId();\n" - "my @stryArchive = lsnFileRange($strLsnStart, $strLsnStop, $strDbVersion, $iWalSegmentSize);\n" - "\n" - "foreach my $strArchive (@stryArchive)\n" - "{\n" - "my $strArchiveFile = walSegmentFind(\n" - "$oStorageRepo, $strArchiveId, substr($strArchiveStop, 0, 8) . $strArchive, cfgOption(CFGOPT_ARCHIVE_TIMEOUT));\n" - "\n" - "$strArchive = substr($strArchiveFile, 0, 24);\n" - "\n" - "if (cfgOption(CFGOPT_ARCHIVE_COPY))\n" - "{\n" - "logDebugMisc($strOperation, \"archive: ${strArchive} (${strArchiveFile})\");\n" - "\n\n" - "my $bArchiveCompressed = $strArchiveFile =~ ('^.*\\.' . COMPRESS_EXT . '\\$');\n" - "\n" - "$oStorageRepo->copy(\n" - "$oStorageRepo->openRead(STORAGE_REPO_ARCHIVE . \"/${strArchiveId}/${strArchiveFile}\",\n" - "{strCipherPass => $oArchiveInfo->cipherPassSub()}),\n" - "$oStorageRepo->openWrite(STORAGE_REPO_BACKUP . \"/${strBackupLabel}/\" . MANIFEST_TARGET_PGDATA . qw{/} .\n" - "$oBackupManifest->walPath() . \"/${strArchive}\" . ($bCompress ? qw{.} . 
COMPRESS_EXT : ''),\n" - "{bPathCreate => true, strCipherPass => $strCipherPassBackupSet})\n" - ");\n" - "\n\n" - "my $strPathLog = MANIFEST_TARGET_PGDATA . qw{/} . $oBackupManifest->walPath();\n" - "my $strFileLog = \"${strPathLog}/${strArchive}\";\n" - "\n\n" - "$oBackupManifest->fileAdd(\n" - "$strFileLog, $lModificationTime, PG_WAL_SEGMENT_SIZE, substr($strArchiveFile, 25, 40), true);\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "my $lTimestampStop = time();\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_TIMESTAMP_STOP, undef, $lTimestampStop + 0);\n" - "$oBackupManifest->set(MANIFEST_SECTION_BACKUP, MANIFEST_KEY_LABEL, undef, $strBackupLabel);\n" - "\n\n" - "if ($oStorageRepo->capability(STORAGE_CAPABILITY_PATH_SYNC))\n" - "{\n" - "\n" - "$oStorageRepo->pathSync(STORAGE_REPO_BACKUP . \"/${strBackupLabel}\");\n" - "\n" - "foreach my $strPath ($oBackupManifest->keys(MANIFEST_SECTION_TARGET_PATH))\n" - "{\n" - "my $strPathSync = $oStorageRepo->pathGet(STORAGE_REPO_BACKUP . \"/${strBackupLabel}/$strPath\");\n" - "\n\n" - "if ($strType eq CFGOPTVAL_BACKUP_TYPE_FULL || $oStorageRepo->pathExists($strPathSync))\n" - "{\n" - "$oStorageRepo->pathSync($strPathSync);\n" - "}\n" - "}\n" - "}\n" - "\n\n" - "$oBackupManifest->save();\n" - "\n" - "&log(INFO, \"new backup label = ${strBackupLabel}\");\n" - "\n\n\n" - "my $strHistoryPath = $oStorageRepo->pathGet(\n" - "STORAGE_REPO_BACKUP . qw{/} . PATH_BACKUP_HISTORY . qw{/} . substr($strBackupLabel, 0, 4));\n" - "\n" - "$oStorageRepo->copy(\n" - "$oStorageRepo->openRead(STORAGE_REPO_BACKUP . \"/${strBackupLabel}/\" . FILE_MANIFEST,\n" - "{'strCipherPass' => $strCipherPassManifest}),\n" - "$oStorageRepo->openWrite(\n" - "\"${strHistoryPath}/${strBackupLabel}.manifest.\" . COMPRESS_EXT,\n" - "{rhyFilter => [{strClass => STORAGE_FILTER_GZIP, rxyParam => [STORAGE_COMPRESS, false, 9]}],\n" - "bPathCreate => true, bAtomic => true,\n" - "strCipherPass => defined($strCipherPassManifest) ? 
$strCipherPassManifest : undef}));\n" - "\n\n" - "if ($oStorageRepo->capability(STORAGE_CAPABILITY_PATH_SYNC))\n" - "{\n" - "$oStorageRepo->pathSync(STORAGE_REPO_BACKUP . qw{/} . PATH_BACKUP_HISTORY);\n" - "$oStorageRepo->pathSync($strHistoryPath);\n" - "}\n" - "\n\n" - "$oStorageRepo->remove(STORAGE_REPO_BACKUP . qw(/) . LINK_LATEST);\n" - "\n" - "if (storageRepo()->capability(STORAGE_CAPABILITY_LINK))\n" - "{\n" - "$oStorageRepo->linkCreate(\n" - "STORAGE_REPO_BACKUP . \"/${strBackupLabel}\", STORAGE_REPO_BACKUP . qw{/} . LINK_LATEST, {bRelative => true});\n" - "}\n" - "\n\n" - "$oBackupInfo->add($oBackupManifest);\n" - "\n\n" - "if ($oStorageRepo->capability(STORAGE_CAPABILITY_PATH_SYNC))\n" - "{\n" - "$oStorageRepo->pathSync(STORAGE_REPO_BACKUP);\n" - "}\n" - "\n\n" - "return logDebugReturn($strOperation);\n" - "}\n" - "\n" - "1;\n" - }, { .name = "pgBackRest/Backup/Common.pm", .data = @@ -1924,253 +765,6 @@ static const EmbeddedModule embeddedModule[] = "\n" "1;\n" }, - { - .name = "pgBackRest/Backup/File.pm", - .data = - "\n\n\n" - "package pgBackRest::Backup::File;\n" - "\n" - "use strict;\n" - "use warnings FATAL => qw(all);\n" - "use Carp qw(confess);\n" - "\n" - "use Exporter qw(import);\n" - "our @EXPORT = qw();\n" - "use File::Basename qw(dirname);\n" - "use Storable qw(dclone);\n" - "\n" - "use pgBackRest::Common::Exception;\n" - "use pgBackRest::Common::Io::Handle;\n" - "use pgBackRest::Common::Log;\n" - "use pgBackRest::Common::String;\n" - "use pgBackRest::Config::Config;\n" - "use pgBackRest::DbVersion;\n" - "use pgBackRest::Manifest;\n" - "use pgBackRest::Protocol::Storage::Helper;\n" - "use pgBackRest::Storage::Base;\n" - "use pgBackRest::Storage::Helper;\n" - "\n\n\n\n" - "use constant BACKUP_FILE_CHECKSUM => 0;\n" - "push @EXPORT, qw(BACKUP_FILE_CHECKSUM);\n" - "use constant BACKUP_FILE_COPY => 1;\n" - "push @EXPORT, qw(BACKUP_FILE_COPY);\n" - "use constant BACKUP_FILE_RECOPY => 2;\n" - "push @EXPORT, qw(BACKUP_FILE_RECOPY);\n" - "use constant 
BACKUP_FILE_SKIP => 3;\n" - "push @EXPORT, qw(BACKUP_FILE_SKIP);\n" - "use constant BACKUP_FILE_NOOP => 4;\n" - "push @EXPORT, qw(BACKUP_FILE_NOOP);\n" - "\n\n\n\n" - "sub backupManifestUpdate\n" - "{\n" - "\n" - "my\n" - "(\n" - "$strOperation,\n" - "$oManifest,\n" - "$strHost,\n" - "$iLocalId,\n" - "$strDbFile,\n" - "$strRepoFile,\n" - "$lSize,\n" - "$strChecksum,\n" - "$bChecksumPage,\n" - "$iCopyResult,\n" - "$lSizeCopy,\n" - "$lSizeRepo,\n" - "$strChecksumCopy,\n" - "$rExtra,\n" - "$lSizeTotal,\n" - "$lSizeCurrent,\n" - "$lManifestSaveSize,\n" - "$lManifestSaveCurrent\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . '::backupManifestUpdate', \\@_,\n" - "{name => 'oManifest', trace => true},\n" - "{name => 'strHost', required => false, trace => true},\n" - "{name => 'iLocalId', required => false, trace => true},\n" - "\n\n" - "{name => 'strDbFile', trace => true},\n" - "{name => 'strRepoFile', trace => true},\n" - "{name => 'lSize', required => false, trace => true},\n" - "{name => 'strChecksum', required => false, trace => true},\n" - "{name => 'bChecksumPage', trace => true},\n" - "\n\n" - "{name => 'iCopyResult', trace => true},\n" - "{name => 'lSizeCopy', required => false, trace => true},\n" - "{name => 'lSizeRepo', required => false, trace => true},\n" - "{name => 'strChecksumCopy', required => false, trace => true},\n" - "{name => 'rExtra', required => false, trace => true},\n" - "\n\n" - "{name => 'lSizeTotal', trace => true},\n" - "{name => 'lSizeCurrent', trace => true},\n" - "{name => 'lManifestSaveSize', trace => true},\n" - "{name => 'lManifestSaveCurrent', trace => true}\n" - ");\n" - "\n\n" - "$lSizeCurrent += $lSize;\n" - "\n\n" - "if ($iCopyResult == BACKUP_FILE_NOOP)\n" - "{\n" - "\n" - "$oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE, $lSizeCopy);\n" - "$oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksumCopy);\n" - "\n" - "&log(DETAIL,\n" - "'match 
file from prior backup ' . (defined($strHost) ? \"${strHost}:\" : '') . \"${strDbFile} (\" .\n" - "fileSizeFormat($lSizeCopy) . ', ' . int($lSizeCurrent * 100 / $lSizeTotal) . '%)' .\n" - "($lSizeCopy != 0 ? \" checksum ${strChecksumCopy}\" : ''),\n" - "undef, undef, undef, $iLocalId);\n" - "}\n" - "\n" - "else\n" - "{\n" - "\n" - "if ($iCopyResult == BACKUP_FILE_RECOPY)\n" - "{\n" - "&log(\n" - "WARN,\n" - "\"resumed backup file ${strRepoFile} does not have expected checksum ${strChecksum}. The file will be recopied and\" .\n" - "\" backup will continue but this may be an issue unless the resumed backup path in the repository is known to be\" .\n" - "\" corrupted.\\n\" .\n" - "\"NOTE: this does not indicate a problem with the PostgreSQL page checksums.\");\n" - "}\n" - "\n\n" - "if ($iCopyResult == BACKUP_FILE_COPY || $iCopyResult == BACKUP_FILE_RECOPY || $iCopyResult == BACKUP_FILE_CHECKSUM)\n" - "{\n" - "\n" - "&log($iCopyResult == BACKUP_FILE_CHECKSUM ? DETAIL : INFO,\n" - "($iCopyResult == BACKUP_FILE_CHECKSUM ?\n" - "'checksum resumed file ' : 'backup file ' . (defined($strHost) ? \"${strHost}:\" : '')) .\n" - "\"${strDbFile} (\" . fileSizeFormat($lSizeCopy) .\n" - "', ' . int($lSizeCurrent * 100 / $lSizeTotal) . '%)' .\n" - "($lSizeCopy != 0 ? 
\" checksum ${strChecksumCopy}\" : ''), undef, undef, undef, $iLocalId);\n" - "\n" - "$oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE, $lSizeCopy);\n" - "\n" - "if ($lSizeRepo != $lSizeCopy)\n" - "{\n" - "$oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REPO_SIZE, $lSizeRepo);\n" - "}\n" - "\n" - "if ($lSizeCopy > 0)\n" - "{\n" - "$oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, $strChecksumCopy);\n" - "}\n" - "\n\n" - "if ($iCopyResult == BACKUP_FILE_COPY || $iCopyResult == BACKUP_FILE_RECOPY)\n" - "{\n" - "$oManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REFERENCE);\n" - "}\n" - "\n\n" - "if ($bChecksumPage)\n" - "{\n" - "\n" - "if (defined($rExtra->{valid}))\n" - "{\n" - "\n" - "$oManifest->boolSet(\n" - "MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, $rExtra->{valid});\n" - "\n\n" - "if (!$rExtra->{valid})\n" - "{\n" - "\n" - "if ($lSizeCopy % PG_PAGE_SIZE != 0)\n" - "{\n" - "\n" - "if (!defined($rExtra->{align}) || $rExtra->{align})\n" - "{\n" - "confess &log(ASSERT, 'align flag should have been set for misaligned page');\n" - "}\n" - "\n\n" - "&log(WARN,\n" - "'page misalignment in file ' . (defined($strHost) ? \"${strHost}:\" : '') .\n" - "\"${strDbFile}: file size ${lSizeCopy} is not divisible by page size \" . PG_PAGE_SIZE);\n" - "}\n" - "\n" - "else\n" - "{\n" - "$oManifest->set(\n" - "MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR,\n" - "dclone($rExtra->{error}));\n" - "\n\n" - "my $strPageError;\n" - "my $iPageErrorTotal = 0;\n" - "\n" - "foreach my $iyPage (@{$rExtra->{error}})\n" - "{\n" - "$strPageError .= (defined($strPageError) ? ', ' : '');\n" - "\n\n" - "if (ref($iyPage))\n" - "{\n" - "$strPageError .= $$iyPage[0] . '-' . 
$$iyPage[1];\n" - "$iPageErrorTotal += ($$iyPage[1] - $$iyPage[0]) + 1;\n" - "}\n" - "\n" - "else\n" - "{\n" - "$strPageError .= $iyPage;\n" - "$iPageErrorTotal += 1;\n" - "}\n" - "}\n" - "\n\n" - "if ($iPageErrorTotal == 0)\n" - "{\n" - "confess &log(ASSERT, 'page checksum error list should have at least one entry');\n" - "}\n" - "\n\n" - "&log(WARN,\n" - "'invalid page checksum' . ($iPageErrorTotal > 1 ? 's' : '') .\n" - "' found in file ' . (defined($strHost) ? \"${strHost}:\" : '') . \"${strDbFile} at page\" .\n" - "($iPageErrorTotal > 1 ? 's' : '') . \" ${strPageError}\");\n" - "}\n" - "}\n" - "}\n" - "\n" - "elsif (!$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE))\n" - "{\n" - "confess &log(ASSERT, \"${strDbFile} should have calculated page checksums\");\n" - "}\n" - "}\n" - "}\n" - "\n" - "elsif ($iCopyResult == BACKUP_FILE_SKIP)\n" - "{\n" - "&log(DETAIL, 'skip file removed by database ' . (defined($strHost) ? \"${strHost}:\" : '') . $strDbFile);\n" - "$oManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strRepoFile);\n" - "}\n" - "}\n" - "\n\n" - "$lManifestSaveCurrent += $lSize;\n" - "\n" - "if ($lManifestSaveCurrent >= $lManifestSaveSize)\n" - "{\n" - "$oManifest->saveCopy();\n" - "\n" - "logDebugMisc\n" - "(\n" - "$strOperation, 'save manifest',\n" - "{name => 'lManifestSaveSize', value => $lManifestSaveSize},\n" - "{name => 'lManifestSaveCurrent', value => $lManifestSaveCurrent}\n" - ");\n" - "\n" - "$lManifestSaveCurrent = 0;\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'lSizeCurrent', value => $lSizeCurrent, trace => true},\n" - "{name => 'lManifestSaveCurrent', value => $lManifestSaveCurrent, trace => true},\n" - ");\n" - "}\n" - "\n" - "push @EXPORT, qw(backupManifestUpdate);\n" - "\n" - "1;\n" - }, { .name = "pgBackRest/Backup/Info.pm", .data = @@ -6377,8 +4971,6 @@ static const EmbeddedModule embeddedModule[] = "use pgBackRest::Protocol::Helper;\n" "use 
pgBackRest::Protocol::Storage::Helper;\n" "use pgBackRest::Version;\n" - "\n\n\n\n\n\n" - "use constant PG_WAL_SIZE_83 => 16777216;\n" "\n\n\n\n" "use constant DB_BACKUP_ADVISORY_LOCK => '12340078987004321';\n" "push @EXPORT, qw(DB_BACKUP_ADVISORY_LOCK);\n" @@ -6631,47 +5223,6 @@ static const EmbeddedModule embeddedModule[] = "{name => 'strResult', value => @{@{$self->executeSql($strSql)}[0]}[0]}\n" ");\n" "}\n" - "\n\n\n\n\n\n" - "sub tablespaceMapGet\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my ($strOperation) = logDebugParam(__PACKAGE__ . '->tablespaceMapGet');\n" - "\n" - "my $hTablespaceMap = {};\n" - "\n" - "for my $strRow (@{$self->executeSql('select oid, spcname from pg_tablespace')})\n" - "{\n" - "$hTablespaceMap->{@{$strRow}[0]} = @{$strRow}[1];\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'hTablespaceMap', value => $hTablespaceMap}\n" - ");\n" - "}\n" - "\n\n\n\n\n\n" - "sub databaseMapGet\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my ($strOperation) = logDebugParam(__PACKAGE__ . '->databaseMapGet');\n" - "\n" - "my $hDatabaseMap = {};\n" - "\n" - "for my $strRow (@{$self->executeSql('select datname, oid, datlastsysoid from pg_database')})\n" - "{\n" - "$hDatabaseMap->{@{$strRow}[0]}{&MANIFEST_KEY_DB_ID} = @{$strRow}[1];\n" - "$hDatabaseMap->{@{$strRow}[0]}{&MANIFEST_KEY_DB_LAST_SYSTEM_ID} = @{$strRow}[2];\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'hDatabaseMap', value => $hDatabaseMap}\n" - ");\n" - "}\n" "\n\n\n\n" "sub info\n" "{\n" @@ -6789,135 +5340,6 @@ static const EmbeddedModule embeddedModule[] = "{name => 'strDbPath', value => $strDbPath}\n" ");\n" "}\n" - "\n\n\n\n" - "sub backupStart\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strLabel,\n" - "$bStartFast\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . 
'->backupStart', \\@_,\n" - "{name => 'strLabel'},\n" - "{name => 'bStartFast'}\n" - ");\n" - "\n\n" - "$self->configValidate();\n" - "\n\n" - "if ($self->{strDbVersion} < PG_VERSION_84 && $bStartFast)\n" - "{\n" - "&log(WARN, cfgOptionName(CFGOPT_START_FAST) . ' option is only available in PostgreSQL >= ' . PG_VERSION_84);\n" - "$bStartFast = false;\n" - "}\n" - "\n\n" - "my $bChecksumPage =\n" - "$self->executeSqlOne(\"select count(*) = 1 from pg_settings where name = 'data_checksums' and setting = 'on'\");\n" - "\n\n" - "if (!cfgOptionTest(CFGOPT_CHECKSUM_PAGE))\n" - "{\n" - "cfgOptionSet(CFGOPT_CHECKSUM_PAGE, $bChecksumPage);\n" - "}\n" - "\n" - "elsif (cfgOption(CFGOPT_CHECKSUM_PAGE) && !$bChecksumPage)\n" - "{\n" - "&log(WARN, 'unable to enable page checksums since they are not enabled in the database');\n" - "cfgOptionSet(CFGOPT_CHECKSUM_PAGE, false);\n" - "}\n" - "\n\n\n" - "if (!$self->executeSqlOne('select pg_try_advisory_lock(' . DB_BACKUP_ADVISORY_LOCK . ')'))\n" - "{\n" - "confess &log(ERROR, 'unable to acquire ' . PROJECT_NAME . \" advisory lock\\n\" .\n" - "'HINT: is another ' . PROJECT_NAME . ' backup already running on this cluster?', ERROR_LOCK_ACQUIRE);\n" - "}\n" - "\n\n\n" - "if (cfgOption(CFGOPT_STOP_AUTO) && $self->{strDbVersion} < PG_VERSION_96)\n" - "{\n" - "\n" - "if ($self->{strDbVersion} >= PG_VERSION_93)\n" - "{\n" - "\n" - "if ($self->executeSqlOne('select pg_is_in_backup()'))\n" - "{\n" - "&log(WARN, 'the cluster is already in backup mode but no ' . PROJECT_NAME . ' backup process is running.' .\n" - "' pg_stop_backup() will be called so a new backup can be started.');\n" - "$self->backupStop();\n" - "}\n" - "}\n" - "\n\n" - "else\n" - "{\n" - "&log(WARN, cfgOptionName(CFGOPT_STOP_AUTO) . ' option is only available in PostgreSQL >= ' . PG_VERSION_93);\n" - "}\n" - "}\n" - "\n\n" - "&log(INFO, 'execute ' . ($self->{strDbVersion} >= PG_VERSION_96 ? 
'non-' : '') .\n" - "\"exclusive pg_start_backup() with label \\\"${strLabel}\\\": backup begins after \" .\n" - "($bStartFast ? \"the requested immediate checkpoint\" : \"the next regular checkpoint\") . \" completes\");\n" - "\n" - "my ($strTimestampDbStart, $strArchiveStart, $strLsnStart, $iWalSegmentSize) = $self->executeSqlRow(\n" - "\"select to_char(current_timestamp, 'YYYY-MM-DD HH24:MI:SS.US TZ'), pg_\" . $self->walId() . \"file_name(lsn), lsn::text,\" .\n" - "($self->{strDbVersion} < PG_VERSION_84 ? PG_WAL_SIZE_83 :\n" - "\" (select setting::int8 from pg_settings where name = 'wal_segment_size')\" .\n" - "\n" - "($self->{strDbVersion} < PG_VERSION_11 ?\n" - "\" * (select setting::int8 from pg_settings where name = 'wal_block_size')\" : '')) .\n" - "\" from pg_start_backup('${strLabel}'\" .\n" - "($bStartFast ? ', true' : $self->{strDbVersion} >= PG_VERSION_84 ? ', false' : '') .\n" - "($self->{strDbVersion} >= PG_VERSION_96 ? ', false' : '') . ') as lsn');\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'strArchiveStart', value => $strArchiveStart},\n" - "{name => 'strLsnStart', value => $strLsnStart},\n" - "{name => 'iWalSegmentSize', value => $iWalSegmentSize},\n" - "{name => 'strTimestampDbStart', value => $strTimestampDbStart}\n" - ");\n" - "}\n" - "\n\n\n\n" - "sub backupStop\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my ($strOperation) = logDebugParam(__PACKAGE__ . '->backupStop');\n" - "\n\n" - "&log(INFO, 'execute ' . ($self->{strDbVersion} >= PG_VERSION_96 ? 'non-' : '') .\n" - "'exclusive pg_stop_backup() and wait for all WAL segments to archive');\n" - "\n" - "my ($strTimestampDbStop, $strArchiveStop, $strLsnStop, $strLabel, $strTablespaceMap) =\n" - "$self->executeSqlRow(\n" - "\"select to_char(clock_timestamp(), 'YYYY-MM-DD HH24:MI:SS.US TZ'), pg_\" .\n" - "$self->walId() . 
\"file_name(lsn), lsn::text, \" .\n" - "($self->{strDbVersion} >= PG_VERSION_96 ?\n" - "'labelfile, ' .\n" - "'case when length(trim(both \\'\\t\\n \\' from spcmapfile)) = 0 then null else spcmapfile end as spcmapfile' :\n" - "'null as labelfile, null as spcmapfile') .\n" - "' from pg_stop_backup(' .\n" - "\n" - "($self->{strDbVersion} >= PG_VERSION_96 ? 'false' : '') .\n" - "\n" - "($self->{strDbVersion} >= PG_VERSION_10 ? ', false' : '') . ') as lsn');\n" - "\n\n" - "my $oFileHash =\n" - "{\n" - "&MANIFEST_FILE_BACKUPLABEL => $strLabel,\n" - "&MANIFEST_FILE_TABLESPACEMAP => $strTablespaceMap\n" - "};\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'strArchiveStop', value => $strArchiveStop},\n" - "{name => 'strLsnStop', value => $strLsnStop},\n" - "{name => 'strTimestampDbStop', value => $strTimestampDbStop},\n" - "{name => 'oFileHash', value => $oFileHash}\n" - ");\n" - "}\n" "\n\n\n\n\n\n" "sub configValidate\n" "{\n" @@ -6984,13 +5406,6 @@ static const EmbeddedModule embeddedModule[] = "return $self->{strDbVersion} >= PG_VERSION_10 ? 'wal' : 'xlog';\n" "}\n" "\n\n\n\n\n\n" - "sub lsnId\n" - "{\n" - "my $self = shift;\n" - "\n" - "return $self->{strDbVersion} >= PG_VERSION_10 ? 'lsn' : 'location';\n" - "}\n" - "\n\n\n\n\n\n" "sub isStandby\n" "{\n" "my $self = shift;\n" @@ -7017,97 +5432,6 @@ static const EmbeddedModule embeddedModule[] = "{name => 'bStandby', value => $self->{bStandby}}\n" ");\n" "}\n" - "\n\n\n\n\n\n" - "sub replayWait\n" - "{\n" - "my $self = shift;\n" - "\n\n" - "my\n" - "(\n" - "$strOperation,\n" - "$strTargetLSN,\n" - ") =\n" - "logDebugParam\n" - "(\n" - "__PACKAGE__ . 
'->replayWait', \\@_,\n" - "{name => 'strTargetLSN'}\n" - ");\n" - "\n\n" - "require pgBackRest::Archive::Common;\n" - "pgBackRest::Archive::Common->import();\n" - "\n\n" - "my $oWait = waitInit(cfgOption(CFGOPT_ARCHIVE_TIMEOUT));\n" - "my $bTimeout = true;\n" - "my $strReplayedLSN = undef;\n" - "\n\n" - "do\n" - "{\n" - "my $strLastWalReplayLsnFunction =\n" - "'pg_last_' . $self->walId() . '_replay_' . $self->lsnId() . '()';\n" - "\n\n" - "my $strLastReplayedLSN = $self->executeSqlOne(\n" - "\"select coalesce(${strLastWalReplayLsnFunction}::text, '')\");\n" - "\n\n" - "if ($strLastReplayedLSN eq '')\n" - "{\n" - "confess &log(\n" - "ERROR,\n" - "\"unable to query replay lsn on the standby using ${strLastWalReplayLsnFunction}\\n\" .\n" - "\"Hint: Is this a standby?\",\n" - "ERROR_ARCHIVE_TIMEOUT);\n" - "}\n" - "\n\n\n" - "if (lsnNormalize($strLastReplayedLSN) ge lsnNormalize($strTargetLSN))\n" - "{\n" - "$bTimeout = false;\n" - "}\n" - "else\n" - "{\n" - "\n" - "if (defined($strReplayedLSN) &&\n" - "lsnNormalize($strLastReplayedLSN) gt lsnNormalize($strReplayedLSN) &&\n" - "!waitMore($oWait))\n" - "{\n" - "$oWait = waitInit(cfgOption(CFGOPT_ARCHIVE_TIMEOUT));\n" - "}\n" - "}\n" - "\n\n" - "$strReplayedLSN = $strLastReplayedLSN;\n" - "\n" - "} while ($bTimeout && waitMore($oWait));\n" - "\n\n" - "if ($bTimeout == true)\n" - "{\n" - "confess &log(\n" - "ERROR, \"timeout before standby replayed ${strTargetLSN} - only reached ${strReplayedLSN}\", ERROR_ARCHIVE_TIMEOUT);\n" - "}\n" - "\n\n" - "$self->executeSql('checkpoint', undef, false);\n" - "\n\n\n\n\n\n" - "my $strCheckpointLSN = undef;\n" - "\n" - "if ($self->{strDbVersion} >= PG_VERSION_96)\n" - "{\n" - "$strCheckpointLSN = $self->executeSqlOne('select checkpoint_' . 
$self->lsnId() .'::text from pg_control_checkpoint()');\n" - "\n" - "if (lsnNormalize($strCheckpointLSN) le lsnNormalize($strTargetLSN))\n" - "{\n" - "confess &log(\n" - "ERROR,\n" - "\"the checkpoint location ${strCheckpointLSN} is less than the target location ${strTargetLSN} even though the\" .\n" - "\" replay location is ${strReplayedLSN}\\n\" .\n" - "\"Hint: This should not be possible and may indicate a bug in PostgreSQL.\",\n" - "ERROR_ARCHIVE_TIMEOUT);\n" - "}\n" - "}\n" - "\n\n" - "return logDebugReturn\n" - "(\n" - "$strOperation,\n" - "{name => 'strReplayedLSN', value => $strReplayedLSN},\n" - "{name => 'strCheckpointLSN', value => $strCheckpointLSN},\n" - ");\n" - "}\n" "\n\n\n\n\n\n\n\n" "sub dbObjectGet\n" "{\n" @@ -7792,7 +6116,6 @@ static const EmbeddedModule embeddedModule[] = "\n" "use File::Basename qw(dirname);\n" "\n" - "use pgBackRest::Backup::Info;\n" "use pgBackRest::Common::Exception;\n" "use pgBackRest::Common::Lock;\n" "use pgBackRest::Common::Log;\n" @@ -7880,23 +6203,6 @@ static const EmbeddedModule embeddedModule[] = "logFileSet(\n" "storageLocal(),\n" "cfgOption(CFGOPT_LOG_PATH) . '/' . cfgOption(CFGOPT_STANZA) . '-' . lc(cfgCommandName(cfgCommandGet())));\n" - "\n\n" - "lockStopTest();\n" - "\n\n" - "if (!isRepoLocal())\n" - "{\n" - "confess &log(ERROR,\n" - "cfgCommandName(cfgCommandGet()) . 
' command must be run on the repository host', ERROR_HOST_INVALID);\n" - "}\n" - "\n\n\n" - "if (cfgCommandTest(CFGCMD_BACKUP))\n" - "{\n" - "\n" - "require pgBackRest::Backup::Backup;\n" - "pgBackRest::Backup::Backup->import();\n" - "\n" - "new pgBackRest::Backup::Backup()->process();\n" - "}\n" "}\n" "\n" "return 1;\n" diff --git a/test/define.yaml b/test/define.yaml index 05386e97c..de98607d8 100644 --- a/test/define.yaml +++ b/test/define.yaml @@ -592,10 +592,6 @@ unit: coverage: command/archive/common: full - # ---------------------------------------------------------------------------------------------------------------------------- - - name: archive-common-perl - total: 4 - # ---------------------------------------------------------------------------------------------------------------------------- - name: archive-get total: 5 @@ -626,13 +622,16 @@ unit: # ---------------------------------------------------------------------------------------------------------------------------- - name: backup - total: 3 + total: 10 + perlReq: true coverage: + command/backup/backup: full command/backup/file: full command/backup/protocol: full include: + - info/manifest - storage/storage # ---------------------------------------------------------------------------------------------------------------------------- @@ -728,18 +727,6 @@ unit: coverage: command/storage/list: full - # ******************************************************************************************************************************** - - name: backup - - test: - # ---------------------------------------------------------------------------------------------------------------------------- - - name: unit-perl - total: 4 - - # ---------------------------------------------------------------------------------------------------------------------------- - - name: file-unit-perl - total: 2 - # 
******************************************************************************************************************************** - name: manifest diff --git a/test/expect/mock-all-001.log b/test/expect/mock-all-001.log index adc3e4a1a..db9fe5a6b 100644 --- a/test/expect/mock-all-001.log +++ b/test/expect/mock-all-001.log @@ -61,7 +61,7 @@ full backup - error on identical link destinations (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast --type=full P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
-P00 ERROR: [070]: link [TEST_PATH]/db-master/db/base/pg_hba.conf (../pg_config) references a subdirectory of or the same directory as link [TEST_PATH]/db-master/db/base/pg_config_bad (../../db/pg_config) +P00 ERROR: [070]: link 'pg_config_bad/pg_hba.conf.link' destination '[TEST_PATH]/db-master/db/base/pg_config_bad' is in PGDATA P00 INFO: backup command end: aborted with exception [070] full backup - error on link to a link (db-master host) @@ -70,7 +70,7 @@ full backup - error on link to a link (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast --type=full P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [070]: link '[TEST_PATH]/db-master/db/base/postgresql.conf.bad' -> '../pg_config/postgresql.conf.link' cannot reference another link +P00 ERROR: [070]: link '[TEST_PATH]/db-master/db/base/postgresql.conf.bad' cannot reference another link '[TEST_PATH]/db-master/db/pg_config/postgresql.conf.link' P00 INFO: backup command end: aborted with exception [070] full backup - create pg_stat link, pg_clog dir (db-master host) @@ -318,13 +318,14 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: backup '[BACKUP-FULL-1]' missing manifest removed from backup.info P00 WARN: --no-online passed and postmaster.pid exists but --force was passed so backup will continue though it looks like the postmaster is running and the backup will probably not be consistent -P00 INFO: exclude apipe from backup using 'apipe' exclusion -P00 INFO: exclude pg_log/logfile from backup using 'pg_log/' exclusion -P00 INFO: exclude pg_log2 from backup using 'pg_log2' exclusion -P00 INFO: exclude pg_log2/logfile from backup using 'pg_log2' exclusion -P00 INFO: exclude postgresql.auto.conf from backup using 'postgresql.auto.conf' exclusion -P00 WARN: aborted backup [BACKUP-FULL-2] of same type exists, will be cleaned to remove invalid files and resumed -P00 DETAIL: clean resumed backup path: [TEST_PATH]/db-master/repo/backup/db/[BACKUP-FULL-2] +P00 INFO: exclude '[TEST_PATH]/db-master/db/base/apipe' from backup using 'apipe' exclusion +P00 INFO: exclude contents of '[TEST_PATH]/db-master/db/base/pg_log' from backup using 'pg_log/' exclusion +P00 INFO: exclude '[TEST_PATH]/db-master/db/base/pg_log2' from backup using 'pg_log2' exclusion +P00 INFO: exclude '[TEST_PATH]/db-master/db/base/postgresql.auto.conf' from backup using 'postgresql.auto.conf' exclusion +P00 WARN: resumable backup [BACKUP-FULL-2] of same type exists -- remove invalid files and resume +P00 DETAIL: remove file '[TEST_PATH]/db-master/repo/backup/db/[BACKUP-FULL-2]/file.tmp' from resumed backup (missing in manifest) +P00 DETAIL: remove file '[TEST_PATH]/db-master/repo/backup/db/[BACKUP-FULL-2]/pg_data/PG_VERSION' from resumed backup (no checksum in resumed manifest) +P00 DETAIL: remove file '[TEST_PATH]/db-master/repo/backup/db/[BACKUP-FULL-2]/pg_data/special-!_.*'()&!@;:+,?' 
from resumed backup (zero size) P01 DETAIL: checksum resumed file [TEST_PATH]/db-master/db/base/base/32768/33001 (64KB, 33%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b P01 DETAIL: checksum resumed file [TEST_PATH]/db-master/db/base/base/32768/44000_init (32KB, 49%) checksum 7a16d165e4775f7c92e8cdf60c0af57313f0bf90 P01 DETAIL: checksum resumed file [TEST_PATH]/db-master/db/base/base/32768/33000.32767 (32KB, 66%) checksum 6e99b589e550e68e934fd235ccba59fe5b592a9e @@ -508,8 +509,12 @@ full backup - invalid repo (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --exclude=postgresql.auto.conf --exclude=pg_log/ --exclude=pg_log2 --exclude=apipe --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=/bogus_path --stanza=db --start-fast --type=full P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [073]: repo1-path '/bogus_path' does not exist -P00 INFO: backup command end: aborted with exception [073] +P00 ERROR: [055]: unable to load info file '/bogus_path/backup/db/backup.info' or '/bogus_path/backup/db/backup.info.copy': + FileMissingError: unable to open missing file '/bogus_path/backup/db/backup.info' for read + FileMissingError: unable to open missing file '/bogus_path/backup/db/backup.info.copy' for read + HINT: backup.info cannot be opened and is required to perform a backup. + HINT: has a stanza-create been performed? 
+P00 INFO: backup command end: aborted with exception [055] restore delta, backup '[BACKUP-FULL-2]' - add and delete files (db-master host) > [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --delta --set=[BACKUP-FULL-2] --link-all --stanza=db restore @@ -971,7 +976,7 @@ incr backup - invalid database version (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [051]: database version = 9.4, system-id 1000000000000000094 does not match backup version = 8.0, system-id = 1000000000000000094 +P00 ERROR: [051]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 8.0, system-id 1000000000000000094 HINT: is this the correct stanza? 
P00 INFO: backup command end: aborted with exception [051] @@ -981,7 +986,7 @@ incr backup - invalid system id (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [051]: database version = 9.4, system-id 1000000000000000094 does not match backup version = 9.4, system-id = 6999999999999999999 +P00 ERROR: [051]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 9.4, system-id 6999999999999999999 HINT: is this the correct stanza? P00 INFO: backup command end: aborted with exception [051] @@ -993,7 +998,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [069]: pg_tblspc/path is not a symlink - pg_tblspc should contain only symlinks +P00 ERROR: [069]: 'pg_data/pg_tblspc/path' is not a symlink - pg_tblspc should contain only symlinks P00 INFO: backup command end: aborted with exception [069] incr backup - invalid relative tablespace is ../ (db-master host) @@ -1004,7 +1009,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink ../ destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base' is in PGDATA P00 INFO: backup command end: aborted with exception [070] incr backup - invalid relative tablespace is .. (db-master host) @@ -1015,7 +1020,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink .. 
destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base' is in PGDATA P00 INFO: backup command end: aborted with exception [070] incr backup - invalid relative tablespace is ../../$PGDATA (db-master host) @@ -1026,7 +1031,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink ../../base/ destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base' is in PGDATA P00 INFO: backup command end: aborted with exception [070] incr backup - invalid relative tablespace is ../../$PGDATA (db-master host) @@ -1037,7 +1042,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink ../../base destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base' is in PGDATA P00 INFO: backup command end: aborted with exception [070] incr backup - tablespace link references a link (db-master host) @@ -1048,7 +1053,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: link '[TEST_PATH]/db-master/db/base/pg_tblspc/99999' -> '[TEST_PATH]/db-master/db/intermediate_link' cannot reference another link +P00 ERROR: [070]: link '[TEST_PATH]/db-master/db/base/pg_tblspc/99999' cannot reference another link '[TEST_PATH]/db-master/db/intermediate_link' P00 INFO: backup command end: aborted with exception [070] incr backup - invalid relative tablespace in $PGDATA (db-master host) @@ -1059,7 +1064,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink ../invalid_tblspc destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base/invalid_tblspc' is in PGDATA P00 INFO: backup command end: aborted with exception [070] incr backup - $PGDATA is a substring of valid tblspc excluding / (file missing err expected) (db-master host) @@ -1070,8 +1075,8 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [073]: unable to list file info for missing path '[TEST_PATH]/db-master/db/base_tbs' -P00 INFO: backup command end: aborted with exception [073] +P00 ERROR: [041]: unable to get info for missing path/file '[TEST_PATH]/db-master/db/base/pg_tblspc/99999/[TS_PATH-1]': [2] No such file or directory +P00 INFO: backup command end: aborted with exception [041] incr backup - invalid tablespace in $PGDATA (db-master host) > [CONTAINER-EXEC] db-master [BACKREST-BIN] --config=[TEST_PATH]/db-master/pgbackrest.conf --no-online --stanza=db backup @@ -1081,7 +1086,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink [TEST_PATH]/db-master/db/base/invalid_tblspc destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base/invalid_tblspc' is in PGDATA P00 INFO: backup command end: aborted with exception [070] incr backup - add tablespace 1 (db-master host) @@ -1097,6 +1102,23 @@ P01 INFO: backup file [TEST_PATH]/db-master/db/base/changesize.txt (4B, 68%) c P01 INFO: backup file [TEST_PATH]/db-master/db/base/zerosize.txt (0B, 68%) P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 100%) checksum 14c44cef6287269b08d41de489fd492bb9fc795d P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P00 DETAIL: reference pg_data/PG_VERSION to 
[BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/17000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changecontent.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? 
to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: incr backup size = 22B P00 INFO: new backup label = [BACKUP-INCR-1] P00 INFO: backup command end: completed successfully @@ -1266,8 +1288,9 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o P00 WARN: backup '[BACKUP-INCR-1]' missing manifest removed from backup.info P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 WARN: aborted backup [BACKUP-INCR-2] of same type exists, will be cleaned to remove invalid files and resumed -P00 DETAIL: clean resumed backup path: [TEST_PATH]/db-master/repo/backup/db/[BACKUP-INCR-2] +P00 WARN: resumable backup [BACKUP-INCR-2] of same type exists -- remove invalid files and resume +P00 DETAIL: remove file '[TEST_PATH]/db-master/repo/backup/db/[BACKUP-INCR-2]/pg_data/changesize.txt' from resumed backup (mismatched size) +P00 DETAIL: remove file '[TEST_PATH]/db-master/repo/backup/db/[BACKUP-INCR-2]/pg_data/zerosize.txt' from resumed backup (zero size) P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/33001 (64KB, 33%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/44000_init (32KB, 49%) checksum 7a16d165e4775f7c92e8cdf60c0af57313f0bf90 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/33000.32767 (32KB, 66%) checksum 6e99b589e550e68e934fd235ccba59fe5b592a9e @@ -1288,10 +1311,26 @@ P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/1638 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/1/PG_VERSION (3B, 99%) checksum 184473f470864e067ee3a22e64b47b0a1c356f29 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/PG_VERSION (3B, 99%) checksum 
184473f470864e067ee3a22e64b47b0a1c356f29 P01 INFO: backup file [TEST_PATH]/db-master/db/base/zerosize.txt (0B, 99%) -P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 99%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 -P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 -P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 100%) checksum d85de07d6421d90aa9191c11c889bfde43680f0f +P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 99%) checksum d85de07d6421d90aa9191c11c889bfde43680f0f P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 100%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 +P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 DETAIL: reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/17000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: 
reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: incr backup size = 192KB P00 INFO: new backup label = [BACKUP-INCR-2] P00 INFO: backup command end: completed successfully @@ -1470,8 +1509,8 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: backup '[BACKUP-INCR-2]' missing manifest removed from backup.info P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] -P00 WARN: aborted backup [BACKUP-INCR-2] cannot be resumed: new backup-type 'diff' does not match aborted backup-type 'incr' P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] +P00 WARN: backup '[BACKUP-INCR-2]' cannot be resumed: new backup type 'diff' does not match resumable backup type 'incr' P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/33001 (64KB, 33%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/44000_init (32KB, 49%) checksum 7a16d165e4775f7c92e8cdf60c0af57313f0bf90 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/33000.32767 (32KB, 66%) checksum 6e99b589e550e68e934fd235ccba59fe5b592a9e @@ -1489,10 +1528,26 @@ P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/1638 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/1/PG_VERSION (3B, 99%) checksum 184473f470864e067ee3a22e64b47b0a1c356f29 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/PG_VERSION (3B, 99%) checksum 
184473f470864e067ee3a22e64b47b0a1c356f29 P01 INFO: backup file [TEST_PATH]/db-master/db/base/zerosize.txt (0B, 99%) -P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 99%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 -P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 -P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 100%) checksum d85de07d6421d90aa9191c11c889bfde43680f0f +P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 99%) checksum d85de07d6421d90aa9191c11c889bfde43680f0f P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 100%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 +P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 DETAIL: reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/17000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: 
reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 192KB P00 INFO: new backup label = [BACKUP-DIFF-1] P00 INFO: backup command end: completed successfully @@ -1666,8 +1721,8 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: backup '[BACKUP-DIFF-1]' missing manifest removed from backup.info P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] -P00 WARN: aborted backup [BACKUP-DIFF-1] cannot be resumed: resume is disabled P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] +P00 WARN: backup '[BACKUP-DIFF-1]' cannot be resumed: resume is disabled P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/33001 (64KB, 33%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/44000_init (32KB, 49%) checksum 7a16d165e4775f7c92e8cdf60c0af57313f0bf90 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/32768/33000.32767 (32KB, 66%) checksum 6e99b589e550e68e934fd235ccba59fe5b592a9e @@ -1685,10 +1740,26 @@ P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/1638 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/base/1/PG_VERSION (3B, 99%) checksum 184473f470864e067ee3a22e64b47b0a1c356f29 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base/PG_VERSION (3B, 99%) checksum 184473f470864e067ee3a22e64b47b0a1c356f29 P01 INFO: backup file 
[TEST_PATH]/db-master/db/base/zerosize.txt (0B, 99%) -P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 99%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 -P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 -P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 100%) checksum d85de07d6421d90aa9191c11c889bfde43680f0f +P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt (7B, 99%) checksum d85de07d6421d90aa9191c11c889bfde43680f0f P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P01 INFO: backup file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 100%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 +P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 DETAIL: reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/17000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 
DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 192KB P00 INFO: new backup label = [BACKUP-DIFF-2] P00 INFO: backup command end: completed successfully @@ -2024,11 +2095,31 @@ incr backup - add files and remove tablespace 2 (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base-2 --process-max=1 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
-P00 INFO: last backup label = [BACKUP-DIFF-2], version = [VERSION-1] +P00 INFO: last backup label = [BACKUP-DIFF-2], version = 0.00 P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-DIFF-2] -P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt (8B, 61%) checksum e324463005236d83e6e54795dbddd20a74533bf3 +P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/base/base2.txt (5B, 38%) checksum 09b5e31766be1dba1ec27de82f975c1b6eea2a92 +P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt (8B, 100%) checksum e324463005236d83e6e54795dbddd20a74533bf3 P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt: file size 8 is not divisible by page size 8192 -P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/base/base2.txt (5B, 100%) checksum 09b5e31766be1dba1ec27de82f975c1b6eea2a92 +P00 DETAIL: reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/badchecksum.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/17000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changecontent.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to 
[BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zerosize.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt to [BACKUP-DIFF-2] P00 INFO: incr backup size = 13B P00 INFO: new backup label = [BACKUP-INCR-3] P00 INFO: backup command end: completed successfully @@ -2201,8 +2292,10 @@ incr backup - update files - fail on missing backup.info (db-master host) P00 INFO: backup command begin [BACKREST-VERSION]: --no-compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=detail --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base-2 --protocol-timeout=60 --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [055]: unable to open [TEST_PATH]/db-master/repo/backup/db/backup.info or [TEST_PATH]/db-master/repo/backup/db/backup.info.copy -P00 ERROR: [055]: [TEST_PATH]/db-master/repo/backup/db/backup.info does not exist and is required to perform a backup. +P00 ERROR: [055]: unable to load info file '[TEST_PATH]/db-master/repo/backup/db/backup.info' or '[TEST_PATH]/db-master/repo/backup/db/backup.info.copy': + FileMissingError: unable to open missing file '[TEST_PATH]/db-master/repo/backup/db/backup.info' for read + FileMissingError: unable to open missing file '[TEST_PATH]/db-master/repo/backup/db/backup.info.copy' for read + HINT: backup.info cannot be opened and is required to perform a backup. 
HINT: has a stanza-create been performed? P00 INFO: backup command end: aborted with exception [055] @@ -2239,7 +2332,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 INFO: last backup label = [BACKUP-INCR-3], version = [VERSION-1] P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-INCR-3] -P00 WARN: file pg_data/base/16384/17000 timestamp in the past or size changed but timestamp did not, enabling delta checksum +P00 WARN: file 'base/16384/17000' has same timestamp as prior but different size, enabling delta checksum P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/32768/33001 (64KB, 36%) checksum 6bf316f11d28c28914ea9be92c00de9bea6d9a6b P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/32768/44000_init (32KB, 54%) checksum 7a16d165e4775f7c92e8cdf60c0af57313f0bf90 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/32768/33000.32767 (32KB, 72%) checksum 6e99b589e550e68e934fd235ccba59fe5b592a9e @@ -2260,6 +2353,27 @@ P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/1/ P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/PG_VERSION (3B, 99%) checksum 184473f470864e067ee3a22e64b47b0a1c356f29 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt (8B, 99%) checksum e324463005236d83e6e54795dbddd20a74533bf3 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 100%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 +P00 DETAIL: reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/badchecksum.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: 
reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/base2.txt to [BACKUP-INCR-3] +P00 DETAIL: reference pg_data/changecontent.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zerosize.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt to [BACKUP-DIFF-2] +P00 DETAIL: reference pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt to [BACKUP-INCR-3] P00 INFO: incr backup size = 176KB P00 INFO: new backup label = [BACKUP-INCR-4] P00 INFO: backup command end: completed successfully @@ -2458,6 +2572,21 @@ P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/ P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt: file size 8 is not divisible by page size 8192 P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 100%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 DETAIL: 
reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 176KB P00 INFO: new backup label = [BACKUP-DIFF-3] P00 INFO: backup command end: completed successfully @@ -2647,8 +2776,8 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o P00 INFO: last backup label = [BACKUP-FULL-2], version = [VERSION-1] P00 WARN: diff backup cannot alter compress option to 'true', reset to value in [BACKUP-FULL-2] P00 WARN: diff backup cannot alter hardlink option to 'true', reset to value in [BACKUP-FULL-2] -P00 WARN: aborted backup [BACKUP-INCR-5] cannot be resumed: new backup-type 'diff' does not match aborted backup-type 'incr' P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] +P00 WARN: backup '[BACKUP-INCR-5]' cannot be resumed: new backup type 'diff' does not match resumable backup type 'incr' P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/32768/33001 (64KB, 36%) checksum 
6bf316f11d28c28914ea9be92c00de9bea6d9a6b P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/32768/44000_init (32KB, 54%) checksum 7a16d165e4775f7c92e8cdf60c0af57313f0bf90 P01 DETAIL: match file from prior backup [TEST_PATH]/db-master/db/base-2/base/32768/33000.32767 (32KB, 72%) checksum 6e99b589e550e68e934fd235ccba59fe5b592a9e @@ -2669,6 +2798,21 @@ P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/ P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2c.txt: file size 12 is not divisible by page size 8192 P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt (7B, 100%) checksum dc7f76e43c46101b47acc55ae4d593a9e6983578 P00 WARN: page misalignment in file [TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 DETAIL: reference pg_data/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/12000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/1/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/16384/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33000.32767 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/33001 to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/44000_init to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/base/32768/PG_VERSION to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/changetime.txt to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/global/pg_control to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/pg_stat/global.stat to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/postgresql.conf to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/special-!_.*'()&!@;:+,? 
to [BACKUP-FULL-2] +P00 DETAIL: reference pg_data/zero_from_start to [BACKUP-FULL-2] P00 INFO: diff backup size = 176KB P00 INFO: new backup label = [BACKUP-DIFF-4] P00 INFO: backup command end: completed successfully @@ -3658,7 +3802,7 @@ diff backup - option backup-standby reset - backup performed from master (db-mas P00 INFO: backup command begin [BACKREST-VERSION]: --backup-standby --compress --compress-level=3 --config=[TEST_PATH]/db-master/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/db-master/lock --log-level-console=info --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/db-master/log --log-subprocess --no-log-timestamp --no-online --pg1-path=[TEST_PATH]/db-master/db/base-2/base --protocol-timeout=60 --repo1-hardlink --repo1-path=[TEST_PATH]/db-master/repo --stanza=db --start-fast --type=diff P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
-P00 WARN: option backup-standby is enabled but standby is not properly configured - backups will be performed from the master +P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary P00 INFO: last backup label = [BACKUP-FULL-3], version = [VERSION-1] P01 INFO: backup file [TEST_PATH]/db-master/db/base-2/base/base/base2.txt (9B, 100%) checksum cafac3c59553f2cfde41ce2e62e7662295f108c0 P00 INFO: diff backup size = 9B diff --git a/test/expect/mock-all-002.log b/test/expect/mock-all-002.log index 5a1dd7181..2d42c2f6b 100644 --- a/test/expect/mock-all-002.log +++ b/test/expect/mock-all-002.log @@ -52,14 +52,14 @@ full backup - error on identical link destinations (backup host) ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [070]: link [TEST_PATH]/db-master/db/base/pg_hba.conf (../pg_config) references a subdirectory of or the same directory as link [TEST_PATH]/db-master/db/base/pg_config_bad (../../db/pg_config) +P00 ERROR: [070]: link 'pg_config_bad/pg_hba.conf.link' destination '[TEST_PATH]/db-master/db/base/pg_config_bad' is in PGDATA full backup - error on link to a link (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --type=full --stanza=db backup ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
-P00 ERROR: [070]: link '[TEST_PATH]/db-master/db/base/postgresql.conf.bad' -> '../pg_config/postgresql.conf.link' cannot reference another link +P00 ERROR: [070]: link '[TEST_PATH]/db-master/db/base/postgresql.conf.bad' cannot reference another link '[TEST_PATH]/db-master/db/pg_config/postgresql.conf.link' full backup - create pg_stat link, pg_clog dir (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --manifest-save-threshold=3 --cmd-ssh=/usr/bin/ssh --pg1-port=9999 --pg1-socket-path=/test_socket_path --buffer-size=16384 --checksum-page --process-max=1 --type=full --stanza=db backup @@ -257,7 +257,7 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: backup '[BACKUP-FULL-1]' missing manifest removed from backup.info P00 WARN: --no-online passed and postmaster.pid exists but --force was passed so backup will continue though it looks like the postmaster is running and the backup will probably not be consistent -P00 WARN: aborted backup [BACKUP-FULL-2] of same type exists, will be cleaned to remove invalid files and resumed +P00 WARN: resumable backup [BACKUP-FULL-2] of same type exists -- remove invalid files and resume + supplemental file: [TEST_PATH]/db-master/pgbackrest.conf ---------------------------------------------------------- @@ -453,8 +453,10 @@ full backup - invalid repo (backup host) ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
-P00 ERROR: [055]: unable to open /bogus_path/backup/db/backup.info or /bogus_path/backup/db/backup.info.copy -P00 ERROR: [055]: /bogus_path/backup/db/backup.info does not exist and is required to perform a backup. +P00 ERROR: [055]: unable to load info file '/bogus_path/backup/db/backup.info' or '/bogus_path/backup/db/backup.info.copy': + FileMissingError: unable to open '/bogus_path/backup/db/backup.info': No such file or directory + FileMissingError: unable to open '/bogus_path/backup/db/backup.info.copy': No such file or directory + HINT: backup.info cannot be opened and is required to perform a backup. HINT: has a stanza-create been performed? restore delta, backup '[BACKUP-FULL-2]' - add and delete files (db-master host) @@ -504,7 +506,7 @@ incr backup - invalid database version (backup host) ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 ERROR: [051]: database version = 9.4, system-id 1000000000000000094 does not match backup version = 8.0, system-id = 1000000000000000094 +P00 ERROR: [051]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 8.0, system-id 1000000000000000094 HINT: is this the correct stanza? incr backup - invalid system id (backup host) @@ -512,7 +514,7 @@ incr backup - invalid system id (backup host) ------------------------------------------------------------------------------------------------------------------------------------ P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
-P00 ERROR: [051]: database version = 9.4, system-id 1000000000000000094 does not match backup version = 9.4, system-id = 6999999999999999999 +P00 ERROR: [051]: PostgreSQL version 9.4, system-id 1000000000000000094 do not match stanza version 9.4, system-id 6999999999999999999 HINT: is this the correct stanza? incr backup - invalid path in pg_tblspc (backup host) @@ -521,7 +523,7 @@ incr backup - invalid path in pg_tblspc (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [069]: pg_tblspc/path is not a symlink - pg_tblspc should contain only symlinks +P00 ERROR: [069]: 'pg_data/pg_tblspc/path' is not a symlink - pg_tblspc should contain only symlinks incr backup - invalid relative tablespace in $PGDATA (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --stanza=db backup @@ -529,7 +531,7 @@ incr backup - invalid relative tablespace in $PGDATA (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink ../invalid_tblspc destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base/invalid_tblspc' is in PGDATA incr backup - invalid tablespace in $PGDATA (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --stanza=db backup @@ -537,7 +539,7 @@ incr backup - invalid tablespace in $PGDATA (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 ERROR: [070]: tablespace symlink [TEST_PATH]/db-master/db/base/invalid_tblspc destination must not be in $PGDATA +P00 ERROR: [070]: link 'pg_tblspc/99999' destination '[TEST_PATH]/db-master/db/base/invalid_tblspc' is in PGDATA incr backup - add tablespace 1 (backup host) > [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --no-online --stanza=db backup @@ -744,10 +746,10 @@ P00 WARN: option repo1-retention-full is not set, the repository may run out o HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: backup '[BACKUP-INCR-1]' missing manifest removed from backup.info P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 WARN: file pg_data/changetime.txt timestamp in the past or size changed but timestamp did not, enabling delta checksum -P00 WARN: aborted backup [BACKUP-INCR-2] of same type exists, will be cleaned to remove invalid files and resumed -P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 WARN: file 'changetime.txt' has timestamp earlier than prior backup, enabling delta checksum +P00 WARN: resumable backup [BACKUP-INCR-2] of same type exists -- remove invalid files and resume P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 + supplemental file: [TEST_PATH]/db-master/pgbackrest.conf ---------------------------------------------------------- @@ -955,11 +957,11 @@ diff backup - cannot resume - new diff (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: backup '[BACKUP-INCR-2]' missing manifest removed from backup.info -P00 WARN: aborted backup [BACKUP-INCR-2] cannot be resumed: new backup-type 'diff' does not match aborted backup-type 'incr' P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 WARN: file pg_data/changetime.txt timestamp in the past or size changed but timestamp did not, enabling delta checksum -P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 WARN: file 'changetime.txt' has timestamp earlier than prior backup, enabling delta checksum +P00 WARN: backup '[BACKUP-INCR-2]' cannot be resumed: new backup type 'diff' does not match resumable backup type 'incr' P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 + supplemental file: [TEST_PATH]/db-master/pgbackrest.conf ---------------------------------------------------------- @@ -1163,11 +1165,11 @@ diff backup - cannot resume - disabled / no repo link (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: backup '[BACKUP-DIFF-1]' missing manifest removed from backup.info -P00 WARN: aborted backup [BACKUP-DIFF-1] cannot be resumed: resume is disabled P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 WARN: file pg_data/changetime.txt timestamp in the past or size changed but timestamp did not, enabling delta checksum -P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 +P00 WARN: file 'changetime.txt' has timestamp earlier than prior backup, enabling delta checksum +P00 WARN: backup '[BACKUP-DIFF-1]' cannot be resumed: resume is disabled P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/1/[TS_PATH-1]/16384/tablespace1.txt: file size 7 is not divisible by page size 8192 +P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 + supplemental file: [TEST_PATH]/db-master/pgbackrest.conf ---------------------------------------------------------- @@ -1600,7 +1602,7 @@ incr backup - update files (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: incr backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-INCR-3] -P00 WARN: file pg_data/base/16384/17000 timestamp in the past or size changed but timestamp did not, enabling delta checksum +P00 WARN: file 'base/16384/17000' has same timestamp as prior but different size, enabling delta checksum P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base-2/base/16384/17000: file size 8 is not divisible by page size 8192 + supplemental file: [TEST_PATH]/db-master/pgbackrest.conf @@ -1807,7 +1809,7 @@ diff backup - updates since last full (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 WARN: file pg_data/base/16384/17000 timestamp in the past or size changed but timestamp did not, enabling delta checksum +P00 WARN: file 'base/16384/17000' has same timestamp as prior but different size, enabling delta checksum P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base-2/base/16384/17000: file size 8 is not divisible by page size 8192 P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2b.txt: file size 8 is not divisible by page size 8192 P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 @@ -2026,9 +2028,9 @@ diff backup - remove files during backup (backup host) P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. 
P00 WARN: diff backup cannot alter compress option to 'true', reset to value in [BACKUP-FULL-2] -P00 WARN: aborted backup [BACKUP-INCR-5] cannot be resumed: new backup-type 'diff' does not match aborted backup-type 'incr' P00 WARN: diff backup cannot alter 'checksum-page' option to 'false', reset to 'true' from [BACKUP-FULL-2] -P00 WARN: file pg_data/changetime.txt timestamp in the past or size changed but timestamp did not, enabling delta checksum +P00 WARN: file 'changetime.txt' has timestamp earlier than prior backup, enabling delta checksum +P00 WARN: backup '[BACKUP-INCR-5]' cannot be resumed: new backup type 'diff' does not match resumable backup type 'incr' P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2c.txt: file size 12 is not divisible by page size 8192 P00 WARN: page misalignment in file db-master:[TEST_PATH]/db-master/db/base-2/pg_tblspc/2/[TS_PATH-1]/32768/tablespace2.txt: file size 7 is not divisible by page size 8192 @@ -2897,7 +2899,7 @@ diff backup - option backup-standby reset - backup performed from master (backup P00 INFO: backup command begin [BACKREST-VERSION]: --backup-standby --compress --compress-level=3 --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=info --log-level-file=trace --log-level-stderr=off --log-path=[TEST_PATH]/backup/log --log-subprocess --no-log-timestamp --no-online --pg1-host=db-master --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-master/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-master/db/base-2/base --process-max=2 --protocol-timeout=60 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key= --repo1-s3-key-secret= --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db --start-fast --type=diff 
P00 WARN: option repo1-retention-full is not set, the repository may run out of space HINT: to retain full backups indefinitely (without warning), set option 'repo1-retention-full' to the maximum. -P00 WARN: option backup-standby is enabled but standby is not properly configured - backups will be performed from the master +P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary P00 INFO: last backup label = [BACKUP-FULL-3], version = [VERSION-1] P01 INFO: backup file db-master:[TEST_PATH]/db-master/db/base-2/base/base/base2.txt (9B, 100%) checksum cafac3c59553f2cfde41ce2e62e7662295f108c0 P00 INFO: diff backup size = 9B diff --git a/test/lib/pgBackRestTest/Module/Backup/BackupFileUnitPerlTest.pm b/test/lib/pgBackRestTest/Module/Backup/BackupFileUnitPerlTest.pm deleted file mode 100644 index e74f3fbcb..000000000 --- a/test/lib/pgBackRestTest/Module/Backup/BackupFileUnitPerlTest.pm +++ /dev/null @@ -1,511 +0,0 @@ -#################################################################################################################################### -# Tests for Backup File module -#################################################################################################################################### -package pgBackRestTest::Module::Backup::BackupFileUnitPerlTest; -use parent 'pgBackRestTest::Env::HostEnvTest'; - -#################################################################################################################################### -# Perl includes -#################################################################################################################################### -use strict; -use warnings FATAL => qw(all); -use Carp qw(confess); - -use File::Basename qw(dirname); -use Storable qw(dclone); - -use pgBackRest::Backup::File; -use pgBackRest::Common::Exception; -use pgBackRest::Common::Ini; -use pgBackRest::Common::Log; -use pgBackRest::Common::String; -use pgBackRest::Common::Wait; 
-use pgBackRest::Config::Config; -use pgBackRest::DbVersion; -use pgBackRest::Manifest; -use pgBackRest::Protocol::Helper; -use pgBackRest::Protocol::Storage::Helper; -use pgBackRest::Storage::Helper; - -use pgBackRestTest::Common::ExecuteTest; -use pgBackRestTest::Common::RunTest; -use pgBackRestTest::Env::Host::HostBackupTest; - -#################################################################################################################################### -# initModule -#################################################################################################################################### -sub initModule -{ - my $self = shift; - - $self->{strDbPath} = $self->testPath() . '/db'; - $self->{strRepoPath} = $self->testPath() . '/repo'; - $self->{strBackupPath} = "$self->{strRepoPath}/backup/" . $self->stanza(); - $self->{strPgControl} = $self->{strDbPath} . '/' . DB_FILE_PGCONTROL; - - # Create backup path - storageTest()->pathCreate($self->{strBackupPath}, {bIgnoreExists => true, bCreateParent => true}); - - # Generate pg_control file - storageTest()->pathCreate($self->{strDbPath} . '/' . 
DB_PATH_GLOBAL, {bCreateParent => true}); - $self->controlGenerate($self->{strDbPath}, PG_VERSION_94); -} - -#################################################################################################################################### -# run -#################################################################################################################################### -sub run -{ - my $self = shift; - - $self->optionTestSet(CFGOPT_STANZA, $self->stanza()); - $self->optionTestSet(CFGOPT_PG_PATH, $self->{strDbPath}); - $self->optionTestSet(CFGOPT_REPO_PATH, $self->{strRepoPath}); - $self->optionTestSet(CFGOPT_LOG_PATH, $self->testPath()); - - $self->optionTestSetBool(CFGOPT_ONLINE, false); - - $self->optionTestSet(CFGOPT_DB_TIMEOUT, 5); - $self->optionTestSet(CFGOPT_PROTOCOL_TIMEOUT, 6); - $self->optionTestSet(CFGOPT_COMPRESS_LEVEL, 3); - - $self->configTestLoad(CFGCMD_BACKUP); - - # Repo - my $strRepoBackupPath = storageRepo()->pathGet(STORAGE_REPO_BACKUP); - my $strBackupLabel = "20180724-182750F"; - - # File - my $strFileName = "12345"; - my $strFileDb = $self->{strDbPath} . "/$strFileName"; - my $strFileHash = '1c7e00fd09b9dd11fc2966590b3e3274645dd031'; - my $strFileRepo = storageRepo()->pathGet( - STORAGE_REPO_BACKUP . "/$strBackupLabel/" . MANIFEST_TARGET_PGDATA . "/$strFileName"); - my $strRepoFile = MANIFEST_TARGET_PGDATA . "/$strFileName"; - my $strRepoPgControl = MANIFEST_FILE_PGCONTROL; - my $strPgControlRepo = storageRepo()->pathGet(STORAGE_REPO_BACKUP . "/$strBackupLabel/$strRepoPgControl"); - my $strPgControlHash = - $self->archBits() == 32 ? '8107e546c59c72a8c1818fc3610d7cc1e5623660' : '4c77c900f7af0d9ab13fa9982051a42e0b637f6c'; - - # Copy file to db path - executeTest('cp ' . $self->dataPath() . 
"/filecopy.archive2.bin ${strFileDb}"); - - # Get size and data info for the files in the db path - my $hManifest = storageDb()->manifest($self->{strDbPath}); - my $lFileSize = $hManifest->{$strFileName}{size} + 0; - my $lFileTime = $hManifest->{$strFileName}{modification_time} + 0; - my $lPgControlSize = $hManifest->{&DB_FILE_PGCONTROL}{size} + 0; - my $lPgControlTime = $hManifest->{&DB_FILE_PGCONTROL}{modification_time} + 0; - - my $lRepoFileCompressSize = 3646899; - - my $strBackupPath = $self->{strBackupPath} . "/$strBackupLabel"; - my $strHost = "host"; - my $iLocalId = 1; - - # Initialize the manifest - my $oBackupManifest = new pgBackRest::Manifest("$strBackupPath/" . FILE_MANIFEST, - {bLoad => false, strDbVersion => PG_VERSION_94, iDbCatalogVersion => 201409291}); - $oBackupManifest->build(storageDb(), $self->{strDbPath}, undef, true, false); - - # Set the initial size values for backupManifestUpdate - running size and size for when to save the file - my $lSizeCurrent = 0; - my $lSizeTotal = 16785408; - my $lManifestSaveCurrent = 0; - my $lManifestSaveSize = int($lSizeTotal / 100); - - # Result variables - my $iResultCopyResult; - my $lResultCopySize; - my $lResultRepoSize; - my $strResultCopyChecksum; - my $rResultExtra; - - ################################################################################################################################ - if ($self->begin('backupFile(), backupManifestUpdate()')) - { - #--------------------------------------------------------------------------------------------------------------------------- - # Create backup path so manifest can be saved - storageRepo->pathCreate(storageRepo()->pathGet(STORAGE_REPO_BACKUP . 
"/$strBackupLabel")); - - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $self->{strPgControl}, - $strRepoPgControl, - $lPgControlSize, - undef, - false, - BACKUP_FILE_COPY, - 8192, - 8192, - $strPgControlHash, - undef, - 16785408, - 0, - 167854, - 0); - - # Accumulators should be same size as pg_control - $self->testResult(($lSizeCurrent == $lPgControlSize && $lManifestSaveCurrent == $lPgControlSize), true, - "file size in repo and repo size equal pg_control size"); - - $self->testResult(sub {$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, MANIFEST_FILE_PGCONTROL, - MANIFEST_SUBKEY_CHECKSUM, $strPgControlHash)}, true, "manifest updated for pg_control"); - - # Neither backup.manifest nor backup.manifest.copy written because size threshold not met - $self->testResult(sub {storageRepo()->exists("$strBackupPath/" . FILE_MANIFEST)}, false, "backup.manifest missing"); - $self->testResult( - sub {storageRepo()->exists("$strBackupPath/" . FILE_MANIFEST . INI_COPY_EXT)}, false, "backup.manifest.copy missing"); - - #--------------------------------------------------------------------------------------------------------------------------- - # No prior checksum, no compression, no page checksum, no extra, no delta, no hasReference - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - false, - BACKUP_FILE_COPY, - 16777216, - 16777216, - '1c7e00fd09b9dd11fc2966590b3e3274645dd031', - undef, - 16785408, - 8192, - 167854, - 8192); - - # Accumulator includes size of pg_control and file. 
Manifest saved so ManifestSaveCurrent returns to 0 - $self->testResult(($lSizeCurrent == ($lPgControlSize + $lFileSize) && $lManifestSaveCurrent == 0), true, - "repo size increased and ManifestSaveCurrent returns to 0"); - - $self->testResult(sub {$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, - MANIFEST_SUBKEY_CHECKSUM, $strFileHash)}, true, "manifest updated for $strRepoFile"); - - # Backup.manifest not written but backup.manifest.copy written because size threshold met - $self->testResult(sub {storageTest()->exists("$strBackupPath/" . FILE_MANIFEST . INI_COPY_EXT)}, true, - 'backup.manifest.copy exists in repo'); - $self->testResult( - sub {storageRepo()->exists("$strBackupPath/" . FILE_MANIFEST)}, false, 'backup.manifest.copy missing in repo'); - - #--------------------------------------------------------------------------------------------------------------------------- - # Set up page checksum result - $rResultExtra = {'valid' => true,'align' => true}; - - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - true, - BACKUP_FILE_COPY, - 16777216, - 3646899, - '1c7e00fd09b9dd11fc2966590b3e3274645dd031', - $rResultExtra, - 16785408, - 16785408, - 167854, - 0); - - # File is compressed in repo so make sure repo-size added to manifest - $self->testResult(sub {$oBackupManifest->test( - MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REPO_SIZE, $lResultRepoSize)}, - true, "repo-size set"); - $self->testResult(sub {$oBackupManifest->test( - MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM_PAGE, $rResultExtra->{bValid})}, - true, "checksum page set"); - - # Set a section in the manifest to ensure it is removed in the next test - $oBackupManifest->set( - MANIFEST_SECTION_TARGET_FILE, "$strRepoFile.1", MANIFEST_SUBKEY_CHECKSUM, $strResultCopyChecksum); - - $self->testResult(sub 
{$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, MANIFEST_TARGET_PGDATA . "/$strFileName.1")}, - true, MANIFEST_TARGET_PGDATA . "/$strFileName.1 section exists in manifest - skip file"); - - #--------------------------------------------------------------------------------------------------------------------------- - # Removed db file is removed from manifest - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - "$strFileDb.1", - "$strRepoFile.1", - $lFileSize, - $strFileHash, - false, - BACKUP_FILE_SKIP, - undef, - undef, - undef, - undef, - 16785408, - 33562624, - 167854, - 0); - - $self->testResult(sub {$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, "$strRepoFile.1")}, - false, " $strRepoFile.1 section removed from manifest"); - - # Add back the section - $oBackupManifest->set(MANIFEST_SECTION_TARGET_FILE, "$strRepoFile.1"); - - # Code coverage for code path when host not defined for logged message of skipped file - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - undef, - $iLocalId, - "$strFileDb.1", - MANIFEST_TARGET_PGDATA . 
"/$strFileName.1", - $lFileSize, - $strFileHash, - false, - BACKUP_FILE_SKIP, - undef, - undef, - undef, - undef, - 16785408, - 50339840, - 167854, - 0); - - $self->testResult(sub {$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, "$strRepoFile.1")}, - false, " $strRepoFile.1 section removed from manifest on undef host"); - - #--------------------------------------------------------------------------------------------------------------------------- - # Has reference - Code path to ensure reference is removed - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - false, - BACKUP_FILE_COPY, - 16777216, - 16777216, - '1c7e00fd09b9dd11fc2966590b3e3274645dd031', - undef, - 16785408, - 67117056, - 167854, - 0); - - # Confirm reference to prior backup removed - $self->testResult(sub {$oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, MANIFEST_TARGET_PGDATA . "/$strFileName.", - MANIFEST_SUBKEY_REFERENCE)}, - false, "reference to prior backup in manifest removed"); - - #--------------------------------------------------------------------------------------------------------------------------- - # BACKUP_FILE_NOOP - - # Calculate running counts - my $lSizeCurrentAfter = $lSizeCurrent + $lFileSize; - my $lManifestSaveCurrentAfter = $lManifestSaveCurrent + $lFileSize; - - # Increase manifest save size, so manifest will not be saved so counts can be tested - $lManifestSaveSize = $lFileSize * 2; - - ($lSizeCurrent, $lManifestSaveCurrent) = backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - false, - BACKUP_FILE_NOOP, - 16777216, - undef, - '1c7e00fd09b9dd11fc2966590b3e3274645dd031', - undef, - 16785408, - 83894272, - $lManifestSaveSize, - 0); - - $self->testResult(($lSizeCurrent ==$lSizeCurrentAfter && $lManifestSaveCurrent == $lManifestSaveCurrentAfter), - true, ' running counts 
updated'); - } - - ################################################################################################################################ - # This section for for code coverage that is not covered in the above tests - if ($self->begin('backupManifestUpdate()')) - { - $oBackupManifest = new pgBackRest::Manifest("$strBackupPath/" . FILE_MANIFEST, - {bLoad => false, strDbVersion => PG_VERSION_94, iDbCatalogVersion => 201409291}); - - #--------------------------------------------------------------------------------------------------------------------------- - # Check BACKUP_FILE_RECOPY warning - $iResultCopyResult = BACKUP_FILE_RECOPY; - $lResultCopySize = 0; - $lResultRepoSize = $lResultCopySize + 1; - $strResultCopyChecksum = $strFileHash; - $lSizeCurrent = 0; - $lManifestSaveSize = $lFileSize * 2; - $lManifestSaveCurrent = 0; - $rResultExtra = undef; - - $self->testResult(sub {backupManifestUpdate( - $oBackupManifest, - undef, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - false, - $iResultCopyResult, - $lResultCopySize, - $lResultRepoSize, - $strResultCopyChecksum, - $rResultExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent)}, "($lFileSize, $lFileSize)", - 'backup file recopy warning', {strLogExpect => - "WARN: resumed backup file $strRepoFile does not have expected checksum $strFileHash. The file will be recopied and" . - " backup will continue but this may be an issue unless the resumed backup path in the repository is known to be" . - " corrupted.\n" . 
- "NOTE: this does not indicate a problem with the PostgreSQL page checksums."}); - - # Check size code paths - $self->testResult( - $oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_SIZE, $lResultCopySize), - true, " copy size set"); - $self->testResult( - $oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_REPO_SIZE, $lResultRepoSize), - true, " repo size set"); - $self->testResult( - $oBackupManifest->test(MANIFEST_SECTION_TARGET_FILE, $strRepoFile, MANIFEST_SUBKEY_CHECKSUM, $strResultCopyChecksum), - false, " checksum not set since copy size 0"); - - #--------------------------------------------------------------------------------------------------------------------------- - # Checksum page exception - $iResultCopyResult = BACKUP_FILE_COPY; - - $self->testException(sub {backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - true, - $iResultCopyResult, - $lResultCopySize, - $lResultRepoSize, - $strResultCopyChecksum, - $rResultExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent)}, - ERROR_ASSERT, "$strFileDb should have calculated page checksums"); - - $rResultExtra->{valid} = false; - $self->testException(sub {backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - true, - $iResultCopyResult, - $lResultCopySize + 1, - $lResultRepoSize, - $strResultCopyChecksum, - $rResultExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent)}, - ERROR_ASSERT, "align flag should have been set for misaligned page"); - - $rResultExtra->{align} = true; - $self->testException(sub {backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - true, - $iResultCopyResult, - $lResultCopySize + 1, - $lResultRepoSize, - $strResultCopyChecksum, - 
$rResultExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent)}, - ERROR_ASSERT, "align flag should have been set for misaligned page"); - - $rResultExtra->{align} = false; - $self->testResult(sub {backupManifestUpdate( - $oBackupManifest, - $strHost, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - true, - $iResultCopyResult, - $lResultCopySize + 1, - $lResultRepoSize, - $strResultCopyChecksum, - $rResultExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent)}, - "($lFileSize, $lFileSize)", - 'page misalignment warning - host defined', {strLogExpect => - "WARN: page misalignment in file $strHost:$strFileDb: file size " . ($lResultCopySize + 1) . - " is not divisible by page size " . PG_PAGE_SIZE}); - - $self->testResult(sub {backupManifestUpdate( - $oBackupManifest, - undef, - $iLocalId, - $strFileDb, - $strRepoFile, - $lFileSize, - $strFileHash, - true, - $iResultCopyResult, - $lResultCopySize + 1, - $lResultRepoSize, - $strResultCopyChecksum, - $rResultExtra, - $lSizeTotal, - $lSizeCurrent, - $lManifestSaveSize, - $lManifestSaveCurrent)}, - "($lFileSize, $lFileSize)", - 'page misalignment warning - host not defined', {strLogExpect => - "WARN: page misalignment in file $strFileDb: file size " . ($lResultCopySize + 1) . - " is not divisible by page size " . 
PG_PAGE_SIZE}); - } -} - -1; diff --git a/test/lib/pgBackRestTest/Module/Backup/BackupUnitPerlTest.pm b/test/lib/pgBackRestTest/Module/Backup/BackupUnitPerlTest.pm deleted file mode 100644 index 5cce8a672..000000000 --- a/test/lib/pgBackRestTest/Module/Backup/BackupUnitPerlTest.pm +++ /dev/null @@ -1,409 +0,0 @@ -#################################################################################################################################### -# Tests for Backup module -#################################################################################################################################### -package pgBackRestTest::Module::Backup::BackupUnitPerlTest; -use parent 'pgBackRestTest::Env::HostEnvTest'; - -#################################################################################################################################### -# Perl includes -#################################################################################################################################### -use strict; -use warnings FATAL => qw(all); -use Carp qw(confess); - -use File::Basename qw(dirname); -use Storable qw(dclone); - -use pgBackRest::Backup::Backup; -use pgBackRest::Backup::Common; -use pgBackRest::Common::Exception; -use pgBackRest::Common::Log; -use pgBackRest::Common::String; -use pgBackRest::Common::Wait; -use pgBackRest::Config::Config; -use pgBackRest::DbVersion; -use pgBackRest::Manifest; -use pgBackRest::Protocol::Helper; -use pgBackRest::Protocol::Storage::Helper; -use pgBackRest::Storage::Helper; - -use pgBackRestTest::Common::ContainerTest; -use pgBackRestTest::Common::ExecuteTest; -use pgBackRestTest::Common::FileTest; -use pgBackRestTest::Common::RunTest; -use pgBackRestTest::Env::Host::HostBackupTest; - -#################################################################################################################################### -# run 
-#################################################################################################################################### -sub run -{ - my $self = shift; - - ################################################################################################################################ - if ($self->begin('backupRegExpGet()')) - { - # Expected results matrix - my $hExpected = {}; - $hExpected->{&false}{&false}{&true}{&false} = '[0-9]{8}\-[0-9]{6}F\_[0-9]{8}\-[0-9]{6}I'; - $hExpected->{&false}{&false}{&true}{&true} = '^[0-9]{8}\-[0-9]{6}F\_[0-9]{8}\-[0-9]{6}I$'; - $hExpected->{&false}{&true}{&false}{&false} = '[0-9]{8}\-[0-9]{6}F\_[0-9]{8}\-[0-9]{6}D'; - $hExpected->{&false}{&true}{&false}{&true} = '^[0-9]{8}\-[0-9]{6}F\_[0-9]{8}\-[0-9]{6}D$'; - $hExpected->{&false}{&true}{&true}{&false} = '[0-9]{8}\-[0-9]{6}F\_[0-9]{8}\-[0-9]{6}(D|I)'; - $hExpected->{&false}{&true}{&true}{&true} = '^[0-9]{8}\-[0-9]{6}F\_[0-9]{8}\-[0-9]{6}(D|I)$'; - $hExpected->{&true}{&false}{&false}{&false} = '[0-9]{8}\-[0-9]{6}F'; - $hExpected->{&true}{&false}{&false}{&true} = '^[0-9]{8}\-[0-9]{6}F$'; - $hExpected->{&true}{&false}{&true}{&false} = '[0-9]{8}\-[0-9]{6}F(\_[0-9]{8}\-[0-9]{6}I){0,1}'; - $hExpected->{&true}{&false}{&true}{&true} = '^[0-9]{8}\-[0-9]{6}F(\_[0-9]{8}\-[0-9]{6}I){0,1}$'; - $hExpected->{&true}{&true}{&false}{&false} = '[0-9]{8}\-[0-9]{6}F(\_[0-9]{8}\-[0-9]{6}D){0,1}'; - $hExpected->{&true}{&true}{&false}{&true} = '^[0-9]{8}\-[0-9]{6}F(\_[0-9]{8}\-[0-9]{6}D){0,1}$'; - $hExpected->{&true}{&true}{&true}{&false} = '[0-9]{8}\-[0-9]{6}F(\_[0-9]{8}\-[0-9]{6}(D|I)){0,1}'; - $hExpected->{&true}{&true}{&true}{&true} = '^[0-9]{8}\-[0-9]{6}F(\_[0-9]{8}\-[0-9]{6}(D|I)){0,1}$'; - - # Iterate though all possible combinations - for (my $bFull = false; $bFull <= true; $bFull++) - { - for (my $bDiff = false; $bDiff <= true; $bDiff++) - { - for (my $bIncr = false; $bIncr <= true; $bIncr++) - { - for (my $bAnchor = false; $bAnchor <= true; $bAnchor++) - { - # Make sure that an 
assertion is thrown if no types are requested - if (!($bFull || $bDiff || $bIncr)) - { - $self->testException( - sub {backupRegExpGet($bFull, $bDiff, $bIncr, $bAnchor)}, - ERROR_ASSERT, 'at least one backup type must be selected'); - } - # Else make sure the returned value is correct - else - { - $self->testResult( - sub {backupRegExpGet($bFull, $bDiff, $bIncr, $bAnchor)}, $hExpected->{$bFull}{$bDiff}{$bIncr}{$bAnchor}, - "expression full $bFull, diff $bDiff, incr $bIncr, anchor $bAnchor = " . - $hExpected->{$bFull}{$bDiff}{$bIncr}{$bAnchor}); - } - } - } - } - } - } - - ################################################################################################################################ - if ($self->begin('backupLabelFormat()')) - { - my $strBackupLabelFull = timestampFileFormat(undef, 1482000000) . 'F'; - $self->testResult(sub {backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_FULL, undef, 1482000000)}, $strBackupLabelFull, - 'full backup label'); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testException( - sub {backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_FULL, $strBackupLabelFull, 1482000000)}, - ERROR_ASSERT, "strBackupLabelLast must not be defined when strType = 'full'"); - - #--------------------------------------------------------------------------------------------------------------------------- - my $strBackupLabelDiff = "${strBackupLabelFull}_" . timestampFileFormat(undef, 1482000400) . 
'D'; - $self->testResult( - sub {backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_DIFF, $strBackupLabelFull, 1482000400)}, $strBackupLabelDiff, - 'diff backup label'); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testException( - sub {backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_DIFF, undef, 1482000400)}, - ERROR_ASSERT, "strBackupLabelLast must be defined when strType = 'diff'"); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult( - sub {backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_INCR, $strBackupLabelDiff, 1482000800)}, - "${strBackupLabelFull}_" . timestampFileFormat(undef, 1482000800) . 'I', - 'incremental backup label'); - } - - ################################################################################################################################ - if ($self->begin('backupLabel()')) - { - $self->optionTestSet(CFGOPT_STANZA, $self->stanza()); - $self->optionTestSet(CFGOPT_REPO_PATH, $self->testPath() . '/repo'); - $self->configTestLoad(CFGCMD_ARCHIVE_PUSH); - - #--------------------------------------------------------------------------------------------------------------------------- - my $lTime = time(); - - my $strFullLabel = backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - storageRepo()->pathCreate(STORAGE_REPO_BACKUP . "/${strFullLabel}", {bCreateParent => true}); - - my $strNewFullLabel = backupLabel(storageRepo(), CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - - $self->testResult(sub {$strFullLabel ne $strNewFullLabel}, true, 'new full label <> existing full backup dir'); - - #--------------------------------------------------------------------------------------------------------------------------- - executeTest('rmdir ' . storageRepo()->pathGet(STORAGE_REPO_BACKUP . "/${strFullLabel}")); - - storageRepo()->pathCreate( - STORAGE_REPO_BACKUP . qw(/) . 
PATH_BACKUP_HISTORY . '/' . timestampFormat('%4d', $lTime), {bCreateParent => true}); - storageRepo()->put( - STORAGE_REPO_BACKUP . qw{/} . PATH_BACKUP_HISTORY . '/' . timestampFormat('%4d', $lTime) . - "/${strFullLabel}.manifest." . COMPRESS_EXT); - - $strNewFullLabel = backupLabel(storageRepo(), CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - - $self->testResult(sub {$strFullLabel ne $strNewFullLabel}, true, 'new full label <> existing full history file'); - - #--------------------------------------------------------------------------------------------------------------------------- - $lTime = time() + 1000; - $strFullLabel = backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - - $strNewFullLabel = backupLabel(storageRepo(), CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - - $self->testResult(sub {$strFullLabel eq $strNewFullLabel}, true, 'new full label in future'); - - #--------------------------------------------------------------------------------------------------------------------------- - $lTime = time(); - - $strFullLabel = backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - my $strDiffLabel = backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_DIFF, $strFullLabel, $lTime); - storageRepo()->pathCreate(STORAGE_REPO_BACKUP . "/${strDiffLabel}", {bCreateParent => true}); - - my $strNewDiffLabel = backupLabel(storageRepo(), CFGOPTVAL_BACKUP_TYPE_DIFF, $strFullLabel, $lTime); - - $self->testResult(sub {$strDiffLabel ne $strNewDiffLabel}, true, 'new diff label <> existing diff backup dir'); - - #--------------------------------------------------------------------------------------------------------------------------- - executeTest('rmdir ' . storageRepo()->pathGet(STORAGE_REPO_BACKUP . "/${strDiffLabel}")); - - storageRepo()->pathCreate( - STORAGE_REPO_BACKUP . qw(/) . PATH_BACKUP_HISTORY . '/' . timestampFormat('%4d', $lTime), - {bIgnoreExists => true, bCreateParent => true}); - storageRepo()->put( - STORAGE_REPO_BACKUP . qw{/} . PATH_BACKUP_HISTORY . 
'/' . timestampFormat('%4d', $lTime) . - "/${strDiffLabel}.manifest." . COMPRESS_EXT); - - $strNewDiffLabel = backupLabel(storageRepo(), CFGOPTVAL_BACKUP_TYPE_DIFF, $strFullLabel, $lTime); - - $self->testResult(sub {$strDiffLabel ne $strNewDiffLabel}, true, 'new full label <> existing diff history file'); - - #--------------------------------------------------------------------------------------------------------------------------- - $lTime = time() + 1000; - $strDiffLabel = backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_DIFF, $strFullLabel, $lTime); - - $strNewDiffLabel = backupLabel(storageRepo(), CFGOPTVAL_BACKUP_TYPE_DIFF, $strFullLabel, $lTime); - - $self->testResult(sub {$strDiffLabel eq $strNewDiffLabel}, true, 'new diff label in future'); - } - - ################################################################################################################################ - if ($self->begin('resumeClean()')) - { - $self->optionTestSet(CFGOPT_STANZA, $self->stanza()); - $self->optionTestSet(CFGOPT_REPO_PATH, $self->testPath() . '/repo'); - $self->optionTestSet(CFGOPT_PG_PATH, $self->testPath() . '/db'); - $self->configTestLoad(CFGCMD_BACKUP); - - my $lTime = time(); - - my $strFullLabel = backupLabelFormat(CFGOPTVAL_BACKUP_TYPE_FULL, undef, $lTime); - storageRepo()->pathCreate(STORAGE_REPO_BACKUP . "/${strFullLabel}", {bCreateParent => true}); - my $strBackupPath = storageRepo()->pathGet(STORAGE_REPO_BACKUP . "/${strFullLabel}"); - my $strBackupManifestFile = "$strBackupPath/" . FILE_MANIFEST; - - my $strPath = "path"; - my $strSubPath = "$strBackupPath/$strPath"; - my $strInManifestNoChecksum = 'in_manifest_no_checksum'; - my $strInManifestWithChecksum = 'in_manifest_with_checksum'; - my $strInManifestWithReference = 'in_manifest_with_reference'; - - my $strExpectedManifest = $self->testPath() . '/expected.manifest'; - my $strAbortedManifest = $self->testPath() . 
'/aborted.manifest'; - my $oManifest = new pgBackRest::Manifest( - $strBackupManifestFile, - {bLoad => false, strDbVersion => PG_VERSION_94, iDbCatalogVersion => $self->dbCatalogVersion(PG_VERSION_94)}); - my $oAbortedManifest = new pgBackRest::Manifest( - $strBackupManifestFile, - {bLoad => false, strDbVersion => PG_VERSION_94, iDbCatalogVersion => $self->dbCatalogVersion(PG_VERSION_94)}); - my $oBackup = new pgBackRest::Backup::Backup(); - - $oAbortedManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_ONLINE, undef, false); - - # Compression prior enabled, gzip file exists and not in manifest, dir exists and is in manifest, delta not enabled - #--------------------------------------------------------------------------------------------------------------------------- - $oAbortedManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, true); - storageRepo()->put(storageRepo()->openWrite($strBackupPath . '/' . BOGUS . '.gz', - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - storageRepo()->pathCreate($strSubPath, {bIgnoreExists => true}); - - my $hDefault = {}; - $oManifest->set(MANIFEST_SECTION_TARGET_PATH, $strPath, undef, $hDefault); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, false, 'resumeClean, prior compression enabled, delta not enabled'); - $self->testResult(sub {!storageRepo()->exists($strBackupPath . '/' . BOGUS . 
'.gz')}, true, ' gzip file removed'); - $self->testResult(sub {storageRepo()->pathExists($strSubPath)}, true, ' path not removed'); - - # Disable compression - $oAbortedManifest->boolSet(MANIFEST_SECTION_BACKUP_OPTION, MANIFEST_KEY_COMPRESS, undef, false); - $oManifest->remove(MANIFEST_SECTION_TARGET_PATH, $strPath); - - # Path and files to be removed (not in oManifest) - #--------------------------------------------------------------------------------------------------------------------------- - storageRepo()->put(storageRepo()->openWrite($strBackupPath . "/" . FILE_MANIFEST_COPY, - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - storageRepo()->put(storageRepo()->openWrite($strSubPath . "/" . BOGUS, - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - storageRepo()->put(storageRepo()->openWrite($strBackupPath . "/" . BOGUS, - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - - $self->testResult(sub {storageRepo()->pathExists($strSubPath) && storageRepo()->exists($strSubPath . "/" . BOGUS) && - storageRepo()->exists($strBackupPath . "/" . BOGUS)}, - true, 'dir and files to be removed exist'); - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, true, - undef, undef)}, true, 'resumeClean, delta enabled, path and files to remove, manifest copy to remain'); - $self->testResult(sub {!storageRepo()->pathExists($strSubPath) && !storageRepo()->exists($strSubPath . "/" . BOGUS)}, - true, ' dir removed'); - $self->testResult(sub {!storageRepo()->exists($strBackupPath . "/" . BOGUS) && - storageRepo()->exists($strBackupPath . "/" . 
FILE_MANIFEST_COPY)}, true, - ' file removed, manifest copy remains'); - - # Online changed, delta enabled - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, true, false, - undef, undef)}, true, 'resumeClean, online changed, delta enabled'); - - # Online does not change, only backup.manifest.copy exists, delta not enabled - #--------------------------------------------------------------------------------------------------------------------------- - storageRepo()->put(storageRepo()->openWrite($strBackupPath . '/' . BOGUS, - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - storageRepo()->put(storageRepo()->openWrite($strBackupPath . "/$strInManifestWithReference", - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - storageRepo()->put(storageRepo()->openWrite($strBackupPath . "/$strInManifestNoChecksum", - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime})); - storageRepo()->put(storageRepo()->openWrite($strBackupPath . "/$strInManifestWithChecksum", - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime}), 'test'); - my ($strHash, $iSize) = storageRepo()->hashSize(storageRepo()->openRead($strBackupPath . 
"/$strInManifestWithChecksum")); - - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestNoChecksum, - MANIFEST_SUBKEY_SIZE, 0); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestNoChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_SIZE, $iSize); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithReference, - MANIFEST_SUBKEY_SIZE, 0); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithReference, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - $oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithReference, - MANIFEST_SUBKEY_REFERENCE, BOGUS); - - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestNoChecksum, - MANIFEST_SUBKEY_SIZE, 0); - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestNoChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_SIZE, $iSize); - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - $oAbortedManifest->set(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM, $strHash); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithReference, - MANIFEST_SUBKEY_SIZE, 0); - $oManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithReference, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - $oManifest->set(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithReference, - MANIFEST_SUBKEY_REFERENCE, BOGUS); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, false, 'resumeClean, online not changed, 
delta not enabled'); - $self->testResult(sub {!storageRepo()->exists($strBackupPath . "/" . BOGUS) && - !storageRepo()->exists($strBackupPath . "/$strInManifestNoChecksum") && - !storageRepo()->exists($strBackupPath . "/$strInManifestWithReference") && - storageRepo()->exists($strBackupPath . "/$strInManifestWithChecksum") && - storageRepo()->exists($strBackupPath . "/" . FILE_MANIFEST_COPY)}, true, - ' file not in manifest or in manifest but no-checksum removed, file in manifest and manifest.copy remains'); - $self->testResult(sub {$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM, - $strHash)}, true, ' checksum copied to manifest'); - - # Timestamp in the past for same-sized file with checksum. - #--------------------------------------------------------------------------------------------------------------------------- - $oManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM); - $self->testResult(sub {$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM)}, false, 'manifest checksum does not exist'); - - # Set the timestamp so that the new manifest appears to have a time in the past. This should enable delta. 
- $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime + 100); - - # Set checksum page for code coverage - $oAbortedManifest->set(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM_PAGE, false); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, true, ' resumeClean, timestamp in past, delta enabled'); - $self->testResult(sub {$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM, - $strHash) && $oManifest->boolTest(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM_PAGE, false)}, true, ' checksum copied to manifest'); - - # Timestamp different for same-sized file with checksum. - #--------------------------------------------------------------------------------------------------------------------------- - $oManifest->remove(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM); - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime - 100); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, false, 'resumeClean, timestamp different but size the same, delta not enabled'); - $self->testResult(sub {$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM) && !storageRepo()->exists($strBackupPath . "/$strInManifestWithChecksum")}, - false, ' checksum not copied to manifest, file removed'); - - # Size different, timestamp same for file with checksum. - #--------------------------------------------------------------------------------------------------------------------------- - storageRepo()->put(storageRepo()->openWrite($strBackupPath . 
"/$strInManifestWithChecksum", - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime}), 'test'); - - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_SIZE, $iSize - 1); - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, true, 'resumeClean, size different, timestamp same, delta enabled'); - $self->testResult(sub {$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM) && !storageRepo()->exists($strBackupPath . "/$strInManifestWithChecksum")}, - false, ' checksum not copied to manifest, file removed'); - - # Checksum page error and link to file. - #--------------------------------------------------------------------------------------------------------------------------- - storageRepo()->put(storageRepo()->openWrite($strBackupPath . "/$strInManifestWithChecksum", - {strMode => '0750', strUser => TEST_USER, strGroup => TEST_GROUP, lTimestamp => $lTime}), 'test'); - testLinkCreate($strBackupPath . "/testlink", $strBackupPath . "/$strInManifestWithChecksum"); - $self->testResult(sub {storageRepo()->exists($strBackupPath . 
"/testlink")}, true, 'link exists'); - - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_SIZE, $iSize); - $oAbortedManifest->numericSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_TIMESTAMP, $lTime); - - # Set checksum page for code coverage - $oAbortedManifest->boolSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM_PAGE, false); - $oAbortedManifest->set(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR, 'E'); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, false, ' resumeClean, delta not enabled'); - - $self->testResult(sub {$oManifest->test(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM_PAGE_ERROR, 'E') && !storageRepo()->exists($strBackupPath . "/testlink")}, - true, ' checksum page error copied to manifest, link removed'); - - # Checksum page=true - #--------------------------------------------------------------------------------------------------------------------------- - $oAbortedManifest->boolSet(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, MANIFEST_SUBKEY_CHECKSUM_PAGE, true); - - $self->testResult(sub {$oBackup->resumeClean(storageRepo(), $strFullLabel, $oManifest, $oAbortedManifest, false, false, - undef, undef)}, false, ' resumeClean, checksum page = true'); - - $self->testResult(sub {$oManifest->boolTest(MANIFEST_SECTION_TARGET_FILE, $strInManifestWithChecksum, - MANIFEST_SUBKEY_CHECKSUM_PAGE, true)}, true, ' checksum page set true in manifest'); - } -} - -1; diff --git a/test/lib/pgBackRestTest/Module/Command/CommandArchiveCommonPerlTest.pm b/test/lib/pgBackRestTest/Module/Command/CommandArchiveCommonPerlTest.pm deleted file mode 100644 index 41fd84c3a..000000000 --- a/test/lib/pgBackRestTest/Module/Command/CommandArchiveCommonPerlTest.pm +++ 
/dev/null @@ -1,154 +0,0 @@ -#################################################################################################################################### -# Archive Common Tests -#################################################################################################################################### -package pgBackRestTest::Module::Command::CommandArchiveCommonPerlTest; -use parent 'pgBackRestTest::Env::HostEnvTest'; - -#################################################################################################################################### -# Perl includes -#################################################################################################################################### -use strict; -use warnings FATAL => qw(all); -use Carp qw(confess); - -use Storable qw(dclone); - -use pgBackRest::Archive::Common; -use pgBackRest::Common::Exception; -use pgBackRest::Common::Log; -use pgBackRest::Config::Config; -use pgBackRest::DbVersion; -use pgBackRest::Protocol::Storage::Helper; - -use pgBackRestTest::Env::Host::HostBackupTest; - -#################################################################################################################################### -# run -#################################################################################################################################### -sub run -{ - my $self = shift; - my $strModule = 'ArchiveCommon'; - - ################################################################################################################################ - if ($self->begin("${strModule}::lsnFileRange()")) - { - $self->testResult(sub {lsnFileRange("1/60", "1/60", PG_VERSION_92, 16 * 1024 * 1024)}, "0000000100000000", 'get single'); - $self->testResult( - sub {lsnFileRange("1/FD000000", "2/1000000", PG_VERSION_92, 16 * 1024 * 1024)}, - "(00000001000000FD, 00000001000000FE, 0000000200000000, 0000000200000001)", 'get range < 9.3'); - $self->testResult( - sub {lsnFileRange("1/FD000000", 
"2/60", PG_VERSION_93, 16 * 1024 * 1024)}, - "(00000001000000FD, 00000001000000FE, 00000001000000FF, 0000000200000000)", 'get range >= 9.3'); - $self->testResult( - sub {lsnFileRange("A/800", "B/C0000000", PG_VERSION_11, 1024 * 1024 * 1024)}, - '(0000000A00000000, 0000000A00000001, 0000000A00000002, 0000000A00000003, 0000000B00000000, 0000000B00000001, ' . - '0000000B00000002, 0000000B00000003)', - 'get range >= 11/1GB'); - $self->testResult( - sub {lsnFileRange("7/FFEFFFFF", "8/001AAAAA", PG_VERSION_11, 1024 * 1024)}, - '(0000000700000FFE, 0000000700000FFF, 0000000800000000, 0000000800000001)', 'get range >= 11/1MB'); - } - - ################################################################################################################################ - if ($self->begin("${strModule}::walIsSegment()")) - { - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult(sub {walIsSegment('0000000200ABCDEF0000001')}, false, 'invalid segment'); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult(sub {walIsSegment('0000000200ABCDEF00000001')}, true, 'valid segment'); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult(sub {walIsSegment('000000010000000100000001.partial')}, true, 'valid partial segment'); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult(sub {walIsSegment('00000001.history')}, false, 'valid history file'); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult(sub {walIsSegment('000000020000000100000001.00000028.backup')}, false, 'valid backup file'); - } - - 
################################################################################################################################ - if ($self->begin("${strModule}::walIsPartial()")) - { - #--------------------------------------------------------------------------------------------------------------------------- - my $strWalSegment = '0000000200ABCDEF00000001'; - - $self->testResult(sub {walIsPartial($strWalSegment)}, false, "${strWalSegment} WAL is not partial"); - - #--------------------------------------------------------------------------------------------------------------------------- - $strWalSegment = $strWalSegment . '.partial'; - - $self->testResult(sub {walIsPartial($strWalSegment)}, true, "${strWalSegment} WAL is partial"); - } - - ################################################################################################################################ - if ($self->begin("${strModule}::walSegmentFind()")) - { - $self->optionTestSet(CFGOPT_STANZA, $self->stanza()); - $self->optionTestSet(CFGOPT_REPO_PATH, $self->testPath()); - $self->configTestLoad(CFGCMD_ARCHIVE_PUSH); - - my $strArchiveId = '9.4-1'; - my $strArchivePath = storageRepo()->pathGet(STORAGE_REPO_ARCHIVE . 
"/${strArchiveId}"); - - #--------------------------------------------------------------------------------------------------------------------------- - my $strWalSegment = '000000010000000100000001ZZ'; - - $self->testException( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment)}, ERROR_ASSERT, - "${strWalSegment} is not a WAL segment"); - - #--------------------------------------------------------------------------------------------------------------------------- - $strWalSegment = '000000010000000100000001'; - - $self->testResult( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment)}, undef, "${strWalSegment} WAL not found"); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testException( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment, .1)}, ERROR_ARCHIVE_TIMEOUT, - "could not find WAL segment ${strWalSegment} after 0.1 second(s)" . - "\nHINT: is archive_command configured correctly?" . - "\nHINT: use the check command to verify that PostgreSQL is archiving."); - - #--------------------------------------------------------------------------------------------------------------------------- - my $strWalMajorPath = "${strArchivePath}/" . 
substr($strWalSegment, 0, 16); - my $strWalSegmentHash = "${strWalSegment}-53aa5d59515aa7288ae02ba414c009aed1ca73ad"; - - storageRepo()->pathCreate($strWalMajorPath, {bCreateParent => true}); - storageRepo()->put("${strWalMajorPath}/${strWalSegmentHash}"); - - $self->testResult( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment)}, $strWalSegmentHash, "${strWalSegment} WAL found"); - - #--------------------------------------------------------------------------------------------------------------------------- - my $strWalSegmentHash2 = "${strWalSegment}-a0b0d38b8aa263e25b8ff52a0a4ba85b6be97f9b.gz"; - - storageRepo()->put("${strWalMajorPath}/${strWalSegmentHash2}"); - - $self->testException( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment)}, ERROR_ARCHIVE_DUPLICATE, - "duplicates found in archive for WAL segment ${strWalSegment}: ${strWalSegmentHash}, ${strWalSegmentHash2}"); - - storageRepo()->remove("${strWalMajorPath}/${strWalSegmentHash}"); - - #--------------------------------------------------------------------------------------------------------------------------- - $self->testResult( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment)}, $strWalSegmentHash2, - "${strWalSegment} WAL found with compressed extension"); - - storageRepo()->remove("${strWalMajorPath}/${strWalSegmentHash2}"); - - #--------------------------------------------------------------------------------------------------------------------------- - $strWalSegment = $strWalSegment . 
'.partial'; - $strWalSegmentHash = "${strWalSegment}-996195c807713ef9262170043e7222cb150aef70"; - storageRepo()->put("${strWalMajorPath}/${strWalSegmentHash}"); - - $self->testResult( - sub {walSegmentFind(storageRepo(), $strArchiveId, $strWalSegment)}, $strWalSegmentHash, "${strWalSegment} WAL found"); - } -} - -1; diff --git a/test/lib/pgBackRestTest/Module/Mock/MockAllTest.pm b/test/lib/pgBackRestTest/Module/Mock/MockAllTest.pm index 183211f7c..6abcc83db 100644 --- a/test/lib/pgBackRestTest/Module/Mock/MockAllTest.pm +++ b/test/lib/pgBackRestTest/Module/Mock/MockAllTest.pm @@ -516,7 +516,7 @@ sub run $oHostBackup->backup( $strType, 'invalid repo', {oExpectedManifest => \%oManifest, strOptionalParam => '--' . cfgOptionName(CFGOPT_REPO_PATH) . '=/bogus_path', - iExpectedExitStatus => $bS3 ? ERROR_FILE_MISSING : ERROR_PATH_MISSING}); + iExpectedExitStatus => ERROR_FILE_MISSING}); # Restore - tests various mode, extra files/paths, missing files/paths #--------------------------------------------------------------------------------------------------------------------------- @@ -822,7 +822,7 @@ sub run $oHostBackup->backup( $strType, '$PGDATA is a substring of valid tblspc excluding / (file missing err expected)', - {oExpectedManifest => \%oManifest, iExpectedExitStatus => ERROR_PATH_MISSING}); + {oExpectedManifest => \%oManifest, iExpectedExitStatus => ERROR_FILE_OPEN}); testFileRemove("${strTblSpcPath}/99999"); } diff --git a/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm b/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm index 099a301d8..bdc2744db 100644 --- a/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm +++ b/test/lib/pgBackRestTest/Module/Real/RealAllTest.pm @@ -475,7 +475,7 @@ sub run my $strStandbyBackup = $oHostBackup->backup( CFGOPTVAL_BACKUP_TYPE_FULL, 'backup from standby, failure to access at least one standby', {bStandby => true, - iExpectedExitStatus => ERROR_HOST_CONNECT, + iExpectedExitStatus => ERROR_DB_CONNECT, strOptionalParam => '--' . 
cfgOptionName(cfgOptionIdFromIndex(CFGOPT_PG_HOST, cfgOptionIndexTotal(CFGOPT_PG_PATH))) . '=' . BOGUS}); } @@ -535,7 +535,9 @@ sub run $oHostDbMaster->stop(); - $oHostBackup->backup(CFGOPTVAL_BACKUP_TYPE_INCR, 'attempt backup when stopped', {iExpectedExitStatus => ERROR_STOP}); + $oHostBackup->backup( + CFGOPTVAL_BACKUP_TYPE_INCR, 'attempt backup when stopped', + {iExpectedExitStatus => $oHostBackup == $oHostDbMaster ? ERROR_STOP : ERROR_DB_CONNECT}); $oHostDbMaster->start(); } diff --git a/test/src/module/command/backupTest.c b/test/src/module/command/backupTest.c index 53a4c78f7..b8b5b3f51 100644 --- a/test/src/module/command/backupTest.c +++ b/test/src/module/command/backupTest.c @@ -1,6 +1,10 @@ /*********************************************************************************************************************************** Test Backup Command ***********************************************************************************************************************************/ +#include + +#include "command/stanza/create.h" +#include "command/stanza/upgrade.h" #include "common/io/bufferRead.h" #include "common/io/bufferWrite.h" #include "common/io/io.h" @@ -8,6 +12,372 @@ Test Backup Command #include "storage/posix/storage.h" #include "common/harnessConfig.h" +#include "common/harnessPq.h" + +/*********************************************************************************************************************************** +Page header structure use to create realistic pages for testing +***********************************************************************************************************************************/ +typedef struct +{ + uint32_t walid; // high bits + uint32_t xrecoff; // low bits +} PageWalRecPtr; + +typedef struct PageHeaderData +{ + // LSN is member of *any* block, not only page-organized ones + PageWalRecPtr pd_lsn; // Lsn for last change to this page + uint16_t pd_checksum; // checksum + uint16_t pd_flags; // flag bits, see below + 
uint16_t pd_lower; // offset to start of free space + uint16_t pd_upper; // offset to end of free space +} PageHeaderData; + +/*********************************************************************************************************************************** +Get a list of all files in the backup +***********************************************************************************************************************************/ +typedef struct TestBackupValidateCallbackData +{ + const Storage *storage; // Storage object when needed (e.g. fileCompressed = true) + const String *path; // Subpath when storage is specified + const Manifest *manifest; // Manifest check for files/links/paths + String *content; // String where content should be added +} TestBackupValidateCallbackData; + +void +testBackupValidateCallback(void *callbackData, const StorageInfo *info) +{ + TestBackupValidateCallbackData *data = callbackData; + + // Don't include . when it is a path (we'll still include it when it is a link so we can see the destination) + if (info->type == storageTypePath && strEq(info->name, DOT_STR)) + return; + + // Don't include backup.manifest or copy. We'll test that they are present elsewhere + if (info->type == storageTypeFile && + (strEqZ(info->name, BACKUP_MANIFEST_FILE) || strEqZ(info->name, BACKUP_MANIFEST_FILE INFO_COPY_EXT))) + return; + + strCatFmt(data->content, "%s {", strPtr(info->name)); + + switch (info->type) + { + case storageTypeFile: + { + strCat(data->content, "file"); + + uint64_t size = info->size; + const String *manifestName = info->name; + + // If the file is compressed then decompress to get the real size + if (strEndsWithZ(info->name, "." GZIP_EXT)) + { + ASSERT(data->storage != NULL); + + StorageRead *read = storageNewReadP( + data->storage, + data->path != NULL ? 
strNewFmt("%s/%s", strPtr(data->path), strPtr(info->name)) : info->name); + ioFilterGroupAdd(ioReadFilterGroup(storageReadIo(read)), gzipDecompressNew(false)); + size = bufUsed(storageGetP(read)); + + manifestName = strSubN(info->name, 0, strSize(info->name) - strlen("." GZIP_EXT)); + } + + strCatFmt(data->content, ", s=%" PRIu64, size); + + // Check against the manifest + const ManifestFile *file = manifestFileFind(data->manifest, manifestName); + + if (size != file->size) + THROW_FMT(AssertError, "'%s' size does match manifest", strPtr(manifestName)); + + if (info->size != file->sizeRepo) + THROW_FMT(AssertError, "'%s' repo size does match manifest", strPtr(manifestName)); + + if (info->mode != 0640) + THROW_FMT(AssertError, "'%s' mode is not 0640", strPtr(manifestName)); + + if (!strEqZ(info->user, testUser())) + THROW_FMT(AssertError, "'%s' user should be '%s'", strPtr(manifestName), testUser()); + + if (!strEqZ(info->group, testGroup())) + THROW_FMT(AssertError, "'%s' group should be '%s'", strPtr(manifestName), testGroup()); + + break; + } + + case storageTypeLink: + { + strCatFmt(data->content, "link, d=%s", strPtr(info->linkDestination)); + break; + } + + case storageTypePath: + { + strCat(data->content, "path"); + + // Check against the manifest + manifestPathFind(data->manifest, info->name); + + if (info->mode != 0750) + THROW_FMT(AssertError, "'%s' mode is not 00750", strPtr(info->name)); + + if (!strEqZ(info->user, testUser())) + THROW_FMT(AssertError, "'%s' user should be '%s'", strPtr(info->name), testUser()); + + if (!strEqZ(info->group, testGroup())) + THROW_FMT(AssertError, "'%s' group should be '%s'", strPtr(info->name), testGroup()); + + break; + } + + case storageTypeSpecial: + { + THROW_FMT(AssertError, "unexpected special file '%s'", strPtr(info->name)); + break; + } + } + + strCat(data->content, "}\n"); +} + +static String * +testBackupValidate(const Storage *storage, const String *path) +{ + FUNCTION_HARNESS_BEGIN(); + 
FUNCTION_HARNESS_PARAM(STORAGE, storage); + FUNCTION_HARNESS_PARAM(STRING, path); + FUNCTION_HARNESS_END(); + + String *result = strNew(""); + + MEM_CONTEXT_TEMP_BEGIN() + { + // Make sure both backup.manifest files exist + if (!storageExistsP(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE, strPtr(path)))) + THROW(AssertError, BACKUP_MANIFEST_FILE " is missing"); + + if (!storageExistsP(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strPtr(path)))) + THROW(AssertError, BACKUP_MANIFEST_FILE INFO_COPY_EXT " is missing"); + + // Build a list of files in the backup path and verify against the manifest + Manifest *manifest = manifestLoadFile(storage, strNewFmt("%s/" BACKUP_MANIFEST_FILE, strPtr(path)), cipherTypeNone, NULL); + + TestBackupValidateCallbackData callbackData = + { + .storage = storage, + .path = path, + .content = result, + .manifest = manifest, + }; + + storageInfoListP(storage, path, testBackupValidateCallback, &callbackData, .recurse = true, .sortOrder = sortOrderAsc); + } + MEM_CONTEXT_TEMP_END(); + + FUNCTION_HARNESS_RESULT(STRING, result); +} + +/*********************************************************************************************************************************** +Generate pq scripts for versions of PostgreSQL +***********************************************************************************************************************************/ +typedef struct TestBackupPqScriptParam +{ + VAR_PARAM_HEADER; + bool startFast; + bool backupStandby; + bool errorAfterStart; + bool noWal; // Don't write test WAL segments + bool walCompress; // Compress the archive files + unsigned int walTotal; // Total WAL to write +} TestBackupPqScriptParam; + +#define testBackupPqScriptP(pgVersion, backupStartTime, ...) 
\ + testBackupPqScript(pgVersion, backupStartTime, (TestBackupPqScriptParam){VAR_PARAM_INIT, __VA_ARGS__}) + +static void +testBackupPqScript(unsigned int pgVersion, time_t backupTimeStart, TestBackupPqScriptParam param) +{ + const char *pg1Path = strPtr(strNewFmt("%s/pg1", testPath())); + const char *pg2Path = strPtr(strNewFmt("%s/pg2", testPath())); + + // Read pg_control to get info about the cluster + PgControl pgControl = pgControlFromFile(storagePg()); + + // Set archive timeout really small to save time on errors + cfgOptionSet(cfgOptArchiveTimeout, cfgSourceParam, varNewDbl(.1)); + + uint64_t lsnStart = ((uint64_t)backupTimeStart & 0xFFFFFF00) << 28; + uint64_t lsnStop = + lsnStart + ((param.walTotal == 0 ? 0 : param.walTotal - 1) * pgControl.walSegmentSize) + (pgControl.walSegmentSize / 2); + + const char *lsnStartStr = strPtr(pgLsnToStr(lsnStart)); + const char *walSegmentStart = strPtr(pgLsnToWalSegment(1, lsnStart, pgControl.walSegmentSize)); + const char *lsnStopStr = strPtr(pgLsnToStr(lsnStop)); + const char *walSegmentStop = strPtr(pgLsnToWalSegment(1, lsnStop, pgControl.walSegmentSize)); + + // Write WAL segments to the archive + // ----------------------------------------------------------------------------------------------------------------------------- + if (!param.noWal) + { + InfoArchive *infoArchive = infoArchiveLoadFile(storageRepo(), INFO_ARCHIVE_PATH_FILE_STR, cipherTypeNone, NULL); + const String *archiveId = infoArchiveId(infoArchive); + StringList *walSegmentList = pgLsnRangeToWalSegmentList(pgControl.version, 1, lsnStart, lsnStop, pgControl.walSegmentSize); + + Buffer *walBuffer = bufNew((size_t)pgControl.walSegmentSize); + bufUsedSet(walBuffer, bufSize(walBuffer)); + memset(bufPtr(walBuffer), 0, bufSize(walBuffer)); + pgWalTestToBuffer((PgWal){.version = pgControl.version, .systemId = pgControl.systemId}, walBuffer); + const String *walChecksum = bufHex(cryptoHashOne(HASH_TYPE_SHA1_STR, walBuffer)); + + for (unsigned int walSegmentIdx 
= 0; walSegmentIdx < strLstSize(walSegmentList); walSegmentIdx++) + { + StorageWrite *write = storageNewWriteP( + storageRepoWrite(), + strNewFmt( + STORAGE_REPO_ARCHIVE "/%s/%s-%s%s", strPtr(archiveId), strPtr(strLstGet(walSegmentList, walSegmentIdx)), + strPtr(walChecksum), param.walCompress ? "." GZIP_EXT : "")); + + if (param.walCompress) + ioFilterGroupAdd(ioWriteFilterGroup(storageWriteIo(write)), gzipCompressNew(1, false)); + + storagePutP(write, walBuffer); + } + } + + // ----------------------------------------------------------------------------------------------------------------------------- + if (pgVersion == PG_VERSION_95) + { + ASSERT(!param.backupStandby); + ASSERT(!param.errorAfterStart); + + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_95, pg1Path, false, NULL, NULL), + + // Get start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000), + + // Start backup + HRNPQ_MACRO_ADVISORY_LOCK(1, true), + HRNPQ_MACRO_START_BACKUP_84_95(1, param.startFast, lsnStartStr, walSegmentStart), + HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"), + HRNPQ_MACRO_TABLESPACE_LIST_0(1), + + // Get copy start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999), + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000), + + // Stop backup + HRNPQ_MACRO_STOP_BACKUP_LE_95(1, lsnStopStr, walSegmentStop), + + // Get stop time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000), + + HRNPQ_MACRO_DONE() + }); + } + // ----------------------------------------------------------------------------------------------------------------------------- + else if (pgVersion == PG_VERSION_96) + { + ASSERT(param.backupStandby); + ASSERT(!param.errorAfterStart); + + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_96, pg1Path, false, NULL, NULL), + + // Connect to standby + 
HRNPQ_MACRO_OPEN_GE_92(2, "dbname='postgres' port=5433", PG_VERSION_96, pg2Path, true, NULL, NULL), + + // Get start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000), + + // Start backup + HRNPQ_MACRO_ADVISORY_LOCK(1, true), + HRNPQ_MACRO_START_BACKUP_96(1, true, lsnStartStr, walSegmentStart), + HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"), + HRNPQ_MACRO_TABLESPACE_LIST_0(1), + + // Wait for standby to sync + HRNPQ_MACRO_REPLAY_WAIT_96(2, lsnStartStr), + + // Get copy start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999), + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000), + + // Stop backup + HRNPQ_MACRO_STOP_BACKUP_96(1, lsnStopStr, walSegmentStop, false), + + // Get stop time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000), + + HRNPQ_MACRO_DONE() + }); + } + // ----------------------------------------------------------------------------------------------------------------------------- + else if (pgVersion == PG_VERSION_11) + { + ASSERT(!param.backupStandby); + + if (param.errorAfterStart) + { + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_11, pg1Path, false, NULL, NULL), + + // Get start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000), + + // Start backup + HRNPQ_MACRO_ADVISORY_LOCK(1, true), + HRNPQ_MACRO_START_BACKUP_GE_10(1, param.startFast, lsnStartStr, walSegmentStart), + HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"), + HRNPQ_MACRO_TABLESPACE_LIST_1(1, 32768, "tblspc32768"), + + // Get copy start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999), + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000), + + HRNPQ_MACRO_DONE() + }); + } + else + { + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_11, pg1Path, false, NULL, NULL), + + // Get start time + 
HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000), + + // Start backup + HRNPQ_MACRO_ADVISORY_LOCK(1, true), + HRNPQ_MACRO_START_BACKUP_GE_10(1, param.startFast, lsnStartStr, walSegmentStart), + HRNPQ_MACRO_DATABASE_LIST_1(1, "test1"), + HRNPQ_MACRO_TABLESPACE_LIST_1(1, 32768, "tblspc32768"), + + // Get copy start time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 999), + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 1000), + + // Stop backup + HRNPQ_MACRO_STOP_BACKUP_GE_10(1, lsnStopStr, walSegmentStop, false), + + // Get stop time + HRNPQ_MACRO_TIME_QUERY(1, (int64_t)backupTimeStart * 1000 + 2000), + + HRNPQ_MACRO_DONE() + }); + } + } + else + THROW_FMT(AssertError, "unsupported test version %u", pgVersion); // {uncoverable - no invalid versions in tests} +}; /*********************************************************************************************************************************** Test Run @@ -17,6 +387,12 @@ testRun(void) { FUNCTION_HARNESS_VOID(); + // The tests expect the timezone to be UTC + setenv("TZ", "UTC", true); + + Storage *storageTest = storagePosixNew( + strNew(testPath()), STORAGE_MODE_FILE_DEFAULT, STORAGE_MODE_PATH_DEFAULT, true, NULL); + // Start a protocol server to test the protocol directly Buffer *serverWrite = bufNew(8192); IoWrite *serverWriteIo = ioBufferWriteNew(serverWrite); @@ -71,14 +447,14 @@ testRun(void) varLstAdd(paramList, varNewUInt64(0)); // pgFileSize varLstAdd(paramList, NULL); // pgFileChecksum varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 1 - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 2 + varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit varLstAdd(paramList, varNewStr(missingFile)); // repoFile varLstAdd(paramList, varNewBool(false)); // repoFileHasReference varLstAdd(paramList, varNewBool(false)); // repoFileCompress varLstAdd(paramList, 
varNewUInt(0)); // repoFileCompressLevel varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel varLstAdd(paramList, varNewBool(false)); // delta + varLstAdd(paramList, NULL); // cipherSubPass TEST_RESULT_BOOL( backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - skip"); @@ -146,14 +522,14 @@ testRun(void) varLstAdd(paramList, varNewUInt64(9)); // pgFileSize varLstAdd(paramList, NULL); // pgFileChecksum varLstAdd(paramList, varNewBool(true)); // pgFileChecksumPage - varLstAdd(paramList, varNewUInt64(0xFFFFFFFF)); // pgFileChecksumPageLsnLimit 1 - varLstAdd(paramList, varNewUInt64(0xFFFFFFFF)); // pgFileChecksumPageLsnLimit 2 + varLstAdd(paramList, varNewUInt64(0xFFFFFFFFFFFFFFFF)); // pgFileChecksumPageLsnLimit varLstAdd(paramList, varNewStr(pgFile)); // repoFile varLstAdd(paramList, varNewBool(false)); // repoFileHasReference varLstAdd(paramList, varNewBool(false)); // repoFileCompress varLstAdd(paramList, varNewUInt(1)); // repoFileCompressLevel varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel varLstAdd(paramList, varNewBool(false)); // delta + varLstAdd(paramList, NULL); // cipherSubPass TEST_RESULT_BOOL( backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - pageChecksum"); @@ -188,14 +564,14 @@ testRun(void) varLstAdd(paramList, varNewUInt64(9)); // pgFileSize varLstAdd(paramList, varNewStrZ("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")); // pgFileChecksum varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 1 - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 2 + varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit varLstAdd(paramList, varNewStr(pgFile)); // repoFile varLstAdd(paramList, varNewBool(true)); // repoFileHasReference varLstAdd(paramList, varNewBool(false)); // repoFileCompress varLstAdd(paramList, varNewUInt(1)); // 
repoFileCompressLevel varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel varLstAdd(paramList, varNewBool(true)); // delta + varLstAdd(paramList, NULL); // cipherSubPass TEST_RESULT_BOOL( backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - noop"); @@ -328,14 +704,14 @@ testRun(void) varLstAdd(paramList, varNewUInt64(9)); // pgFileSize varLstAdd(paramList, varNewStrZ("9bc8ab2dda60ef4beed07d1e19ce0676d5edde67")); // pgFileChecksum varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 1 - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 2 + varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit varLstAdd(paramList, varNewStr(pgFile)); // repoFile varLstAdd(paramList, varNewBool(false)); // repoFileHasReference varLstAdd(paramList, varNewBool(true)); // repoFileCompress varLstAdd(paramList, varNewUInt(3)); // repoFileCompressLevel varLstAdd(paramList, varNewStr(backupLabel)); // backupLabel varLstAdd(paramList, varNewBool(false)); // delta + varLstAdd(paramList, NULL); // cipherSubPass TEST_RESULT_BOOL( backupProtocol(PROTOCOL_COMMAND_BACKUP_FILE_STR, paramList, server), true, "protocol backup file - copy, compress"); @@ -447,8 +823,7 @@ testRun(void) varLstAdd(paramList, varNewUInt64(9)); // pgFileSize varLstAdd(paramList, varNewStrZ("1234567890123456789012345678901234567890")); // pgFileChecksum varLstAdd(paramList, varNewBool(false)); // pgFileChecksumPage - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 1 - varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit 2 + varLstAdd(paramList, varNewUInt64(0)); // pgFileChecksumPageLsnLimit varLstAdd(paramList, varNewStr(pgFile)); // repoFile varLstAdd(paramList, varNewBool(false)); // repoFileHasReference varLstAdd(paramList, varNewBool(false)); // repoFileCompress @@ -465,5 +840,1433 @@ testRun(void) 
bufUsedSet(serverWrite, 0); } + // ***************************************************************************************************************************** + if (testBegin("backupLabelCreate()")) + { + const String *pg1Path = strNewFmt("%s/pg1", testPath()); + const String *repoPath = strNewFmt("%s/repo", testPath()); + + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + harnessCfgLoad(cfgCmdBackup, argList); + + time_t timestamp = 1575401652; + String *backupLabel = backupLabelFormat(backupTypeFull, NULL, timestamp); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("assign label when no history"); + + storagePathCreateP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/backup.history/2019")); + + TEST_RESULT_STR_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("assign label when history is older"); + + storagePutP( + storageNewWriteP( + storageRepoWrite(), + strNewFmt( + STORAGE_REPO_BACKUP "/backup.history/2019/%s.manifest.gz", + strPtr(backupLabelFormat(backupTypeFull, NULL, timestamp - 4)))), + NULL); + + TEST_RESULT_STR_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("assign label when backup is older"); + + storagePutP( + storageNewWriteP( + storageRepoWrite(), + strNewFmt(STORAGE_REPO_BACKUP "/%s", strPtr(backupLabelFormat(backupTypeFull, NULL, timestamp - 
2)))), + NULL); + + TEST_RESULT_STR_STR(backupLabelCreate(backupTypeFull, NULL, timestamp), backupLabel, "create label"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("advance time when backup is same"); + + storagePutP( + storageNewWriteP( + storageRepoWrite(), + strNewFmt(STORAGE_REPO_BACKUP "/%s", strPtr(backupLabelFormat(backupTypeFull, NULL, timestamp)))), + NULL); + + TEST_RESULT_STR_Z(backupLabelCreate(backupTypeFull, NULL, timestamp), "20191203-193413F", "create label"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when new label is in the past even with advanced time"); + + storagePutP( + storageNewWriteP( + storageRepoWrite(), + strNewFmt(STORAGE_REPO_BACKUP "/%s", strPtr(backupLabelFormat(backupTypeFull, NULL, timestamp + 1)))), + NULL); + + TEST_ERROR( + backupLabelCreate(backupTypeFull, NULL, timestamp), FormatError, + "new backup label '20191203-193413F' is not later than latest backup label '20191203-193413F'\n" + "HINT: has the timezone changed?\n" + "HINT: is there clock skew?"); + } + + // ***************************************************************************************************************************** + if (testBegin("backupInit()")) + { + const String *pg1Path = strNewFmt("%s/pg1", testPath()); + const String *repoPath = strNewFmt("%s/repo", testPath()); + + // Set log level to detail + harnessLogLevelSet(logLevelDetail); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when backup from standby is not supported"); + + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH 
"=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_BACKUP_STANDBY); + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_ERROR( + backupInit(infoBackupNew(PG_VERSION_91, 1000000000000000910, NULL)), ConfigError, + "option 'backup-standby' not valid for PostgreSQL < 9.2"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("warn and reset when backup from standby used in offline mode"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_92, .systemId = 1000000000000000920})); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_BACKUP_STANDBY); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_RESULT_VOID(backupInit(infoBackupNew(PG_VERSION_92, 1000000000000000920, NULL)), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptBackupStandby), false, " check backup-standby"); + + TEST_RESULT_LOG( + "P00 WARN: option backup-standby is enabled but backup is offline - backups will be performed from the primary"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when pg_control does not match stanza"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_10, .systemId = 1000000000000001000})); + + 
argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_ERROR( + backupInit(infoBackupNew(PG_VERSION_11, 1000000000000001100, NULL)), BackupMismatchError, + "PostgreSQL version 10, system-id 1000000000000001000 do not match stanza version 11, system-id 1000000000000001100\n" + "HINT: is this the correct stanza?"); + TEST_ERROR( + backupInit(infoBackupNew(PG_VERSION_10, 1000000000000001100, NULL)), BackupMismatchError, + "PostgreSQL version 10, system-id 1000000000000001000 do not match stanza version 10, system-id 1000000000000001100\n" + "HINT: is this the correct stanza?"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("reset start-fast when PostgreSQL < 8.4"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_83, .systemId = 1000000000000000830})); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + strLstAddZ(argList, "--" CFGOPT_START_FAST); + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_RESULT_VOID(backupInit(infoBackupNew(PG_VERSION_83, 1000000000000000830, NULL)), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptStartFast), false, " check start-fast"); + + TEST_RESULT_LOG("P00 WARN: 
start-fast option is only available in PostgreSQL >= 8.4"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("reset stop-auto when PostgreSQL < 9.3 or PostgreSQL > 9.5"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_84, .systemId = 1000000000000000840})); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + strLstAddZ(argList, "--" CFGOPT_STOP_AUTO); + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_RESULT_VOID(backupInit(infoBackupNew(PG_VERSION_84, 1000000000000000840, NULL)), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptStopAuto), false, " check stop-auto"); + + TEST_RESULT_LOG("P00 WARN: stop-auto option is only available in PostgreSQL >= 9.3 and <= 9.5"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 1000000000000000960})); + + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_RESULT_VOID(backupInit(infoBackupNew(PG_VERSION_96, 1000000000000000960, NULL)), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptStopAuto), false, " check stop-auto"); + + TEST_RESULT_LOG("P00 WARN: stop-auto option is only available in PostgreSQL >= 9.3 and <= 9.5"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = 
PG_VERSION_95, .systemId = 1000000000000000950})); + + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_RESULT_VOID(backupInit(infoBackupNew(PG_VERSION_95, 1000000000000000950, NULL)), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptStopAuto), true, " check stop-auto"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("reset checksum-page when the cluster does not have checksums enabled"); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93})); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_CHECKSUM_PAGE); + harnessCfgLoad(cfgCmdBackup, argList); + + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_96, strPtr(pg1Path), false, NULL, NULL), + + HRNPQ_MACRO_DONE() + }); + + TEST_RESULT_VOID(dbFree(backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, NULL))->dbPrimary), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, " check checksum-page"); + + TEST_RESULT_LOG( + "P00 WARN: checksum-page option set to true but checksums are not enabled on the cluster, resetting to false"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("ok if cluster checksums are enabled and checksum-page is any value"); + + // Create pg_control with page checksums + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" 
PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93, .pageChecksum = true})); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_CHECKSUM_PAGE); + harnessCfgLoad(cfgCmdBackup, argList); + + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_96, strPtr(pg1Path), false, NULL, NULL), + + HRNPQ_MACRO_DONE() + }); + + TEST_RESULT_VOID(dbFree(backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, NULL))->dbPrimary), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, " check checksum-page"); + + // Create pg_control without page checksums + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93})); + + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_96, strPtr(pg1Path), false, NULL, NULL), + + HRNPQ_MACRO_DONE() + }); + + TEST_RESULT_VOID(dbFree(backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, NULL))->dbPrimary), "backup init"); + TEST_RESULT_BOOL(cfgOptionBool(cfgOptChecksumPage), false, " check checksum-page"); + } + + // ***************************************************************************************************************************** + if (testBegin("backupTime()")) + { + const String *pg1Path = strNewFmt("%s/pg1", testPath()); + const String *repoPath = strNewFmt("%s/repo", testPath()); + + // 
------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when second does not advance after sleep"); + + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + harnessCfgLoad(cfgCmdBackup, argList); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_93, .systemId = PG_VERSION_93})); + + harnessPqScriptSet((HarnessPq []) + { + // Connect to primary + HRNPQ_MACRO_OPEN_GE_92(1, "dbname='postgres' port=5432", PG_VERSION_96, strPtr(pg1Path), false, NULL, NULL), + + // Don't advance time after wait + HRNPQ_MACRO_TIME_QUERY(1, 1575392588998), + HRNPQ_MACRO_TIME_QUERY(1, 1575392588999), + + HRNPQ_MACRO_DONE() + }); + + BackupData *backupData = backupInit(infoBackupNew(PG_VERSION_93, PG_VERSION_93, NULL)); + + TEST_ERROR(backupTime(backupData, true), AssertError, "invalid sleep for online backup time with wait remainder"); + dbFree(backupData->dbPrimary); + } + + // ***************************************************************************************************************************** + if (testBegin("backupResumeFind()")) + { + const String *repoPath = strNewFmt("%s/repo", testPath()); + + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAddZ(argList, "--" CFGOPT_PG1_PATH "=/pg"); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL); + strLstAddZ(argList, "--no-" CFGOPT_COMPRESS); + 
harnessCfgLoad(cfgCmdBackup, argList); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cannot resume empty directory"); + + storagePathCreateP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")); + + TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cannot resume when resume is disabled"); + + cfgOptionSet(cfgOptResume, cfgSourceParam, BOOL_FALSE_VAR); + + storagePutP( + storageNewWriteP( + storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT)), + NULL); + + TEST_RESULT_PTR(backupResumeFind((Manifest *)1, NULL), NULL, "find resumable backup"); + + TEST_RESULT_LOG( + "P00 WARN: backup '20191003-105320F' cannot be resumed: resume is disabled"); + + TEST_RESULT_BOOL( + storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed"); + + cfgOptionSet(cfgOptResume, cfgSourceParam, BOOL_TRUE_VAR); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cannot resume when pgBackRest version has changed"); + + Manifest *manifestResume = manifestNewInternal(); + manifestResume->info = infoNew(NULL); + manifestResume->data.backupType = backupTypeFull; + manifestResume->data.backupLabel = STRDEF("20191003-105320F"); + manifestResume->data.pgVersion = PG_VERSION_12; + + manifestTargetAdd(manifestResume, &(ManifestTarget){.name = MANIFEST_TARGET_PGDATA_STR, .path = STRDEF("/pg")}); + manifestPathAdd(manifestResume, &(ManifestPath){.name = MANIFEST_TARGET_PGDATA_STR}); + manifestFileAdd(manifestResume, &(ManifestFile){.name = STRDEF("pg_data/" PG_FILE_PGVERSION)}); + + manifestSave( + manifestResume, + 
storageWriteIo( + storageNewWriteP( + storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT)))); + + Manifest *manifest = manifestNewInternal(); + manifest->data.backupType = backupTypeFull; + manifest->data.backrestVersion = STRDEF("BOGUS"); + + TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup"); + + TEST_RESULT_LOG( + "P00 WARN: backup '20191003-105320F' cannot be resumed:" + " new pgBackRest version 'BOGUS' does not match resumable pgBackRest version '" PROJECT_VERSION "'"); + + TEST_RESULT_BOOL( + storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed"); + + manifest->data.backrestVersion = STRDEF(PROJECT_VERSION); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cannot resume when backup labels do not match (resumable is null)"); + + manifest->data.backupType = backupTypeFull; + manifest->data.backupLabelPrior = STRDEF("20191003-105320F"); + + manifestSave( + manifestResume, + storageWriteIo( + storageNewWriteP( + storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT)))); + + TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup"); + + TEST_RESULT_LOG( + "P00 WARN: backup '20191003-105320F' cannot be resumed:" + " new prior backup label '' does not match resumable prior backup label '20191003-105320F'"); + + TEST_RESULT_BOOL( + storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed"); + + manifest->data.backupLabelPrior = NULL; + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cannot resume when backup labels do not match (new is null)"); + + manifest->data.backupType = backupTypeFull; + 
manifestResume->data.backupLabelPrior = STRDEF("20191003-105320F"); + + manifestSave( + manifestResume, + storageWriteIo( + storageNewWriteP( + storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT)))); + + TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup"); + + TEST_RESULT_LOG( + "P00 WARN: backup '20191003-105320F' cannot be resumed:" + " new prior backup label '20191003-105320F' does not match resumable prior backup label ''"); + + TEST_RESULT_BOOL( + storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed"); + + manifestResume->data.backupLabelPrior = NULL; + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("cannot resume when compression does not match"); + + manifestResume->data.backupOptionCompress = true; + + manifestSave( + manifestResume, + storageWriteIo( + storageNewWriteP( + storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F/" BACKUP_MANIFEST_FILE INFO_COPY_EXT)))); + + TEST_RESULT_PTR(backupResumeFind(manifest, NULL), NULL, "find resumable backup"); + + TEST_RESULT_LOG( + "P00 WARN: backup '20191003-105320F' cannot be resumed:" + " new compression 'false' does not match resumable compression 'true'"); + + TEST_RESULT_BOOL( + storagePathExistsP(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191003-105320F")), false, "check backup path removed"); + + manifestResume->data.backupOptionCompress = false; + } + + // ***************************************************************************************************************************** + if (testBegin("backupJobResult()")) + { + // Set log level to detail + harnessLogLevelSet(logLevelDetail); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when postmaster.pid 
exists"); + + ProtocolParallelJob *job = protocolParallelJobNew(VARSTRDEF("key"), protocolCommandNew(STRDEF("command"))); + protocolParallelJobErrorSet(job, errorTypeCode(&AssertError), STRDEF("error message")); + + TEST_ERROR(backupJobResult((Manifest *)1, NULL, STRDEF("log"), job, 0, 0, 0), AssertError, "error message"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("remove skipped file"); + + // Create job that skips file + job = protocolParallelJobNew(VARSTRDEF("pg_data/test"), protocolCommandNew(STRDEF("command"))); + + VariantList *result = varLstNew(); + varLstAdd(result, varNewUInt64(backupCopyResultSkip)); + varLstAdd(result, varNewUInt64(0)); + varLstAdd(result, varNewUInt64(0)); + varLstAdd(result, NULL); + varLstAdd(result, NULL); + + protocolParallelJobResultSet(job, varNewVarLst(result)); + + // Create manifest with file + Manifest *manifest = manifestNewInternal(); + manifestFileAdd(manifest, &(ManifestFile){.name = STRDEF("pg_data/test")}); + + TEST_RESULT_UINT(backupJobResult(manifest, STRDEF("host"), STRDEF("log-test"), job, 0, 0, 0), 0, "log skip result"); + + TEST_RESULT_LOG("P00 DETAIL: skip file removed by database host:log-test"); + } + + // Offline tests should only be used to test offline functionality and errors easily tested in offline mode + // ***************************************************************************************************************************** + if (testBegin("cmdBackup() offline")) + { + const String *pg1Path = strNewFmt("%s/pg1", testPath()); + const String *repoPath = strNewFmt("%s/repo", testPath()); + + // Set log level to detail + harnessLogLevelSet(logLevelDetail); + + // Replace backup labels since the times are not deterministic + hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F_[0-9]{8}-[0-9]{6}I", NULL, "INCR", true); + hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F_[0-9]{8}-[0-9]{6}D", NULL, "DIFF", true); + 
hrnLogReplaceAdd("[0-9]{8}-[0-9]{6}F", NULL, "FULL", true); + + // Create pg_control + storagePutP( + storageNewWriteP(storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path))), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_84, .systemId = 1000000000000000840})); + + // Create stanza + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdStanzaCreate, argList); + + cmdStanzaCreate(); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when postmaster.pid exists"); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdBackup, argList); + + storagePutP(storageNewWriteP(storagePgWrite(), PG_FILE_POSTMASTERPID_STR), BUFSTRDEF("PID")); + + TEST_ERROR( + cmdBackup(), PostmasterRunningError, + "--no-online passed but postmaster.pid exists - looks like the postmaster is running. 
Shutdown the postmaster and try" + " again, or use --force."); + + TEST_RESULT_LOG("P00 WARN: no prior backup exists, incr backup has been changed to full"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("offline full backup"); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + strLstAddZ(argList, "--no-" CFGOPT_COMPRESS); + strLstAddZ(argList, "--" CFGOPT_FORCE); + harnessCfgLoad(cfgCmdBackup, argList); + + storagePutP(storageNewWriteP(storagePgWrite(), STRDEF("postgresql.conf")), BUFSTRDEF("CONFIGSTUFF")); + + TEST_RESULT_VOID(cmdBackup(), "backup"); + + TEST_RESULT_LOG_FMT( + "P00 WARN: no prior backup exists, incr backup has been changed to full\n" + "P00 WARN: --no-online passed and postmaster.pid exists but --force was passed so backup will continue though it" + " looks like the postmaster is running and the backup will probably not be consistent\n" + "P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, 99%%) checksum %s\n" + "P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, 100%%) checksum e3db315c260e79211b7b52587123b7aa060f30ab\n" + "P00 INFO: full backup size = 8KB\n" + "P00 INFO: new backup label = [FULL-1]", + TEST_64BIT() ? 
"21e2ddc99cdf4cfca272eee4f38891146092e358" : "8bb70506d988a8698d9e8cf90736ada23634571b"); + + // Remove postmaster.pid + storageRemoveP(storagePgWrite(), PG_FILE_POSTMASTERPID_STR, .errorOnMissing = true); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when no files have changed"); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + strLstAddZ(argList, "--" CFGOPT_COMPRESS); + strLstAddZ(argList, "--" CFGOPT_REPO1_HARDLINK); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_DIFF); + harnessCfgLoad(cfgCmdBackup, argList); + + TEST_ERROR(cmdBackup(), FileMissingError, "no files have changed since the last backup - this seems unlikely"); + + TEST_RESULT_LOG( + "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n" + "P00 WARN: diff backup cannot alter compress option to 'true', reset to value in [FULL-1]\n" + "P00 WARN: diff backup cannot alter hardlink option to 'true', reset to value in [FULL-1]"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("offline incr backup to test unresumable backup"); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + strLstAddZ(argList, "--no-" CFGOPT_COMPRESS); + strLstAddZ(argList, "--" CFGOPT_CHECKSUM_PAGE); + strLstAddZ(argList, "--" 
CFGOPT_TYPE "=" BACKUP_TYPE_INCR); + harnessCfgLoad(cfgCmdBackup, argList); + + storagePutP(storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR), BUFSTRDEF("VER")); + + TEST_RESULT_VOID(cmdBackup(), "backup"); + + TEST_RESULT_LOG( + "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n" + "P00 WARN: incr backup cannot alter 'checksum-page' option to 'true', reset to 'false' from [FULL-1]\n" + "P00 WARN: backup '[DIFF-1]' cannot be resumed: new backup type 'incr' does not match resumable backup type 'diff'\n" + "P01 INFO: backup file {[path]}/pg1/PG_VERSION (3B, 100%) checksum c8663c2525f44b6d9c687fbceb4aafc63ed8b451\n" + "P00 DETAIL: reference pg_data/global/pg_control to [FULL-1]\n" + "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n" + "P00 INFO: incr backup size = 3B\n" + "P00 INFO: new backup label = [INCR-1]"); + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("offline diff backup to test prior backup must be full"); + + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + strLstAddZ(argList, "--no-" CFGOPT_COMPRESS); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_DIFF); + harnessCfgLoad(cfgCmdBackup, argList); + + sleepMSec(MSEC_PER_SEC - (timeMSec() % MSEC_PER_SEC)); + storagePutP(storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR), BUFSTRDEF("VR2")); + + TEST_RESULT_VOID(cmdBackup(), "backup"); + + TEST_RESULT_LOG( + "P00 INFO: last backup label = [FULL-1], version = " PROJECT_VERSION "\n" + "P01 INFO: backup file {[path]}/pg1/PG_VERSION (3B, 100%) checksum 6f1894088c578e4f0b9888e8e8a997d93cbbc0c5\n" + "P00 DETAIL: reference 
pg_data/global/pg_control to [FULL-1]\n" + "P00 DETAIL: reference pg_data/postgresql.conf to [FULL-1]\n" + "P00 INFO: diff backup size = 3B\n" + "P00 INFO: new backup label = [DIFF-2]"); + } + + // ***************************************************************************************************************************** + if (testBegin("cmdBackup() online")) + { + const String *pg1Path = strNewFmt("%s/pg1", testPath()); + const String *repoPath = strNewFmt("%s/repo", testPath()); + const String *pg2Path = strNewFmt("%s/pg2", testPath()); + + // Set log level to detail + harnessLogLevelSet(logLevelDetail); + + // Replace percent complete and backup size since they can cause a lot of churn when files are added/removed + hrnLogReplaceAdd(", [0-9]{1,3}%\\)", "[0-9]+%", "PCT", false); + hrnLogReplaceAdd(" backup size = [0-9]+[A-Z]+", "[^ ]+$", "SIZE", false); + + // Replace checksums since they can differ between architectures (e.g. 32/64 bit) + hrnLogReplaceAdd("\\) checksum [a-f0-9]{40}", "[a-f0-9]{40}$", "SHA1", false); + + // Backup start time epoch. The idea is to not have backup times (and therefore labels) ever change. Each backup added + // should be separated by 100,000 seconds (1,000,000 after stanza-upgrade) but after the initial assignments this will only + // be possible at the beginning and the end, so new backups added in the middle will average the start times of the prior + // and next backup to get their start time. Backups added to the beginning of the test will need to subtract from the + // epoch. 
+ #define BACKUP_EPOCH 1570000000 + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("online 9.5 resume uncompressed full backup"); + + time_t backupTimeStart = BACKUP_EPOCH; + + { + // Create pg_control + storagePutP( + storageNewWriteP( + storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path)), + .timeModified = backupTimeStart), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_95, .systemId = 1000000000000000950})); + + // Create stanza + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdStanzaCreate, argList); + + cmdStanzaCreate(); + + // Load options + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL); + strLstAddZ(argList, "--no-" CFGOPT_COMPRESS); + strLstAddZ(argList, "--no-" CFGOPT_ARCHIVE_CHECK); + harnessCfgLoad(cfgCmdBackup, argList); + + // Add files + storagePutP( + storageNewWriteP(storagePgWrite(), STRDEF("postgresql.conf"), .timeModified = backupTimeStart), + BUFSTRDEF("CONFIGSTUFF")); + storagePutP( + storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR, .timeModified = backupTimeStart), + BUFSTRDEF(PG_VERSION_95_STR)); + storagePathCreateP(storagePgWrite(), pgWalPath(PG_VERSION_95), .noParentCreate = true); + + // Create a backup manifest that looks like a halted backup manifest + Manifest *manifestResume = manifestNewBuild(storagePg(), PG_VERSION_95, 
true, false, NULL, NULL); + ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume); + + manifestResumeData->backupType = backupTypeFull; + const String *resumeLabel = backupLabelCreate(backupTypeFull, NULL, backupTimeStart); + manifestBackupLabelSet(manifestResume, resumeLabel); + + // Copy a file to be resumed that has not changed in the repo + storageCopy( + storageNewReadP(storagePg(), PG_FILE_PGVERSION_STR), + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/PG_VERSION", strPtr(resumeLabel)))); + + strcpy( + ((ManifestFile *)manifestFileFind(manifestResume, STRDEF("pg_data/PG_VERSION")))->checksumSha1, + "06d06bb31b570b94d7b4325f511f853dbe771c21"); + + // Save the resume manifest + manifestSave( + manifestResume, + storageWriteIo( + storageNewWriteP( + storageRepoWrite(), + strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strPtr(resumeLabel))))); + + // Run backup + testBackupPqScriptP(PG_VERSION_95, backupTimeStart); + TEST_RESULT_VOID(cmdBackup(), "backup"); + + TEST_RESULT_LOG( + "P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n" + "P00 INFO: backup start archive = 0000000105D944C000000000, lsn = 5d944c0/0\n" + "P00 WARN: resumable backup 20191002-070640F of same type exists -- remove invalid files and resume\n" + "P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n" + "P01 DETAIL: checksum resumed file {[path]}/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n" + "P00 INFO: full backup size = [SIZE]\n" + "P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n" + "P00 INFO: backup stop archive = 0000000105D944C000000000, lsn = 5d944c0/800000\n" + "P00 INFO: new backup label = 20191002-070640F"); + + TEST_RESULT_STR_Z( + testBackupValidate(storageRepo(), 
STRDEF(STORAGE_REPO_BACKUP "/latest")), + ". {link, d=20191002-070640F}\n" + "pg_data {path}\n" + "pg_data/PG_VERSION {file, s=3}\n" + "pg_data/global {path}\n" + "pg_data/global/pg_control {file, s=8192}\n" + "pg_data/pg_xlog {path}\n" + "pg_data/postgresql.conf {file, s=11}\n", + "compare file list"); + } + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("online resumed compressed 9.5 full backup"); + + // Backup start time + backupTimeStart = BACKUP_EPOCH + 100000; + + { + // Load options + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL); + strLstAddZ(argList, "--" CFGOPT_REPO1_HARDLINK); + strLstAddZ(argList, "--" CFGOPT_ARCHIVE_COPY); + harnessCfgLoad(cfgCmdBackup, argList); + + // Create a backup manifest that looks like a halted backup manifest + Manifest *manifestResume = manifestNewBuild(storagePg(), PG_VERSION_95, true, false, NULL, NULL); + ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume); + + manifestResumeData->backupType = backupTypeFull; + manifestResumeData->backupOptionCompress = true; + const String *resumeLabel = backupLabelCreate(backupTypeFull, NULL, backupTimeStart); + manifestBackupLabelSet(manifestResume, resumeLabel); + + // File exists in cluster and repo but not in the resume manifest + storagePutP( + storageNewWriteP(storagePgWrite(), STRDEF("not-in-resume"), .timeModified = backupTimeStart), BUFSTRDEF("TEST")); + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/not-in-resume.gz", strPtr(resumeLabel))), + NULL); + + // Remove checksum from file so it 
won't be resumed + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/pg_control.gz", strPtr(resumeLabel))), + NULL); + + ((ManifestFile *)manifestFileFind(manifestResume, STRDEF("pg_data/global/pg_control")))->checksumSha1[0] = 0; + + // Size does not match between cluster and resume manifest + storagePutP( + storageNewWriteP(storagePgWrite(), STRDEF("size-mismatch"), .timeModified = backupTimeStart), BUFSTRDEF("TEST")); + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/size-mismatch.gz", strPtr(resumeLabel))), + NULL); + manifestFileAdd( + manifestResume, &(ManifestFile){ + .name = STRDEF("pg_data/size-mismatch"), .checksumSha1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", .size = 33}); + + // Time does not match between cluster and resume manifest + storagePutP( + storageNewWriteP(storagePgWrite(), STRDEF("time-mismatch"), .timeModified = backupTimeStart), BUFSTRDEF("TEST")); + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/time-mismatch.gz", strPtr(resumeLabel))), + NULL); + manifestFileAdd( + manifestResume, &(ManifestFile){ + .name = STRDEF("pg_data/time-mismatch"), .checksumSha1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", .size = 4, + .timestamp = backupTimeStart - 1}); + + // Size is zero in cluster and resume manifest. ??? We'd like to remove this requirement after the migration. 
+ storagePutP(storageNewWriteP(storagePgWrite(), STRDEF("zero-size"), .timeModified = backupTimeStart), NULL); + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/zero-size.gz", strPtr(resumeLabel))), + BUFSTRDEF("ZERO-SIZE")); + manifestFileAdd( + manifestResume, &(ManifestFile){.name = STRDEF("pg_data/zero-size"), .size = 0, .timestamp = backupTimeStart}); + + // Path is not in manifest + storagePathCreateP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/bogus_path", strPtr(resumeLabel))); + + // File is not in manifest + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/global/bogus", strPtr(resumeLabel))), + NULL); + + // Save the resume manifest + manifestSave( + manifestResume, + storageWriteIo( + storageNewWriteP( + storageRepoWrite(), + strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strPtr(resumeLabel))))); + + // Disable storageFeaturePath so paths will not be created before files are copied + ((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeaturePath; + + // Disable storageFeaturePathSync so paths will not be synced + ((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeaturePathSync; + + // Run backup + testBackupPqScriptP(PG_VERSION_95, backupTimeStart); + TEST_RESULT_VOID(cmdBackup(), "backup"); + + // Enable storage features + ((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeaturePath; + ((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeaturePathSync; + + TEST_RESULT_LOG( + "P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n" + "P00 INFO: backup start archive = 0000000105D95D3000000000, lsn = 5d95d30/0\n" + "P00 WARN: resumable backup 20191003-105320F of same type exists -- remove invalid files and resume\n" + "P00 DETAIL: remove path 
'{[path]}/repo/backup/test1/20191003-105320F/pg_data/bogus_path' from resumed backup\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/global/bogus' from resumed backup" + " (missing in manifest)\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/global/pg_control.gz' from resumed" + " backup (no checksum in resumed manifest)\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/not-in-resume.gz' from resumed backup" + " (missing in resumed manifest)\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/size-mismatch.gz' from resumed backup" + " (mismatched size)\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/time-mismatch.gz' from resumed backup" + " (mismatched timestamp)\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F/pg_data/zero-size.gz' from resumed backup" + " (zero size)\n" + "P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/time-mismatch (4B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/size-mismatch (4B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/not-in-resume (4B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/zero-size (0B, [PCT])\n" + "P00 INFO: full backup size = [SIZE]\n" + "P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n" + "P00 INFO: backup stop archive = 0000000105D95D3000000000, lsn = 5d95d30/800000\n" + "P00 INFO: check archive for segment(s) 0000000105D95D3000000000:0000000105D95D3000000000\n" + "P00 INFO: new backup label = 20191003-105320F"); + + TEST_RESULT_STR_Z( + testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP 
"/latest")), + ". {link, d=20191003-105320F}\n" + "pg_data {path}\n" + "pg_data/PG_VERSION.gz {file, s=3}\n" + "pg_data/global {path}\n" + "pg_data/global/pg_control.gz {file, s=8192}\n" + "pg_data/not-in-resume.gz {file, s=4}\n" + "pg_data/pg_xlog {path}\n" + "pg_data/pg_xlog/0000000105D95D3000000000.gz {file, s=16777216}\n" + "pg_data/postgresql.conf.gz {file, s=11}\n" + "pg_data/size-mismatch.gz {file, s=4}\n" + "pg_data/time-mismatch.gz {file, s=4}\n" + "pg_data/zero-size.gz {file, s=0}\n", + "compare file list"); + + // Remove test files + storageRemoveP(storagePgWrite(), STRDEF("not-in-resume"), .errorOnMissing = true); + storageRemoveP(storagePgWrite(), STRDEF("size-mismatch"), .errorOnMissing = true); + storageRemoveP(storagePgWrite(), STRDEF("time-mismatch"), .errorOnMissing = true); + storageRemoveP(storagePgWrite(), STRDEF("zero-size"), .errorOnMissing = true); + } + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("online resumed compressed 9.5 diff backup"); + + backupTimeStart = BACKUP_EPOCH + 200000; + + { + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_DIFF); + strLstAddZ(argList, "--" CFGOPT_REPO1_HARDLINK); + harnessCfgLoad(cfgCmdBackup, argList); + + // Load the previous manifest and null out the checksum-page option to be sure it gets set to false in this backup + const String *manifestPriorFile = STRDEF(STORAGE_REPO_BACKUP "/latest/" BACKUP_MANIFEST_FILE); + Manifest *manifestPrior = manifestNewLoad(storageReadIo(storageNewReadP(storageRepo(), manifestPriorFile))); + ((ManifestData *)manifestData(manifestPrior))->backupOptionChecksumPage = 
NULL; + manifestSave(manifestPrior, storageWriteIo(storageNewWriteP(storageRepoWrite(), manifestPriorFile))); + + // Create a backup manifest that looks like a halted backup manifest + Manifest *manifestResume = manifestNewBuild(storagePg(), PG_VERSION_95, true, false, NULL, NULL); + ManifestData *manifestResumeData = (ManifestData *)manifestData(manifestResume); + + manifestResumeData->backupType = backupTypeDiff; + manifestResumeData->backupLabelPrior = manifestData(manifestPrior)->backupLabel; + manifestResumeData->backupOptionCompress = true; + const String *resumeLabel = backupLabelCreate(backupTypeDiff, manifestData(manifestPrior)->backupLabel, backupTimeStart); + manifestBackupLabelSet(manifestResume, resumeLabel); + + // Reference in manifest + storagePutP( + storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/PG_VERSION.gz", strPtr(resumeLabel))), + NULL); + + // Reference in resumed manifest + storagePutP(storageNewWriteP(storagePgWrite(), STRDEF("resume-ref"), .timeModified = backupTimeStart), NULL); + storagePutP( + storageNewWriteP(storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/resume-ref.gz", strPtr(resumeLabel))), + NULL); + manifestFileAdd( + manifestResume, &(ManifestFile){.name = STRDEF("pg_data/resume-ref"), .size = 0, .reference = STRDEF("BOGUS")}); + + // Time does not match between cluster and resume manifest (but resume because time is in future so delta enabled). Note + // also that the repo file is intenionally corrupt to generate a warning about corruption in the repository. 
+ storagePutP( + storageNewWriteP(storagePgWrite(), STRDEF("time-mismatch2"), .timeModified = backupTimeStart + 100), BUFSTRDEF("TEST")); + storagePutP( + storageNewWriteP( + storageRepoWrite(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/time-mismatch2.gz", strPtr(resumeLabel))), + NULL); + manifestFileAdd( + manifestResume, &(ManifestFile){ + .name = STRDEF("pg_data/time-mismatch2"), .checksumSha1 = "984816fd329622876e14907634264e6f332e9fb3", .size = 4, + .timestamp = backupTimeStart}); + + // Links are always removed on resume + THROW_ON_SYS_ERROR( + symlink( + "..", + strPtr(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/link", strPtr(resumeLabel))))) == -1, + FileOpenError, "unable to create symlink"); + + // Special files should not be in the repo + TEST_SYSTEM_FMT( + "mkfifo -m 666 %s", + strPtr(storagePathP(storageRepo(), strNewFmt(STORAGE_REPO_BACKUP "/%s/pg_data/pipe", strPtr(resumeLabel))))); + + // Save the resume manifest + manifestSave( + manifestResume, + storageWriteIo( + storageNewWriteP( + storageRepoWrite(), + strNewFmt(STORAGE_REPO_BACKUP "/%s/" BACKUP_MANIFEST_FILE INFO_COPY_EXT, strPtr(resumeLabel))))); + + // Run backup + testBackupPqScriptP(PG_VERSION_95, backupTimeStart); + TEST_RESULT_VOID(cmdBackup(), "backup"); + + // Check log + TEST_RESULT_LOG( + "P00 INFO: last backup label = 20191003-105320F, version = " PROJECT_VERSION "\n" + "P00 INFO: execute exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n" + "P00 INFO: backup start archive = 0000000105D9759000000000, lsn = 5d97590/0\n" + "P00 WARN: file 'time-mismatch2' has timestamp in the future, enabling delta checksum\n" + "P00 WARN: resumable backup 20191003-105320F_20191004-144000D of same type exists" + " -- remove invalid files and resume\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/PG_VERSION.gz'" + " from resumed backup (reference in manifest)\n" + "P00 WARN: remove 
special file '{[path]}/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/pipe'" + " from resumed backup\n" + "P00 DETAIL: remove file '{[path]}/repo/backup/test1/20191003-105320F_20191004-144000D/pg_data/resume-ref.gz'" + " from resumed backup (reference in resumed manifest)\n" + "P01 DETAIL: match file from prior backup {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n" + "P01 DETAIL: match file from prior backup {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n" + "P00 WARN: resumed backup file pg_data/time-mismatch2 does not have expected checksum" + " 984816fd329622876e14907634264e6f332e9fb3. The file will be recopied and backup will continue but this may be an" + " issue unless the resumed backup path in the repository is known to be corrupted.\n" + " NOTE: this does not indicate a problem with the PostgreSQL page checksums.\n" + "P01 INFO: backup file {[path]}/pg1/time-mismatch2 (4B, [PCT]) checksum [SHA1]\n" + "P01 DETAIL: match file from prior backup {[path]}/pg1/PG_VERSION (3B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/resume-ref (0B, [PCT])\n" + "P00 DETAIL: hardlink pg_data/PG_VERSION to 20191003-105320F\n" + "P00 DETAIL: hardlink pg_data/global/pg_control to 20191003-105320F\n" + "P00 DETAIL: hardlink pg_data/postgresql.conf to 20191003-105320F\n" + "P00 INFO: diff backup size = [SIZE]\n" + "P00 INFO: execute exclusive pg_stop_backup() and wait for all WAL segments to archive\n" + "P00 INFO: backup stop archive = 0000000105D9759000000000, lsn = 5d97590/800000\n" + "P00 INFO: check archive for segment(s) 0000000105D9759000000000:0000000105D9759000000000\n" + "P00 INFO: new backup label = 20191003-105320F_20191004-144000D"); + + // Check repo directory + TEST_RESULT_STR_Z( + testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")), + ". 
{link, d=20191003-105320F_20191004-144000D}\n" + "pg_data {path}\n" + "pg_data/PG_VERSION.gz {file, s=3}\n" + "pg_data/global {path}\n" + "pg_data/global/pg_control.gz {file, s=8192}\n" + "pg_data/pg_xlog {path}\n" + "pg_data/postgresql.conf.gz {file, s=11}\n" + "pg_data/resume-ref.gz {file, s=0}\n" + "pg_data/time-mismatch2.gz {file, s=4}\n", + "compare file list"); + + // Remove test files + storageRemoveP(storagePgWrite(), STRDEF("resume-ref"), .errorOnMissing = true); + storageRemoveP(storagePgWrite(), STRDEF("time-mismatch2"), .errorOnMissing = true); + } + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("online 9.6 back-standby full backup"); + + backupTimeStart = BACKUP_EPOCH + 1200000; + + { + // Update pg_control + storagePutP( + storageNewWriteP( + storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path)), + .timeModified = backupTimeStart), + pgControlTestToBuffer((PgControl){.version = PG_VERSION_96, .systemId = 1000000000000000960})); + + // Update version + storagePutP( + storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR, .timeModified = backupTimeStart), + BUFSTRDEF(PG_VERSION_96_STR)); + + // Upgrade stanza + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdStanzaUpgrade, argList); + + cmdStanzaUpgrade(); + + // Load options + argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG2_PATH "=%s", strPtr(pg2Path))); + strLstAddZ(argList, "--" 
CFGOPT_PG2_PORT "=5433"); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--no-" CFGOPT_COMPRESS); + strLstAddZ(argList, "--" CFGOPT_BACKUP_STANDBY); + strLstAddZ(argList, "--" CFGOPT_START_FAST); + strLstAddZ(argList, "--" CFGOPT_ARCHIVE_COPY); + harnessCfgLoad(cfgCmdBackup, argList); + + // Create files to copy from the standby. The files will be zero-size on the primary and non-zero on the standby to test + // that they were copied from the right place. + storagePutP(storageNewWriteP(storagePgIdWrite(1), STRDEF(PG_PATH_BASE "/1/1"), .timeModified = backupTimeStart), NULL); + storagePutP(storageNewWriteP(storagePgIdWrite(2), STRDEF(PG_PATH_BASE "/1/1")), BUFSTRDEF("DATA")); + + // Set log level to warn because the following test uses multiple processes so the log order will not be deterministic + harnessLogLevelSet(logLevelWarn); + + // Run backup but error on archive check + testBackupPqScriptP(PG_VERSION_96, backupTimeStart, .noWal = true, .backupStandby = true); + TEST_ERROR( + cmdBackup(), ArchiveTimeoutError, + "WAL segment 0000000105DA69C000000000 was not archived before the 100ms timeout\n" + "HINT: check the archive_command to ensure that all options are correct (especially --stanza).\n" + "HINT: check the PostgreSQL server log for errors."); + + // Remove halted backup so there's no resume + storagePathRemoveP(storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191016-042640F"), .recurse = true); + + // Run backup + testBackupPqScriptP(PG_VERSION_96, backupTimeStart, .backupStandby = true, .walCompress = true); + TEST_RESULT_VOID(cmdBackup(), "backup"); + + // Set log level back to detail + harnessLogLevelSet(logLevelDetail); + + TEST_RESULT_LOG( + "P00 WARN: no prior backup exists, incr backup has been changed to full"); + + TEST_RESULT_STR_Z( + testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")), + ". 
{link, d=20191016-042640F}\n" + "pg_data {path}\n" + "pg_data/PG_VERSION {file, s=3}\n" + "pg_data/backup_label {file, s=17}\n" + "pg_data/base {path}\n" + "pg_data/base/1 {path}\n" + "pg_data/base/1/1 {file, s=4}\n" + "pg_data/global {path}\n" + "pg_data/global/pg_control {file, s=8192}\n" + "pg_data/pg_xlog {path}\n" + "pg_data/pg_xlog/0000000105DA69C000000000 {file, s=16777216}\n" + "pg_data/postgresql.conf {file, s=11}\n", + "compare file list"); + + // Remove test files + storagePathRemoveP(storagePgIdWrite(2), NULL, .recurse = true); + storagePathRemoveP(storagePgWrite(), STRDEF("base/1"), .recurse = true); + } + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("online 11 full backup with tablespaces and page checksums"); + + backupTimeStart = BACKUP_EPOCH + 2200000; + + { + // Update pg_control + storagePutP( + storageNewWriteP( + storageTest, strNewFmt("%s/" PG_PATH_GLOBAL "/" PG_FILE_PGCONTROL, strPtr(pg1Path)), + .timeModified = backupTimeStart), + pgControlTestToBuffer( + (PgControl){ + .version = PG_VERSION_11, .systemId = 1000000000000001100, .pageChecksum = true, + .walSegmentSize = 1024 * 1024})); + + // Update version + storagePutP( + storageNewWriteP(storagePgWrite(), PG_FILE_PGVERSION_STR, .timeModified = backupTimeStart), + BUFSTRDEF(PG_VERSION_11_STR)); + + // Update wal path + storagePathRemoveP(storagePgWrite(), pgWalPath(PG_VERSION_95)); + storagePathCreateP(storagePgWrite(), pgWalPath(PG_VERSION_11), .noParentCreate = true); + + // Upgrade stanza + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--no-" CFGOPT_ONLINE); + harnessCfgLoad(cfgCmdStanzaUpgrade, argList); + + cmdStanzaUpgrade(); + + // Load options + argList = strLstNew(); + 
strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_FULL); + strLstAddZ(argList, "--" CFGOPT_REPO1_HARDLINK); + strLstAddZ(argList, "--" CFGOPT_MANIFEST_SAVE_THRESHOLD "=1"); + strLstAddZ(argList, "--" CFGOPT_ARCHIVE_COPY); + harnessCfgLoad(cfgCmdBackup, argList); + + // Zeroed file which passes page checksums + Buffer *relation = bufNew(PG_PAGE_SIZE_DEFAULT); + memset(bufPtr(relation), 0, bufSize(relation)); + bufUsedSet(relation, bufSize(relation)); + + storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/1"), .timeModified = backupTimeStart), relation); + + // Zeroed file which will fail on alignment + relation = bufNew(PG_PAGE_SIZE_DEFAULT + 1); + memset(bufPtr(relation), 0, bufSize(relation)); + bufUsedSet(relation, bufSize(relation)); + + storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/2"), .timeModified = backupTimeStart), relation); + + // File with bad page checksums + relation = bufNew(PG_PAGE_SIZE_DEFAULT * 4); + memset(bufPtr(relation), 0, bufSize(relation)); + ((PageHeaderData *)(bufPtr(relation) + PG_PAGE_SIZE_DEFAULT * 0))->pd_upper = 0xFF; + ((PageHeaderData *)(bufPtr(relation) + PG_PAGE_SIZE_DEFAULT * 2))->pd_upper = 0xFE; + ((PageHeaderData *)(bufPtr(relation) + PG_PAGE_SIZE_DEFAULT * 3))->pd_upper = 0xEF; + bufUsedSet(relation, bufSize(relation)); + + storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/3"), .timeModified = backupTimeStart), relation); + + // File with bad page checksum + relation = bufNew(PG_PAGE_SIZE_DEFAULT * 3); + memset(bufPtr(relation), 0, bufSize(relation)); + ((PageHeaderData *)(bufPtr(relation) + PG_PAGE_SIZE_DEFAULT * 1))->pd_upper = 0x08; + bufUsedSet(relation, bufSize(relation)); + + 
storagePutP(storageNewWriteP(storagePgWrite(), STRDEF(PG_PATH_BASE "/1/4"), .timeModified = backupTimeStart), relation); + + // Add a tablespace + storagePathCreateP(storagePgWrite(), STRDEF(PG_PATH_PGTBLSPC)); + THROW_ON_SYS_ERROR( + symlink("../../pg1-tblspc/32768", strPtr(storagePathP(storagePg(), STRDEF(PG_PATH_PGTBLSPC "/32768")))) == -1, + FileOpenError, "unable to create symlink"); + + storagePutP( + storageNewWriteP( + storageTest, strNewFmt("pg1-tblspc/32768/%s/1/5", strPtr(pgTablespaceId(PG_VERSION_11))), + .timeModified = backupTimeStart), + NULL); + + // Disable storageFeatureSymLink so tablespace (and latest) symlinks will not be created + ((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeatureSymLink; + + // Disable storageFeatureHardLink so hardlinks will not be created + ((Storage *)storageRepoWrite())->interface.feature ^= 1 << storageFeatureHardLink; + + // Run backup + testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .walCompress = true, .walTotal = 3); + TEST_RESULT_VOID(cmdBackup(), "backup"); + + // Reset storage features + ((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeatureSymLink; + ((Storage *)storageRepoWrite())->interface.feature |= 1 << storageFeatureHardLink; + + TEST_RESULT_LOG( + "P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n" + "P00 INFO: backup start archive = 0000000105DB5DE000000000, lsn = 5db5de0/0\n" + "P01 INFO: backup file {[path]}/pg1/base/1/3 (32KB, [PCT]) checksum [SHA1]\n" + "P00 WARN: invalid page checksums found in file {[path]}/pg1/base/1/3 at pages 0, 2-3\n" + "P01 INFO: backup file {[path]}/pg1/base/1/4 (24KB, [PCT]) checksum [SHA1]\n" + "P00 WARN: invalid page checksum found in file {[path]}/pg1/base/1/4 at page 1\n" + "P01 INFO: backup file {[path]}/pg1/base/1/2 (8KB, [PCT]) checksum [SHA1]\n" + "P00 WARN: page misalignment in file {[path]}/pg1/base/1/2: file size 8193 is not divisible by page size 8192\n" + 
"P01 INFO: backup file {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/base/1/1 (8KB, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n" + "P01 INFO: backup file {[path]}/pg1/pg_tblspc/32768/PG_11_201809051/1/5 (0B, [PCT])\n" + "P00 INFO: full backup size = [SIZE]\n" + "P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n" + "P00 INFO: backup stop archive = 0000000105DB5DE000000002, lsn = 5db5de0/280000\n" + "P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n" + "P00 INFO: check archive for segment(s) 0000000105DB5DE000000000:0000000105DB5DE000000002\n" + "P00 INFO: new backup label = 20191027-181320F"); + + TEST_RESULT_STR_Z( + testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/20191027-181320F")), + "pg_data {path}\n" + "pg_data/PG_VERSION.gz {file, s=2}\n" + "pg_data/backup_label.gz {file, s=17}\n" + "pg_data/base {path}\n" + "pg_data/base/1 {path}\n" + "pg_data/base/1/1.gz {file, s=8192}\n" + "pg_data/base/1/2.gz {file, s=8193}\n" + "pg_data/base/1/3.gz {file, s=32768}\n" + "pg_data/base/1/4.gz {file, s=24576}\n" + "pg_data/global {path}\n" + "pg_data/global/pg_control.gz {file, s=8192}\n" + "pg_data/pg_tblspc {path}\n" + "pg_data/pg_wal {path}\n" + "pg_data/pg_wal/0000000105DB5DE000000000.gz {file, s=1048576}\n" + "pg_data/pg_wal/0000000105DB5DE000000001.gz {file, s=1048576}\n" + "pg_data/pg_wal/0000000105DB5DE000000002.gz {file, s=1048576}\n" + "pg_data/postgresql.conf.gz {file, s=11}\n" + "pg_tblspc {path}\n" + "pg_tblspc/32768 {path}\n" + "pg_tblspc/32768/PG_11_201809051 {path}\n" + "pg_tblspc/32768/PG_11_201809051/1 {path}\n" + "pg_tblspc/32768/PG_11_201809051/1/5.gz {file, s=0}\n", + "compare file list"); + + // Remove test files + storagePathRemoveP(storagePgWrite(), STRDEF("base/1"), .recurse 
= true); + } + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("error when pg_control not present"); + + backupTimeStart = BACKUP_EPOCH + 2300000; + + { + // Load options + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_INCR); + strLstAddZ(argList, "--" CFGOPT_REPO1_HARDLINK); + harnessCfgLoad(cfgCmdBackup, argList); + + // Run backup + testBackupPqScriptP(PG_VERSION_11, backupTimeStart, .errorAfterStart = true); + TEST_ERROR( + cmdBackup(), FileMissingError, + "pg_control must be present in all online backups\n" + "HINT: is something wrong with the clock or filesystem timestamps?"); + + // Check log + TEST_RESULT_LOG( + "P00 INFO: last backup label = 20191027-181320F, version = " PROJECT_VERSION "\n" + "P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n" + "P00 INFO: backup start archive = 0000000105DB764000000000, lsn = 5db7640/0"); + + // Remove partial backup so it won't be resumed (since it errored before any checksums were written) + storagePathRemoveP( + storageRepoWrite(), STRDEF(STORAGE_REPO_BACKUP "/20191027-181320F_20191028-220000I"), .recurse = true); + } + + // ------------------------------------------------------------------------------------------------------------------------- + TEST_TITLE("online 11 incr backup with tablespaces"); + + backupTimeStart = BACKUP_EPOCH + 2400000; + + { + // Load options + StringList *argList = strLstNew(); + strLstAddZ(argList, "--" CFGOPT_STANZA "=test1"); + strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_PATH "=%s", strPtr(repoPath))); + strLstAdd(argList, 
strNewFmt("--" CFGOPT_PG1_PATH "=%s", strPtr(pg1Path))); + strLstAddZ(argList, "--" CFGOPT_REPO1_RETENTION_FULL "=1"); + strLstAddZ(argList, "--" CFGOPT_TYPE "=" BACKUP_TYPE_INCR); + strLstAddZ(argList, "--" CFGOPT_DELTA); + strLstAddZ(argList, "--" CFGOPT_REPO1_HARDLINK); + harnessCfgLoad(cfgCmdBackup, argList); + + // Update pg_control timestamp + THROW_ON_SYS_ERROR( + utime( + strPtr(storagePathP(storagePg(), STRDEF("global/pg_control"))), + &(struct utimbuf){.actime = backupTimeStart, .modtime = backupTimeStart}) != 0, FileWriteError, + "unable to set time"); + + // Run backup + testBackupPqScriptP(PG_VERSION_11, backupTimeStart); + TEST_RESULT_VOID(cmdBackup(), "backup"); + + TEST_RESULT_LOG( + "P00 INFO: last backup label = 20191027-181320F, version = " PROJECT_VERSION "\n" + "P00 INFO: execute non-exclusive pg_start_backup(): backup begins after the next regular checkpoint completes\n" + "P00 INFO: backup start archive = 0000000105DB8EB000000000, lsn = 5db8eb0/0\n" + "P01 DETAIL: match file from prior backup {[path]}/pg1/global/pg_control (8KB, [PCT]) checksum [SHA1]\n" + "P01 DETAIL: match file from prior backup {[path]}/pg1/postgresql.conf (11B, [PCT]) checksum [SHA1]\n" + "P01 DETAIL: match file from prior backup {[path]}/pg1/PG_VERSION (2B, [PCT]) checksum [SHA1]\n" + "P00 DETAIL: hardlink pg_data/PG_VERSION to 20191027-181320F\n" + "P00 DETAIL: hardlink pg_data/global/pg_control to 20191027-181320F\n" + "P00 DETAIL: hardlink pg_data/postgresql.conf to 20191027-181320F\n" + "P00 DETAIL: hardlink pg_tblspc/32768/PG_11_201809051/1/5 to 20191027-181320F\n" + "P00 INFO: incr backup size = [SIZE]\n" + "P00 INFO: execute non-exclusive pg_stop_backup() and wait for all WAL segments to archive\n" + "P00 INFO: backup stop archive = 0000000105DB8EB000000000, lsn = 5db8eb0/80000\n" + "P00 DETAIL: wrote 'backup_label' file returned from pg_stop_backup()\n" + "P00 INFO: check archive for segment(s) 0000000105DB8EB000000000:0000000105DB8EB000000000\n" + "P00 INFO: new 
backup label = 20191027-181320F_20191030-014640I"); + + TEST_RESULT_STR_Z( + testBackupValidate(storageRepo(), STRDEF(STORAGE_REPO_BACKUP "/latest")), + ". {link, d=20191027-181320F_20191030-014640I}\n" + "pg_data {path}\n" + "pg_data/PG_VERSION.gz {file, s=2}\n" + "pg_data/backup_label.gz {file, s=17}\n" + "pg_data/base {path}\n" + "pg_data/global {path}\n" + "pg_data/global/pg_control.gz {file, s=8192}\n" + "pg_data/pg_tblspc {path}\n" + "pg_data/pg_tblspc/32768 {link, d=../../pg_tblspc/32768}\n" + "pg_data/pg_wal {path}\n" + "pg_data/postgresql.conf.gz {file, s=11}\n" + "pg_tblspc {path}\n" + "pg_tblspc/32768 {path}\n" + "pg_tblspc/32768/PG_11_201809051 {path}\n" + "pg_tblspc/32768/PG_11_201809051/1 {path}\n" + "pg_tblspc/32768/PG_11_201809051/1/5.gz {file, s=0}\n", + "compare file list"); + + // Remove test files + storagePathRemoveP(storagePgWrite(), STRDEF("base/1"), .recurse = true); + } + } + FUNCTION_HARNESS_RESULT_VOID(); }