mirror of https://github.com/pgbackrest/pgbackrest.git synced 2025-01-16 04:54:47 +02:00

Ability to resume failed backups, better locking

David Steele 2014-02-14 19:56:28 -05:00
parent 308652cc65
commit d100294894
3 changed files with 305 additions and 113 deletions

Changed file 1 of 3

@@ -7,7 +7,6 @@ use File::Basename;
 use Getopt::Long;
 use Config::IniFiles;
 use Carp;
-use Fcntl qw(:DEFAULT :flock);
 use lib dirname($0);
 use pg_backrest_utility;
@@ -182,6 +181,7 @@ if ($strOperation eq OP_ARCHIVE_PUSH || $strOperation eq OP_ARCHIVE_PULL)
     # Get the async compress flag. If compress_async=y then compression is off for the initial push
     my $bCompressAsync = config_load($strSection, CONFIG_KEY_COMPRESS_ASYNC, true, "n") eq "n" ? false : true;

+    # Perform the archive-push
     if ($strOperation eq OP_ARCHIVE_PUSH)
     {
         # Make sure that archive-push is running locally
@@ -189,12 +189,6 @@ if ($strOperation eq OP_ARCHIVE_PUSH || $strOperation eq OP_ARCHIVE_PULL)
         {
             confess &log(ERROR, "stanza host cannot be set on archive-push - must be run locally on db server");
         }

-        # Make sure that compress and compress_async are not both set
-        # if (defined(config_load($strSection, CONFIG_KEY_COMPRESS)) && defined(config_load($strSection, CONFIG_KEY_COMPRESS_ASYNC)))
-        # {
-        #     confess &log(ERROR, "compress and compress_async cannot both be set");
-        # }
-
         # Get the compress flag
         my $bCompress = $bCompressAsync ? false : config_load($strSection, CONFIG_KEY_COMPRESS, true, "y") eq "y" ? true : false;
@@ -250,6 +244,7 @@ if ($strOperation eq OP_ARCHIVE_PUSH || $strOperation eq OP_ARCHIVE_PULL)
         }
     }

+    # Perform the archive-pull
     if ($strOperation eq OP_ARCHIVE_PULL)
     {
         # Make sure that archive-pull is running on the db server
@@ -260,13 +255,9 @@ if ($strOperation eq OP_ARCHIVE_PUSH || $strOperation eq OP_ARCHIVE_PULL)

         # Create a lock file to make sure archive-pull does not run more than once
         my $strArchivePath = config_load(CONFIG_SECTION_ARCHIVE, CONFIG_KEY_PATH);
-        my $strLockFile = "${strArchivePath}/lock/archive-${strStanza}.lock";
-        my $fLockFile;
+        my $strLockFile = "${strArchivePath}/lock/${strStanza}-archive.lock";

-        sysopen($fLockFile, $strLockFile, O_WRONLY | O_CREAT)
-            or confess &log(ERROR, "unable to open lock file ${strLockFile}");
-
-        if (!flock($fLockFile, LOCK_EX | LOCK_NB))
+        if (!lock_file_create($strLockFile))
         {
             &log(DEBUG, "archive-pull process is already running - exiting");
             exit 0
@@ -305,6 +296,8 @@ if ($strOperation eq OP_ARCHIVE_PUSH || $strOperation eq OP_ARCHIVE_PULL)
         {
             sleep(5);
         }
+
+        lock_file_remove();
     }

     exit 0;
@@ -386,12 +379,19 @@ backup_init
 ####################################################################################################################################
 if ($strOperation eq OP_BACKUP)
 {
-    # !!! Pick the log file name here (backup, restore, archive-YYYYMMDD)
-    my $strLogFile = "";
+    my $strLockFile = $oFile->path_get(PATH_BACKUP, "lock/${strStanza}-backup.lock");
+
+    if (!lock_file_create($strLockFile))
+    {
+        &log(DEBUG, "backup process is already running for stanza ${strStanza} - exiting");
+        exit 0
+    }

     backup(config_load(CONFIG_SECTION_STANZA, CONFIG_KEY_PATH));

     $strOperation = OP_EXPIRE;
+
+    lock_file_remove();
 }

 ####################################################################################################################################
@@ -399,6 +399,14 @@ if ($strOperation eq OP_BACKUP)
 ####################################################################################################################################
 if ($strOperation eq OP_EXPIRE)
 {
+    my $strLockFile = $oFile->path_get(PATH_BACKUP, "lock/${strStanza}-expire.lock");
+
+    if (!lock_file_create($strLockFile))
+    {
+        &log(DEBUG, "expire process is already running for stanza ${strStanza} - exiting");
+        exit 0
+    }
+
     backup_expire
     (
         $oFile->path_get(PATH_BACKUP_CLUSTER),
@@ -408,6 +416,8 @@ if ($strOperation eq OP_EXPIRE)
         config_load(CONFIG_SECTION_RETENTION, "archive_retention")
     );

+    lock_file_remove();
+
     exit 0;
 }
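
The two blocks above show the pattern this commit applies to backup, expire, and archive-pull: take a per-stanza lock file before doing any work, exit quietly if another process already holds it, and release it on completion. Below is a minimal standalone sketch of that pattern using the lock_file_create()/lock_file_remove() helpers added to pg_backrest_utility later in this commit; the module path, stanza name, and lock location are hypothetical placeholders.

    # A minimal sketch (not part of the commit): guard an operation with the new lock helpers.
    use strict;
    use warnings;
    use lib "/path/to/pg_backrest";   # hypothetical location of the pgbackrest modules
    use pg_backrest_utility;          # exports lock_file_create() and lock_file_remove()

    my $strStanza = "main";                                             # hypothetical stanza
    my $strLockFile = "/var/lib/backup/lock/${strStanza}-backup.lock";  # hypothetical path; the lock directory must exist

    # lock_file_create() returns false when another process already holds the flock
    if (!lock_file_create($strLockFile))
    {
        print "backup is already running for stanza ${strStanza} - exiting\n";
        exit 0;
    }

    # ... the guarded operation (backup, expire, archive-pull) would run here ...

    # Close the handle and delete the lock file once the operation completes
    lock_file_remove();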

Changed file 2 of 3

@@ -382,6 +382,140 @@ sub backup_manifest_save
     tied(%oBackupManifest)->WriteConfig($strBackupManifestFile);
 }

+####################################################################################################################################
+# BACKUP_FILE_NOT_IN_MANIFEST - Find all files in a backup path that are not in the supplied manifest
+####################################################################################################################################
+sub backup_file_not_in_manifest
+{
+    my $strPathType = shift;
+    my $oManifestRef = shift;
+
+    my %oFileHash = $oFile->manifest_get($strPathType);
+    my @stryFile;
+    my $iFileTotal = 0;
+
+    foreach my $strName (sort(keys $oFileHash{name}))
+    {
+        # Ignore certain files that will never be in the manifest
+        if ($strName eq "backup.manifest" ||
+            $strName eq ".")
+        {
+            next;
+        }
+
+        # Get the base directory
+        my $strBasePath = (split("/", $strName))[0];
+
+        if ($strBasePath eq $strName)
+        {
+            my $strSection = $strBasePath eq "tablespace" ? "base:tablespace" : "${strBasePath}:path";
+
+            if (defined(${$oManifestRef}{"${strSection}"}))
+            {
+                next;
+            }
+        }
+        else
+        {
+            my $strPath = substr($strName, length($strBasePath) + 1);
+
+            # Create the section from the base path
+            my $strSection = $strBasePath;
+
+            if ($strSection eq "tablespace")
+            {
+                my $strTablespace = (split("/", $strPath))[0];
+
+                $strSection = $strSection . ":" . $strTablespace;
+
+                if ($strTablespace eq $strPath)
+                {
+                    if (defined(${$oManifestRef}{"${strSection}:path"}))
+                    {
+                        next;
+                    }
+                }
+
+                $strPath = substr($strPath, length($strTablespace) + 1);
+            }
+
+            my $cType = $oFileHash{name}{"${strName}"}{type};
+
+            if ($cType eq "d")
+            {
+                if (defined(${$oManifestRef}{"${strSection}:path"}{"${strPath}"}))
+                {
+                    next;
+                }
+            }
+            else
+            {
+                if (defined(${$oManifestRef}{"${strSection}:file"}{"${strPath}"}))
+                {
+                    if (${$oManifestRef}{"${strSection}:file"}{"${strPath}"}{size} ==
+                            $oFileHash{name}{"${strName}"}{size} &&
+                        ${$oManifestRef}{"${strSection}:file"}{"${strPath}"}{modification_time} ==
+                            $oFileHash{name}{"${strName}"}{modification_time})
+                    {
+                        ${$oManifestRef}{"${strSection}:file"}{"${strPath}"}{exists} = true;
+                        next;
+                    }
+                }
+            }
+        }
+
+        $stryFile[$iFileTotal] = $strName;
+        $iFileTotal++;
+    }
+
+    return @stryFile;
+}
+
+####################################################################################################################################
+# BACKUP_TMP_CLEAN
+#
+# Cleans the temp directory from a previous failed backup so it can be reused
+####################################################################################################################################
+sub backup_tmp_clean
+{
+    my $oManifestRef = shift;
+
+    &log(INFO, "cleaning backup tmp path");
+
+    # Remove the pg_xlog directory since it contains nothing useful for the new backup
+    if (-e $oFile->path_get(PATH_BACKUP_TMP, "base/pg_xlog"))
+    {
+        rmtree($oFile->path_get(PATH_BACKUP_TMP, "base/pg_xlog")) or confess &log(ERROR, "unable to delete tmp pg_xlog path");
+    }
+
+    # Remove the pg_tblspc directory since it is trivial to rebuild, but hard to compare
+    if (-e $oFile->path_get(PATH_BACKUP_TMP, "base/pg_tblspc"))
+    {
+        rmtree($oFile->path_get(PATH_BACKUP_TMP, "base/pg_tblspc")) or confess &log(ERROR, "unable to delete tmp pg_tblspc path");
+    }
+
+    # Get the list of files that should be deleted from temp
+    my @stryFile = backup_file_not_in_manifest(PATH_BACKUP_TMP, $oManifestRef);
+
+    foreach my $strFile (sort {$b cmp $a} @stryFile)
+    {
+        my $strDelete = $oFile->path_get(PATH_BACKUP_TMP, $strFile);
+
+        # If a path then delete it, all the files should have already been deleted since we are going in reverse order
+        if (-d $strDelete)
+        {
+            &log(DEBUG, "remove path ${strDelete}");
+            rmdir($strDelete) or confess &log(ERROR, "unable to delete path ${strDelete}, is it empty?");
+        }
+        # Else delete a file
+        else
+        {
+            &log(DEBUG, "remove file ${strDelete}");
+            unlink($strDelete) or confess &log(ERROR, "unable to delete file ${strDelete}");
+        }
+    }
+}
+
 ####################################################################################################################################
 # BACKUP_MANIFEST_BUILD - Create the backup manifest
 ####################################################################################################################################
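
backup_file_not_in_manifest() above is what makes resume safe: a leftover file is kept only when its size and modification time match the freshly built manifest, in which case it is flagged with {exists} so the copy phase can skip it; everything else is handed to backup_tmp_clean() for deletion. Below is a condensed, self-contained sketch of that comparison with a deliberately simplified hash layout and made-up values.

    # A simplified sketch (not part of the commit) of the resume test.
    use strict;
    use warnings;

    # Hypothetical manifest entry and leftover tmp files from an aborted backup
    my %oManifest = ('base:file' => {'postgresql.conf' => {size => 22261, modification_time => 1392422188}});
    my %oTmpFile  = ('postgresql.conf' => {size => 22261, modification_time => 1392422188},
                     'stale.tmp'       => {size => 100,   modification_time => 1392000000});

    foreach my $strFile (sort(keys(%oTmpFile)))
    {
        if (defined($oManifest{'base:file'}{$strFile}) &&
            $oManifest{'base:file'}{$strFile}{size} == $oTmpFile{$strFile}{size} &&
            $oManifest{'base:file'}{$strFile}{modification_time} == $oTmpFile{$strFile}{modification_time})
        {
            # Size and modification time match - reuse the copy made by the aborted backup
            $oManifest{'base:file'}{$strFile}{exists} = 1;
        }
        else
        {
            # Not in the manifest, or changed since - backup_tmp_clean() would delete it
            print "would delete ${strFile} from the backup tmp path\n";
        }
    }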
@@ -393,12 +527,12 @@ sub backup_manifest_build
     my $oLastManifestRef = shift;
     my $oTablespaceMapRef = shift;
     my $strLevel = shift;

     if (!defined($strLevel))
     {
         $strLevel = "base";
     }

     my %oManifestHash = $oFile->manifest_get(PATH_DB_ABSOLUTE, $strDbClusterPath);
     my $strName;
@@ -589,8 +723,16 @@ sub backup_file

         foreach $strPath (sort(keys ${$oBackupManifestRef}{"${strSectionPath}"}))
         {
-            $oFile->path_create(PATH_BACKUP_TMP, "${strBackupDestinationPath}/${strPath}",
-                                ${$oBackupManifestRef}{"${strSectionPath}"}{"$strPath"}{permission});
+            if (defined(${$oBackupManifestRef}{"${strSectionPath}"}{"$strPath"}{exists}))
+            {
+                &log(TRACE, "path ${strPath} already exists from previous backup attempt");
+
+                ${$oBackupManifestRef}{"${strSectionPath}"}{"$strPath"}{exists} = undef;
+            }
+            else
+            {
+                $oFile->path_create(PATH_BACKUP_TMP, "${strBackupDestinationPath}/${strPath}",
+                                    ${$oBackupManifestRef}{"${strSectionPath}"}{"$strPath"}{permission});
+            }
         }
     }
@@ -606,41 +748,51 @@ sub backup_file
         foreach $strFile (sort(keys ${$oBackupManifestRef}{"${strSectionFile}"}))
         {
             my $strBackupSourceFile = "${strBackupSourcePath}/${strFile}";

-            # If the file has a reference it does not need to be copied since it can be retrieved from the referenced backup.
-            # However, if hard-linking is turned on the link will need to be created
-            my $strReference = ${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{reference};
-
-            if (defined($strReference))
+            if (defined(${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{exists}))
             {
-                # If hardlinking is turned on then create a hardlink for files that have not changed since the last backup
-                if ($bHardLink)
-                {
-                    &log(DEBUG, "hard-linking ${strBackupSourceFile} from ${strReference}");
-
-                    $oFile->link_create(PATH_BACKUP_CLUSTER, "${strReference}/${strBackupDestinationPath}/${strFile}",
-                                        PATH_BACKUP_TMP, "${strBackupDestinationPath}/${strFile}", true, false, !$bPathCreate);
-                }
+                &log(TRACE, "file ${strFile} already exists from previous backup attempt");
+
+                ${$oBackupManifestRef}{"${strSectionPath}"}{"$strFile"}{exists} = undef;
             }
-            # Else copy/compress the file and generate a checksum
             else
             {
-                my $lFileSize = ${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{size};
-
-                $lFileTotal++;
-                $lFileLargeSize += $lFileSize > $iSmallFileThreshold ? $lFileSize : 0;
-                $lFileLargeTotal += $lFileSize > $iSmallFileThreshold ? 1 : 0;
-                $lFileSmallSize += $lFileSize <= $iSmallFileThreshold ? $lFileSize : 0;
-                $lFileSmallTotal += $lFileSize <= $iSmallFileThreshold ? 1 : 0;
-
-                my $strKey = sprintf("ts%012x-fs%012x-fn%012x", $lTablespaceIdx,
-                                     $lFileSize, $lFileTotal);
-
-                $oFileCopyMap{"${strKey}"}{db_file} = $strBackupSourceFile;
-                $oFileCopyMap{"${strKey}"}{backup_file} = "${strBackupDestinationPath}/${strFile}";
-                $oFileCopyMap{"${strKey}"}{size} = $lFileSize;
-                $oFileCopyMap{"${strKey}"}{modification_time} =
-                    ${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{modification_time};
+                # If the file has a reference it does not need to be copied since it can be retrieved from the referenced backup.
+                # However, if hard-linking is turned on the link will need to be created
+                my $strReference = ${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{reference};
+
+                if (defined($strReference))
+                {
+                    # If hardlinking is turned on then create a hardlink for files that have not changed since the last backup
+                    if ($bHardLink)
+                    {
+                        &log(DEBUG, "hard-linking ${strBackupSourceFile} from ${strReference}");
+
+                        $oFile->link_create(PATH_BACKUP_CLUSTER, "${strReference}/${strBackupDestinationPath}/${strFile}",
+                                            PATH_BACKUP_TMP, "${strBackupDestinationPath}/${strFile}", true, false, !$bPathCreate);
+                    }
+                }
+                # Else copy/compress the file and generate a checksum
+                else
+                {
+                    my $lFileSize = ${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{size};
+
+                    # Setup variables needed for threaded copy
+                    $lFileTotal++;
+                    $lFileLargeSize += $lFileSize > $iSmallFileThreshold ? $lFileSize : 0;
+                    $lFileLargeTotal += $lFileSize > $iSmallFileThreshold ? 1 : 0;
+                    $lFileSmallSize += $lFileSize <= $iSmallFileThreshold ? $lFileSize : 0;
+                    $lFileSmallTotal += $lFileSize <= $iSmallFileThreshold ? 1 : 0;
+
+                    # Load the hash used by threaded copy
+                    my $strKey = sprintf("ts%012x-fs%012x-fn%012x", $lTablespaceIdx,
+                                         $lFileSize, $lFileTotal);
+
+                    $oFileCopyMap{"${strKey}"}{db_file} = $strBackupSourceFile;
+                    $oFileCopyMap{"${strKey}"}{backup_file} = "${strBackupDestinationPath}/${strFile}";
+                    $oFileCopyMap{"${strKey}"}{size} = $lFileSize;
+                    $oFileCopyMap{"${strKey}"}{modification_time} =
+                        ${$oBackupManifestRef}{"${strSectionFile}"}{"$strFile"}{modification_time};
+                }
             }
         }
     }
@@ -650,23 +802,24 @@ sub backup_file
    &log(DEBUG, "actual threads ${iThreadLocalMax}/${iThreadMax}");

    # Initialize the thread size array
-   my @lyThreadFileSize;
+   my @oyThreadData;

    for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++)
    {
-       $lyThreadFileSize[$iThreadIdx] = 0;
+       $oyThreadData[$iThreadIdx]{size} = 0;
+       $oyThreadData[$iThreadIdx]{total} = 0;
+       $oyThreadData[$iThreadIdx]{large_size} = 0;
+       $oyThreadData[$iThreadIdx]{large_total} = 0;
+       $oyThreadData[$iThreadIdx]{small_size} = 0;
+       $oyThreadData[$iThreadIdx]{small_total} = 0;
    }

    # Assign files to each thread queue
    my $iThreadFileSmallIdx = 0;
    my $iThreadFileSmallTotalMax = int($lFileSmallTotal / $iThreadLocalMax);
-   my $fThreadFileSmallSize = 0;
-   my $iThreadFileSmallTotal = 0;

    my $iThreadFileLargeIdx = 0;
    my $fThreadFileLargeSizeMax = $lFileLargeSize / $iThreadLocalMax;
-   my $fThreadFileLargeSize = 0;
-   my $iThreadFileLargeTotal = 0;

    &log(INFO, "file total ${lFileTotal}");
    &log(INFO, "file small total ${lFileSmallTotal}, small size: " . file_size_format($lFileSmallSize) .
@@ -682,55 +835,47 @@ sub backup_file
        {
            $oThreadQueue[$iThreadFileLargeIdx]->enqueue($strFile);

-           $fThreadFileLargeSize += $lFileSize;
-           $iThreadFileLargeTotal++;
-           $lyThreadFileSize[$iThreadFileLargeIdx] += $lFileSize;
+           $oyThreadData[$iThreadFileLargeIdx]{large_size} += $lFileSize;
+           $oyThreadData[$iThreadFileLargeIdx]{large_total}++;
+           $oyThreadData[$iThreadFileLargeIdx]{size} += $lFileSize;

-           if ($fThreadFileLargeSize >= $fThreadFileLargeSizeMax && $iThreadFileLargeIdx < $iThreadLocalMax - 1)
+           if ($oyThreadData[$iThreadFileLargeIdx]{large_size} >= $fThreadFileLargeSizeMax &&
+               $iThreadFileLargeIdx < $iThreadLocalMax - 1)
            {
-               &log(INFO, "thread ${iThreadFileLargeIdx} large total ${iThreadFileLargeTotal}, size ${fThreadFileLargeSize}" .
-                          " (" . file_size_format(int(${fThreadFileLargeSize})) . ")");
-
                $iThreadFileLargeIdx++;
-               $fThreadFileLargeSize = 0;
-               $iThreadFileLargeTotal = 0;
            }
        }
        else
        {
            $oThreadQueue[$iThreadFileSmallIdx]->enqueue($strFile);

-           $fThreadFileSmallSize += $lFileSize;
-           $iThreadFileSmallTotal++;
-           $lyThreadFileSize[$iThreadFileSmallIdx] += $lFileSize;
+           $oyThreadData[$iThreadFileSmallIdx]{small_size} += $lFileSize;
+           $oyThreadData[$iThreadFileSmallIdx]{small_total}++;
+           $oyThreadData[$iThreadFileSmallIdx]{size} += $lFileSize;

-           if ($iThreadFileSmallTotal >= $iThreadFileSmallTotalMax && $iThreadFileSmallIdx < $iThreadLocalMax - 1)
+           if ($oyThreadData[$iThreadFileSmallIdx]{small_total} >= $iThreadFileSmallTotalMax &&
+               $iThreadFileSmallIdx < $iThreadLocalMax - 1)
            {
-               &log(INFO, "thread ${iThreadFileSmallIdx} small total ${iThreadFileSmallTotal}, size ${fThreadFileSmallSize}" .
-                          " (" . file_size_format(int(${fThreadFileSmallSize})) . ")");
-
                $iThreadFileSmallIdx++;
-               $fThreadFileSmallSize = 0;
-               $iThreadFileSmallTotal = 0;
            }
        }
    }

-   &log(INFO, "thread ${iThreadFileLargeIdx} large total ${iThreadFileLargeTotal}, size ${fThreadFileLargeSize}" .
-              " (" . file_size_format(int(${fThreadFileLargeSize})) . ")");
-   &log(INFO, "thread ${iThreadFileSmallIdx} small total ${iThreadFileSmallTotal}, size ${fThreadFileSmallSize}" .
-              " (" . file_size_format(int(${fThreadFileLargeSize})) . ")");
-
    # End each thread queue and start the thread
    my @oThread;

    for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++)
    {
+       &log(INFO, "thread ${iThreadIdx} large total $oyThreadData[$iThreadIdx]{large_total}, " .
+                  "size $oyThreadData[$iThreadIdx]{large_size}");
+       &log(INFO, "thread ${iThreadIdx} small total $oyThreadData[$iThreadIdx]{small_total}, " .
+                  "size $oyThreadData[$iThreadIdx]{small_size}");
+
        $oThreadQueue[$iThreadIdx]->enqueue(undef);
        $oThread[$iThreadIdx] = threads->create(\&backup_file_thread, $iThreadIdx, $bNoChecksum, !$bPathCreate,
-                                               $lyThreadFileSize[$iThreadIdx]);
+                                               $oyThreadData[$iThreadIdx]{size});
    }

    # Rejoin the threads
    for (my $iThreadIdx = 0; $iThreadIdx < $iThreadLocalMax; $iThreadIdx++)
    {
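
The queue-assignment change above replaces the per-batch counters with per-thread accumulators (@oyThreadData), but the balancing rule is unchanged: large files are spread across threads by cumulative size, small files by count. A self-contained sketch of that rule follows; the variable names mirror the diff, but the thread count and file sizes are made up.

    # A hypothetical sketch (not part of the commit) of the thread queue balancing rule.
    use strict;
    use warnings;

    my $iThreadMax = 3;
    my $iSmallFileThreshold = 100;
    my @lyFileSize = (900, 850, 10, 9, 8, 7, 6, 5);   # hypothetical file sizes

    # Totals used to compute the per-thread budgets
    my $lFileLargeSize = 0;
    my $lFileSmallTotal = 0;

    foreach my $lSize (@lyFileSize)
    {
        if ($lSize > $iSmallFileThreshold) {$lFileLargeSize += $lSize;} else {$lFileSmallTotal++;}
    }

    my $fThreadFileLargeSizeMax = $lFileLargeSize / $iThreadMax;      # size budget per thread
    my $iThreadFileSmallTotalMax = int($lFileSmallTotal / $iThreadMax);  # count budget per thread

    # Per-thread accumulators, mirroring @oyThreadData in the diff
    my @oyThreadData = map { +{large_size => 0, small_total => 0} } (1 .. $iThreadMax);
    my $iLargeIdx = 0;
    my $iSmallIdx = 0;

    foreach my $lSize (@lyFileSize)
    {
        if ($lSize > $iSmallFileThreshold)
        {
            print "thread ${iLargeIdx}: large file, size ${lSize}\n";
            $oyThreadData[$iLargeIdx]{large_size} += $lSize;

            # Move to the next thread once this one has its share of large-file bytes
            if ($oyThreadData[$iLargeIdx]{large_size} >= $fThreadFileLargeSizeMax && $iLargeIdx < $iThreadMax - 1)
            {
                $iLargeIdx++;
            }
        }
        else
        {
            print "thread ${iSmallIdx}: small file, size ${lSize}\n";
            $oyThreadData[$iSmallIdx]{small_total}++;

            # Move to the next thread once this one has its share of small files by count
            if ($oyThreadData[$iSmallIdx]{small_total} >= $iThreadFileSmallTotalMax && $iSmallIdx < $iThreadMax - 1)
            {
                $iSmallIdx++;
            }
        }
    }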
@@ -760,8 +905,6 @@ sub backup_file_thread
    while (my $strFile = $oThreadQueue[$iThreadIdx]->dequeue())
    {
-       # &log(INFO, "total ${lSizeTotal} size ${lSize}");
-
        &log(INFO, "thread ${iThreadIdx} backing up file $oFileCopyMap{$strFile}{db_file} (" .
                   file_size_format($oFileCopyMap{$strFile}{size}) .
                   ($lSizeTotal > 0 ? ", " . int($lSize * 100 / $lSizeTotal) . "%" : "") . ")");
@@ -854,19 +997,27 @@ sub backup
    my $strBackupTmpPath = $oFile->path_get(PATH_BACKUP_TMP);
    my $strBackupConfFile = $oFile->path_get(PATH_BACKUP_TMP, "backup.manifest");

-   # If the backup tmp path already exists, delete the conf file
+   # Start backup
+   my %oBackupManifest;
+
+   ${oBackupManifest}{backup}{label} = $strBackupPath;
+
+   my $strArchiveStart = $oDb->backup_start($strBackupPath);
+   ${oBackupManifest}{backup}{archive_start} = $strArchiveStart;
+
+   &log(INFO, 'archive start: ' . ${oBackupManifest}{backup}{archive_start});
+
+   # Build the backup manifest
+   my %oTablespaceMap = $oDb->tablespace_map_get();
+
+   backup_manifest_build($oFile->{strCommandManifest}, $strDbClusterPath, \%oBackupManifest, \%oLastManifest, \%oTablespaceMap);
+
+   # If the backup tmp path already exists, remove invalid files
    if (-e $strBackupTmpPath)
    {
-       &log(WARN, "backup path $strBackupTmpPath already exists");
+       &log(WARN, "aborted backup already exists, will be cleaned to remove invalid files and resumed");

-       # !!! This is temporary until we can clean backup dirs
-       system("rm -rf $strBackupTmpPath") == 0 or confess &log(ERROR, "unable to delete ${strBackupTmpPath}");
-       # rmtree($strBackupTmpPath) or confess &log(ERROR, "unable to delete ${strBackupTmpPath}");
-
-       $oFile->path_create(PATH_BACKUP_TMP);
-
-       #if (-e $strBackupConfFile)
-       #{
-       #    unlink $strBackupConfFile or die &log(ERROR, "backup config ${strBackupConfFile} could not be deleted");
-       #}
+       # Clean the old backup tmp path
+       backup_tmp_clean(\%oBackupManifest);
    }
    # Else create the backup tmp path
    else
@@ -875,24 +1026,6 @@ sub backup
        $oFile->path_create(PATH_BACKUP_TMP);
    }

-   # Create a new backup manifest hash
-   my %oBackupManifest;
-
-   # Start backup
-   ${oBackupManifest}{backup}{label} = $strBackupPath;
-
-   my $strArchiveStart = $oDb->backup_start($strBackupPath);
-   ${oBackupManifest}{backup}{archive_start} = $strArchiveStart;
-
-   &log(INFO, 'archive start: ' . $strArchiveStart);
-
-   # Build the backup manifest
-   my %oTablespaceMap = $oDb->tablespace_map_get();
-
-   backup_manifest_build($oFile->{strCommandManifest}, $strDbClusterPath, \%oBackupManifest, \%oLastManifest, \%oTablespaceMap);
-
-   # Delete files leftover from a partial backup
-   # !!! do it
-
    # Save the backup conf file first time - so we can see what is happening in the backup
    backup_manifest_save($strBackupConfFile, \%oBackupManifest);
@@ -903,8 +1036,7 @@ sub backup
    my $strArchiveStop = $oDb->backup_stop();

    ${oBackupManifest}{backup}{archive_stop} = $strArchiveStop;
-
-   &log(INFO, 'archive stop: ' . $strArchiveStop);
+   &log(INFO, 'archive stop: ' . ${oBackupManifest}{backup}{archive_stop});

    # If archive logs are required to complete the backup, then fetch them. This is the default, but can be overridden if the
    # archive logs are going to a different server. Be careful here because there is no way to verify that the backup will be

Changed file 3 of 3

@@ -7,11 +7,13 @@ use strict;
 use warnings;
 use Carp;

 use IPC::System::Simple qw(capture);
+use Fcntl qw(:DEFAULT :flock);
 use Exporter qw(import);

 our @EXPORT = qw(data_hash_build trim common_prefix wait_for_file date_string_get file_size_format execute
                  log log_file_set log_level_set
+                 lock_file_create lock_file_remove
                  TRACE DEBUG ERROR ASSERT WARN INFO true false);

 # Global constants
@@ -37,6 +39,9 @@ my $strLogLevelFile = ERROR;
 my $strLogLevelConsole = ERROR;
 my %oLogLevelRank;

+my $strLockFile;
+my $hLockFile;
+
 $oLogLevelRank{TRACE}{rank} = 6;
 $oLogLevelRank{DEBUG}{rank} = 5;
 $oLogLevelRank{INFO}{rank} = 4;
@@ -45,6 +50,51 @@ $oLogLevelRank{ERROR}{rank} = 2;
 $oLogLevelRank{ASSERT}{rank} = 1;
 $oLogLevelRank{OFF}{rank} = 0;

+####################################################################################################################################
+# LOCK_FILE_CREATE
+####################################################################################################################################
+sub lock_file_create
+{
+    my $strLockFileParam = shift;
+
+    $strLockFile = $strLockFileParam;
+
+    if (defined($hLockFile))
+    {
+        confess &log(ASSERT, "${strLockFile} lock is already held, cannot create lock ${strLockFile}");
+    }
+
+    sysopen($hLockFile, $strLockFile, O_WRONLY | O_CREAT)
+        or confess &log(ERROR, "unable to open lock file ${strLockFile}");
+
+    if (!flock($hLockFile, LOCK_EX | LOCK_NB))
+    {
+        close($hLockFile);
+        return 0;
+    }
+
+    return $hLockFile;
+}
+
+####################################################################################################################################
+# LOCK_FILE_REMOVE
+####################################################################################################################################
+sub lock_file_remove
+{
+    if (defined($hLockFile))
+    {
+        close($hLockFile);
+        unlink($strLockFile) or confess &log(ERROR, "unable to remove lock file ${strLockFile}");
+
+        $hLockFile = undef;
+        $strLockFile = undef;
+    }
+    else
+    {
+        confess &log(ASSERT, "there is no lock to free");
+    }
+}
+
 ####################################################################################################################################
 # DATA_HASH_BUILD - Hash a delimited file with header
 ####################################################################################################################################
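
lock_file_create() relies on standard flock() semantics: LOCK_EX | LOCK_NB fails immediately instead of blocking when another process holds the lock, and the lock is released automatically when the holding process closes the handle or exits, so a crashed operation cannot leave a stale lock behind even though the lock file itself may remain. A standalone demonstration of that behaviour follows; the lock path is a throwaway temporary file.

    # Standalone demonstration (not part of the commit) of the non-blocking flock pattern.
    use strict;
    use warnings;
    use Fcntl qw(:DEFAULT :flock);

    my $strLockFile = "/tmp/flock-demo.lock";   # hypothetical throwaway path

    sysopen(my $hLockFile, $strLockFile, O_WRONLY | O_CREAT)
        or die "unable to open lock file ${strLockFile}: $!";

    if (flock($hLockFile, LOCK_EX | LOCK_NB))
    {
        print "lock acquired - a second copy of this script would exit immediately\n";
        sleep(10);   # hold the lock long enough to run a second copy and observe the conflict
    }
    else
    {
        print "lock is held by another process - exiting\n";
        exit 0;
    }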