
Fix short read in block incremental restore.

During restore it is possible to read all the blocks out of a compressed super block without reading all the input. This is because the compression format may have some trailing bytes that are not required for decompression but are required to indicate that the data has ended. If a read buffer happens to align with the compressed data in a certain way, these last bytes might not be read.

Explicitly read out any final bytes at the end of each super block to handle this case. This should always produce no additional output data, and we check for that, but it does advance the read position to the beginning of the next compressed super block so decompression can begin there without error.
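As a rough standalone sketch of the pattern (plain stdio and fmemopen standing in for pgBackRest's IoRead API; the readFlush() helper and the sample data are made up for illustration): read the payload the caller needs, then drain the stream to EOF so the position is correct for whatever follows. In the actual restore path the drain happens on the decompressing reader, so it is expected to produce zero output bytes while still consuming the trailing compressed input.

#include <stdio.h>
#include <string.h>

// Read and discard until EOF, returning the number of bytes discarded
// (the same loop the new ioReadFlush() below performs against an IoRead)
static size_t
readFlush(FILE *const in)
{
    char buffer[64];
    size_t total = 0;
    size_t bytes;

    while ((bytes = fread(buffer, 1, sizeof(buffer), in)) != 0)
        total += bytes;

    return total;
}

int
main(void)
{
    // Simulated super block: the data the caller wants plus a trailing end marker
    char data[] = "123456\x03";
    FILE *const in = fmemopen(data, strlen(data), "rb");

    // The caller reads exactly the six bytes it needs ...
    char block[6];
    const size_t blockSize = fread(block, 1, sizeof(block), in);

    // ... then drains the rest so the position is past the trailing marker
    printf("read %zu block byte(s), flushed %zu trailing byte(s)\n", blockSize, readFlush(in));

    fclose(in);
    return 0;
}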
This commit is contained in:
David Steele
2023-12-27 12:49:47 -03:00
committed by GitHub
parent c47b39acf6
commit f4a4af299b
8 changed files with 162 additions and 1 deletion

View File

@@ -1,6 +1,23 @@
<release date="XXXX-XX-XX" version="2.50dev" title="Under Development">
<release-core-list>
<release-bug-list>
<release-item>
<github-issue id="2215"/>
<github-issue id="2238"/>
<github-pull-request id="2256"/>
<release-item-contributor-list>
<release-item-ideator id="adol.rodriguez"/>
<release-item-ideator id="brent.graveland"/>
<release-item-contributor id="david.steele"/>
<release-item-reviewer id="stephen.frost"/>
<!-- Actually tester, but we don't have a tag for that yet -->
<release-item-reviewer id="brent.graveland"/>
</release-item-contributor-list>
<p>Fix short read in block incremental restore.</p>
</release-item>
<release-item>
<github-issue id="2251"/>
<github-pull-request id="2252"/>

View File

@@ -25,6 +25,11 @@
<contributor-id type="github">Flamacue</contributor-id>
</contributor>
<contributor id="adol.rodriguez">
<contributor-name-display>Adol Rodriguez</contributor-name-display>
<contributor-id type="github">adolsalamanca</contributor-id>
</contributor>
<contributor id="adrian.vondendriesch">
<contributor-name-display>Adrian Vondendriesch</contributor-name-display>
<contributor-id type="github">disco-stu</contributor-id>
@@ -120,6 +125,11 @@
<contributor-id type="github">bradnicholson</contributor-id>
</contributor>
<contributor id="brent.graveland">
<contributor-name-display>Brent Graveland</contributor-name-display>
<contributor-id type="github">graveland</contributor-id>
</contributor>
<contributor id="brian.almeida">
<contributor-name-display>Brian Almeida</contributor-name-display>
<contributor-id type="github">thunderkeys</contributor-id>

View File

@@ -293,6 +293,11 @@ blockDeltaNext(BlockDelta *const this, const BlockDeltaRead *const readDelta, Io
if (result != NULL)
break;
// Flush any bytes that remain in the super block and check that nothing more is written out. Bytes may remain even
// though all the needed blocks have been read, e.g. an end of data marker that produces no output. If these remaining
// bytes are not read then the next read will start too early.
ioReadFlushP(this->limitRead, .errorOnBytes = true);
this->superBlockData = NULL;
this->superBlockIdx++;
}

View File

@@ -427,6 +427,42 @@ ioReadReady(IoRead *this, IoReadReadyParam param)
FUNCTION_LOG_RETURN(BOOL, result);
}
/**********************************************************************************************************************************/
FN_EXTERN uint64_t
ioReadFlush(IoRead *const this, const IoReadFlushParam param)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_READ, this);
FUNCTION_LOG_PARAM(BOOL, param.errorOnBytes);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(this->pub.opened && !this->pub.closed);
// Flush remaining data
uint64_t result = 0;
if (!ioReadEof(this))
{
Buffer *const buffer = bufNew(ioBufferSize());
do
{
result += ioRead(this, buffer);
bufUsedZero(buffer);
}
while (!ioReadEof(this));
bufFree(buffer);
}
// Error when bytes found and error requested
if (result != 0 && param.errorOnBytes)
THROW_FMT(FileReadError, "expected EOF but flushed %" PRIu64 " byte(s)", result);
FUNCTION_LOG_RETURN(UINT64, result);
}
/**********************************************************************************************************************************/
FN_EXTERN void
ioReadClose(IoRead *this)

View File

@@ -85,6 +85,18 @@ typedef struct IoReadReadyParam
FN_EXTERN bool ioReadReady(IoRead *this, IoReadReadyParam param);
// Flush all remaining bytes and return bytes flushed. Optionally error when bytes are flushed.
typedef struct IoReadFlushParam
{
VAR_PARAM_HEADER;
bool errorOnBytes; // Error when any bytes are found during flush
} IoReadFlushParam;
#define ioReadFlushP(this, ...) \
ioReadFlush(this, (IoReadFlushParam){VAR_PARAM_INIT, __VA_ARGS__})
FN_EXTERN uint64_t ioReadFlush(IoRead *this, IoReadFlushParam param);
// Close the IO
FN_EXTERN void ioReadClose(IoRead *this);
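For reference, a small hedged usage sketch (the checkFullyRead() helper and its caller are hypothetical, not part of this commit) showing how the variadic-parameter macro above is invoked:

#include "common/io/read.h"

// Hypothetical helper: confirm a reader has been fully consumed, throwing otherwise
static void
checkFullyRead(IoRead *const read)
{
    // ioReadFlushP(read, .errorOnBytes = true) expands to
    // ioReadFlush(read, (IoReadFlushParam){VAR_PARAM_INIT, .errorOnBytes = true})
    // and throws FileReadError if any bytes are flushed before EOF
    ioReadFlushP(read, .errorOnBytes = true);
}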

View File

@@ -907,7 +907,7 @@ unit:
# ----------------------------------------------------------------------------------------------------------------------------
- name: restore
total: 14
total: 15
coverage:
- command/restore/blockChecksum

View File

@@ -7,11 +7,13 @@ Test Restore Command
#include "command/stanza/create.h"
#include "common/compress/helper.h"
#include "common/crypto/cipherBlock.h"
#include "common/io/bufferRead.h"
#include "postgres/version.h"
#include "storage/helper.h"
#include "storage/posix/storage.h"
#include "common/harnessBackup.h"
#include "common/harnessBlockIncr.h"
#include "common/harnessConfig.h"
#include "common/harnessInfo.h"
#include "common/harnessManifest.h"
@@ -206,6 +208,60 @@ testRun(void)
ioWriteFree(write);
}
// *****************************************************************************************************************************
if (testBegin("BlockDelta"))
{
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("super blocks are read until the end");
// Write block incremental with multiple super blocks
const Buffer *source = BUFSTRZ("123456789ABC");
Buffer *destination = bufNew(256);
IoWrite *write = ioBufferWriteNew(destination);
ioFilterGroupAdd(
ioWriteFilterGroup(write), blockIncrNew(6, 3, 5, 0, 0, 0, NULL, compressFilterP(compressTypeGz, 1, .raw = true), NULL));
ioWriteOpen(write);
ioWrite(write, source);
ioWriteClose(write);
// Extract block map
uint64_t mapSize = pckReadU64P(ioFilterGroupResultP(ioWriteFilterGroup(write), BLOCK_INCR_FILTER_TYPE));
const BlockMap *blockMap = blockMapNewRead(
ioBufferReadNewOpen(BUF(bufPtr(destination) + (bufUsed(destination) - (size_t)mapSize), (size_t)mapSize)), 3, 5);
// Perform block delta
BlockDelta *blockDelta = blockDeltaNew(blockMap, 3, 5, NULL, cipherTypeNone, NULL, compressTypeGz);
const BlockDeltaRead *blockDeltaRead = blockDeltaReadGet(blockDelta, 0);
IoRead *read = ioBufferReadNewOpen(destination);
// Set buffer size to one to make sure super blocks are properly flushed
size_t bufferSizeOld = ioBufferSize();
ioBufferSizeSet(1);
// Read all blocks from multiple super blocks
TEST_RESULT_STR_Z(strNewBuf(blockDeltaNext(blockDelta, blockDeltaRead, read)->block), "123", "read block");
TEST_RESULT_STR_Z(strNewBuf(blockDeltaNext(blockDelta, blockDeltaRead, read)->block), "456", "read block");
TEST_RESULT_STR_Z(strNewBuf(blockDeltaNext(blockDelta, blockDeltaRead, read)->block), "789", "read block");
TEST_RESULT_STR_Z(strNewBuf(blockDeltaNext(blockDelta, blockDeltaRead, read)->block), "ABC", "read block");
TEST_RESULT_PTR(blockDeltaNext(blockDelta, blockDeltaRead, read), NULL, "no more blocks");
// Reset buffer size
ioBufferSizeSet(bufferSizeOld);
// Now test the block map to show that we are reading in the right position after multiple super blocks
TEST_RESULT_STR_Z(
hrnBlockDeltaRender(blockMapNewRead(read, 3, 5), 3, 5),
"read {reference: 0, bundleId: 0, offset: 0, size: 28}\n"
" super block {max: 6, size: 14}\n"
" block {no: 0, offset: 0}\n"
" block {no: 1, offset: 3}\n"
" super block {max: 6, size: 14}\n"
" block {no: 0, offset: 6}\n"
" block {no: 1, offset: 9}\n",
"check delta");
}
// *****************************************************************************************************************************
if (testBegin("restoreFile()"))
{

View File

@@ -296,6 +296,31 @@ testRun(void)
TEST_RESULT_BOOL(ioReadOpen(bufferRead), true, " open");
TEST_RESULT_BOOL(ioReadEof(bufferRead), false, " not eof");
TEST_RESULT_UINT(ioRead(bufferRead, buffer), 0, " read 0 bytes");
TEST_RESULT_UINT(ioReadFlushP(bufferRead), 0, " flush 0 bytes");
TEST_RESULT_BOOL(ioReadEof(bufferRead), true, " now eof");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("read flush bytes");
ioBufferSizeSet(1);
bufferOriginal = bufNewC("123", 3);
TEST_ASSIGN(bufferRead, ioBufferReadNew(bufferOriginal), "create buffer read object");
TEST_RESULT_BOOL(ioReadOpen(bufferRead), true, " open");
TEST_RESULT_BOOL(ioReadEof(bufferRead), false, " not eof");
TEST_RESULT_UINT(ioReadFlushP(bufferRead), 3, " flush 3 bytes");
TEST_RESULT_BOOL(ioReadEof(bufferRead), true, " now eof");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("read flush bytes and error");
ioBufferSizeSet(1);
bufferOriginal = bufNewC("123", 3);
TEST_ASSIGN(bufferRead, ioBufferReadNew(bufferOriginal), "create buffer read object");
TEST_RESULT_BOOL(ioReadOpen(bufferRead), true, " open");
TEST_RESULT_BOOL(ioReadEof(bufferRead), false, " not eof");
TEST_ERROR(ioReadFlushP(bufferRead, .errorOnBytes = true), FileReadError, "expected EOF but flushed 3 byte(s)");
TEST_RESULT_BOOL(ioReadEof(bufferRead), true, " now eof");
// -------------------------------------------------------------------------------------------------------------------------