
Removing chunking and block numbers from incremental block list.

These were intended to allow the block list to be scanned without reading the map but were never utilized. They were left in "just in case" and because they did not seem to be doing any harm.

In fact, it is better not to have the block numbers because this allows us to set the block size at a future time as long as it is a factor of the super block size. One way this could be useful is to store older files without super blocks or a map in the full backup and then build a map for them if the file gets modified in a diff/incr backup. This would require reading the file from the full backup to build the map, but it would be more space efficient and we could make more intelligent decisions about block size. It would also be possible to change the block size even if one had already been selected in a prior backup.

Omitting the block numbers makes the chunking unnecessary since there is now no way to make sense of the block list without the map. Also, we might want to build maps for unchunked block lists, i.e. files that were copied normally.
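To make that concrete: once block numbers are gone, a block's location inside a decrypted and decompressed super block is implied entirely by its index, so the block size can be chosen after the fact as long as it is a factor of the super block size. Below is a minimal standalone sketch with illustrative sizes (hypothetical values, not pgbackrest code); only the trailing block of an uneven final super block is shorter than the block size.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative sizes only: a super block holding three full 8KiB blocks plus a short tail */
    const uint64_t superBlockSize = 8192 * 3 + 100;
    const uint64_t blockSize = 8192;
    const uint64_t blockTotal = (superBlockSize + blockSize - 1) / blockSize;

    for (uint64_t blockIdx = 0; blockIdx < blockTotal; blockIdx++)
    {
        /* The offset is implied by the index, so no block number needs to be stored in the block list */
        const uint64_t offset = blockIdx * blockSize;

        /* Only the trailing block of the final super block may be smaller than blockSize */
        const uint64_t size =
            blockIdx == blockTotal - 1 && superBlockSize % blockSize != 0 ? superBlockSize % blockSize : blockSize;

        printf("block %" PRIu64 ": offset %" PRIu64 ", size %" PRIu64 "\n", blockIdx, offset, size);
    }

    return 0;
}

Because nothing in the block list itself encodes these positions, a future backup could re-derive them with a different block size, provided the new size still divides the super block size.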
David Steele
2023-04-27 23:29:12 +03:00
committed by GitHub
parent 3fc3690dd7
commit dd4e52679e
16 changed files with 193 additions and 485 deletions

View File

@ -17,6 +17,21 @@
<release date="XXXX-XX-XX" version="2.46dev" title="UNDER DEVELOPMENT">
<release-core-list>
<release-feature-list>
<release-item>
<commit subject="Removing chunking and block numbers from incremental block list.">
<github-pull-request id="2029"/>
</commit>
<release-item-contributor-list>
<release-item-contributor id="david.steele"/>
<release-item-reviewer id="john.morris"/>
<release-item-reviewer id="stephen.frost"/>
<release-item-reviewer id="stefan.fercot"/>
</release-item-contributor-list>
<p>Block incremental backup.</p>
</release-item>
<release-item>
<github-pull-request id="2051"/>

View File

@ -117,12 +117,10 @@ SRCS = \
common/exec.c \
common/fork.c \
common/ini.c \
common/io/chunkedRead.c \
common/io/client.c \
common/io/fd.c \
common/io/fdRead.c \
common/io/fdWrite.c \
common/io/filter/chunk.c \
common/io/filter/size.c \
common/io/http/client.c \
common/io/http/common.c \
@ -132,6 +130,7 @@ SRCS = \
common/io/http/response.c \
common/io/http/session.c \
common/io/http/url.c \
common/io/limitRead.c \
common/io/server.c \
common/io/session.c \
common/io/socket/client.c \

View File

@ -12,7 +12,6 @@ Block Incremental Filter
#include "common/io/bufferRead.h"
#include "common/io/bufferWrite.h"
#include "common/io/filter/buffer.h"
#include "common/io/filter/chunk.h"
#include "common/io/filter/size.h"
#include "common/io/io.h"
#include "common/log.h"
@ -32,7 +31,6 @@ typedef struct BlockIncr
const Pack *encryptParam; // Encrypt filter parameters
unsigned int blockNo; // Block number
unsigned int blockNoLast; // Last block no
uint64_t superBlockNo; // Block no in super block
uint64_t blockOffset; // Block offset
uint64_t superBlockSize; // Super block
@ -143,38 +141,23 @@ blockIncrProcess(THIS_VOID, const Buffer *const input, Buffer *const output)
}
MEM_CONTEXT_OBJ_END();
bool bufferRequired = true;
// Add compress filter
if (this->compressParam != NULL)
{
ioFilterGroupAdd(
ioWriteFilterGroup(this->blockOutWrite),
compressFilterPack(this->compressType, this->compressParam));
bufferRequired = false;
}
// Add encrypt filter
if (this->encryptParam != NULL)
{
ioFilterGroupAdd(
ioWriteFilterGroup(this->blockOutWrite), cipherBlockNewPack(this->encryptParam));
bufferRequired = false;
}
ioFilterGroupAdd(ioWriteFilterGroup(this->blockOutWrite), cipherBlockNewPack(this->encryptParam));
// If no compress/encrypt then add a buffer so chunk sizes are as large as possible
if (bufferRequired)
ioFilterGroupAdd(ioWriteFilterGroup(this->blockOutWrite), ioBufferNew());
// Add chunk and size filters
ioFilterGroupAdd(ioWriteFilterGroup(this->blockOutWrite), ioChunkNew());
// Add size filter
ioFilterGroupAdd(ioWriteFilterGroup(this->blockOutWrite), ioSizeNew());
ioWriteOpen(this->blockOutWrite);
}
// Write the block no as a delta of the prior block no
ioWriteVarIntU64(this->blockOutWrite, this->blockNo - this->blockNoLast);
// Copy block data through the filters
ioCopyP(ioBufferReadNewOpen(this->block), this->blockOutWrite);
this->blockOutSize += bufUsed(this->block);
@ -196,9 +179,6 @@ blockIncrProcess(THIS_VOID, const Buffer *const input, Buffer *const output)
blockMapAdd(this->blockMapOut, &blockMapItem);
lstAdd(this->blockOutList, &blockMapItemIdx);
// Set last block no
this->blockNoLast = this->blockNo;
// Increment super block no
this->superBlockNo++;
}

View File

@ -7,7 +7,7 @@ Block Restore
#include "command/restore/blockDelta.h"
#include "common/crypto/cipherBlock.h"
#include "common/debug.h"
#include "common/io/chunkedRead.h"
#include "common/io/limitRead.h"
#include "common/log.h"
/***********************************************************************************************************************************
@ -38,7 +38,7 @@ struct BlockDelta
const BlockDeltaSuperBlock *superBlockData; // Current super block data
unsigned int superBlockIdx; // Current super block index
IoRead *chunkedRead; // Chunked read for current super block
IoRead *limitRead; // Limit read for current super block
const BlockDeltaBlock *blockData; // Current block data
unsigned int blockIdx; // Current block index
unsigned int blockTotal; // Block total for super block
@ -227,27 +227,27 @@ blockDeltaNext(BlockDelta *const this, const BlockDeltaRead *const readDelta, Io
// If the super block read has not begun yet
if (this->superBlockData == NULL)
{
// Free prior chunked read and create chunked read for current super block
ioReadFree(this->chunkedRead);
// Free prior limit read and create limit read for current super block
ioReadFree(this->limitRead);
this->superBlockData = lstGet(readDelta->superBlockList, this->superBlockIdx);
MEM_CONTEXT_OBJ_BEGIN(this)
{
this->chunkedRead = ioChunkedReadNew(readIo);
this->limitRead = ioLimitReadNew(readIo, this->superBlockData->size);
}
MEM_CONTEXT_OBJ_END();
if (this->cipherType != cipherTypeNone)
{
ioFilterGroupAdd(
ioReadFilterGroup(this->chunkedRead),
ioReadFilterGroup(this->limitRead),
cipherBlockNewP(cipherModeDecrypt, this->cipherType, BUFSTR(this->cipherPass), .raw = true));
}
if (this->compressType != compressTypeNone)
ioFilterGroupAdd(ioReadFilterGroup(this->chunkedRead), decompressFilterP(this->compressType, .raw = true));
ioFilterGroupAdd(ioReadFilterGroup(this->limitRead), decompressFilterP(this->compressType, .raw = true));
ioReadOpen(this->chunkedRead);
ioReadOpen(this->limitRead);
// Set block info
this->blockIdx = 0;
@ -261,18 +261,11 @@ blockDeltaNext(BlockDelta *const this, const BlockDeltaRead *const readDelta, Io
// Find required blocks in the super block
while (this->blockIdx < this->blockTotal)
{
// Read encoded info about the block, which is not used here
ioReadVarIntU64(this->chunkedRead);
// Clear buffer and read block
bufUsedZero(this->write.block);
bufLimitClear(this->write.block);
// Apply block size limit if required and read the block
bufUsedSet(this->write.block, 0);
if (this->blockIdx == this->blockTotal - 1 && this->superBlockData->superBlockSize % this->blockSize != 0)
bufLimitSet(this->write.block, (size_t)(this->superBlockData->superBlockSize % this->blockSize));
else
bufLimitClear(this->write.block);
ioRead(this->chunkedRead, this->write.block);
ioRead(this->limitRead, this->write.block);
// If the block matches the block we are expecting
if (this->blockIdx == this->blockData->no)

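Putting the pieces of the hunk above together, the restore side now reads a super block roughly as follows. This is a consolidation sketch of pgbackrest internals shown in this diff; the function name superBlockReadSketch, the parameter bundling, and the blockTotal calculation are hypothetical (the real code tracks blockTotal separately).

static void
superBlockReadSketch(
    IoRead *const readIo, const BlockDeltaSuperBlock *const superBlockData, Buffer *const block, const size_t blockSize,
    const CipherType cipherType, const String *const cipherPass, const CompressType compressType)
{
    // Bound the read by the super block size stored in the map
    IoRead *const limitRead = ioLimitReadNew(readIo, superBlockData->size);

    // Decrypt and/or decompress when the repo file was stored encrypted/compressed
    if (cipherType != cipherTypeNone)
    {
        ioFilterGroupAdd(
            ioReadFilterGroup(limitRead), cipherBlockNewP(cipherModeDecrypt, cipherType, BUFSTR(cipherPass), .raw = true));
    }

    if (compressType != compressTypeNone)
        ioFilterGroupAdd(ioReadFilterGroup(limitRead), decompressFilterP(compressType, .raw = true));

    ioReadOpen(limitRead);

    // Blocks are read in index order; only the trailing block of an uneven super block is shorter than blockSize
    const unsigned int blockTotal = (unsigned int)((superBlockData->superBlockSize + blockSize - 1) / blockSize);

    for (unsigned int blockIdx = 0; blockIdx < blockTotal; blockIdx++)
    {
        bufUsedSet(block, 0);

        if (blockIdx == blockTotal - 1 && superBlockData->superBlockSize % blockSize != 0)
            bufLimitSet(block, (size_t)(superBlockData->superBlockSize % blockSize));
        else
            bufLimitClear(block);

        ioRead(limitRead, block);

        // ...compare blockIdx against the block map and write the block to the target file if it is needed...
    }
}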
View File

@ -15,7 +15,6 @@ Restore File
#include "common/crypto/cipherBlock.h"
#include "common/crypto/hash.h"
#include "common/debug.h"
#include "common/io/chunkedRead.h"
#include "common/io/fdWrite.h"
#include "common/io/filter/group.h"
#include "common/io/filter/size.h"

View File

@ -1,167 +0,0 @@
/***********************************************************************************************************************************
Read Chunked I/O
***********************************************************************************************************************************/
#include "build.auto.h"
#include "common/debug.h"
#include "common/io/bufferRead.h"
#include "common/io/read.h"
#include "common/log.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct IoChunkedRead
{
IoRead *read; // IoRead to read chunked data from
bool eof; // Has the end of the chunked data been reached?
size_t chunkLast; // Size of the last chunk
size_t chunkRemains; // Remaining data in the current chunk
} IoChunkedRead;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_IO_CHUNKED_READ_TYPE \
IoChunkedRead *
#define FUNCTION_LOG_IO_CHUNKED_READ_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "IoChunkedRead", buffer, bufferSize)
/***********************************************************************************************************************************
Read next chunk size
***********************************************************************************************************************************/
static bool
ioChunkedReadNext(THIS_VOID)
{
THIS(IoChunkedRead);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(IO_CHUNKED_READ, this);
FUNCTION_TEST_END();
const uint64_t chunkDelta = ioReadVarIntU64(this->read);
// Stop when chunk delta is zero, which indicates the end of the chunk list
if (chunkDelta == 0)
{
this->eof = true;
FUNCTION_TEST_RETURN(BOOL, false);
}
// Calculate next chunk size from delta
if (this->chunkLast == 0)
this->chunkRemains = (size_t)chunkDelta;
else
this->chunkRemains = (size_t)(cvtInt64FromZigZag(chunkDelta - 1) + (int64_t)this->chunkLast);
this->chunkLast = this->chunkRemains;
FUNCTION_TEST_RETURN(BOOL, true);
}
/***********************************************************************************************************************************
Read first chunk size
***********************************************************************************************************************************/
static bool
ioChunkedReadOpen(THIS_VOID)
{
THIS(IoChunkedRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNKED_READ, this);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ioChunkedReadNext(this);
FUNCTION_LOG_RETURN(BOOL, true);
}
/***********************************************************************************************************************************
Read next chunk or partial chunk
***********************************************************************************************************************************/
static size_t
ioChunkedRead(THIS_VOID, Buffer *const buffer, const bool block)
{
THIS(IoChunkedRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNKED_READ, this);
FUNCTION_LOG_PARAM(BUFFER, buffer);
FUNCTION_LOG_PARAM(BOOL, block);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(buffer != NULL);
size_t actualBytes = 0;
// Keep reading until the output buffer is full
while (!bufFull(buffer))
{
// If the entire chunk will fit in the output buffer
if (this->chunkRemains < bufRemains(buffer))
{
bufLimitSet(buffer, bufUsed(buffer) + this->chunkRemains);
ioRead(this->read, buffer);
actualBytes += this->chunkRemains;
this->chunkRemains = 0;
// Read the next chunk header
if (!ioChunkedReadNext(this))
break;
}
// Else only part of the chunk will fit in the output
else
{
actualBytes += bufRemains(buffer);
this->chunkRemains -= bufRemains(buffer);
ioRead(this->read, buffer);
}
}
FUNCTION_LOG_RETURN(SIZE, actualBytes);
}
/***********************************************************************************************************************************
Have all chunks been read?
***********************************************************************************************************************************/
static bool
ioChunkedReadEof(THIS_VOID)
{
THIS(IoChunkedRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNKED_READ, this);
FUNCTION_LOG_END();
ASSERT(this != NULL);
FUNCTION_LOG_RETURN(BOOL, this->eof);
}
/**********************************************************************************************************************************/
FN_EXTERN IoRead *
ioChunkedReadNew(IoRead *const read)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_READ, read);
FUNCTION_LOG_END();
ASSERT(read != NULL);
OBJ_NEW_BEGIN(IoChunkedRead, .childQty = MEM_CONTEXT_QTY_MAX)
{
*this = (IoChunkedRead)
{
.read = read,
};
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(IO_READ, ioReadNewP(this, .eof = ioChunkedReadEof, .open = ioChunkedReadOpen, .read = ioChunkedRead));
}

View File

@ -1,161 +0,0 @@
/***********************************************************************************************************************************
Chunk Filter
***********************************************************************************************************************************/
#include "build.auto.h"
#include "common/debug.h"
#include "common/io/filter/chunk.h"
#include "common/log.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct IoChunk
{
const uint8_t *buffer; // Internal buffer
size_t bufferSize; // Buffer size
size_t bufferOffset; // Buffer offset
size_t sizeLast; // Size of last chunk
bool done; // Is the filter done?
uint8_t header[CVT_VARINT128_BUFFER_SIZE]; // Chunk header
} IoChunk;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_IO_CHUNK_TYPE \
IoChunk *
#define FUNCTION_LOG_IO_CHUNK_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "IoChunk", buffer, bufferSize)
/***********************************************************************************************************************************
Should the same input be provided again?
***********************************************************************************************************************************/
static bool
ioChunkInputSame(const THIS_VOID)
{
THIS(const IoChunk);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(IO_CHUNK, this);
FUNCTION_TEST_END();
ASSERT(this != NULL);
FUNCTION_TEST_RETURN(BOOL, this->buffer != NULL);
}
/***********************************************************************************************************************************
Is filter done?
***********************************************************************************************************************************/
static bool
ioChunkDone(const THIS_VOID)
{
THIS(const IoChunk);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(IO_CHUNK, this);
FUNCTION_TEST_END();
ASSERT(this != NULL);
FUNCTION_TEST_RETURN(BOOL, this->done && !ioChunkInputSame(this));
}
/***********************************************************************************************************************************
Split data into chunks
***********************************************************************************************************************************/
static void
ioChunkProcess(THIS_VOID, const Buffer *const input, Buffer *const output)
{
THIS(IoChunk);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_CHUNK, this);
FUNCTION_LOG_PARAM(BUFFER, input);
FUNCTION_LOG_PARAM(BUFFER, output);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(output != NULL);
// If there is input to process
if (input != NULL)
{
// Write the chunk size
if (this->buffer == NULL)
{
// Initialize the header with the chunk size
this->buffer = this->header;
this->bufferSize = 0;
this->bufferOffset = 0;
cvtUInt64ToVarInt128(
this->sizeLast == 0 ? bufUsed(input) : cvtInt64ToZigZag((int64_t)bufUsed(input) - (int64_t)this->sizeLast) + 1,
this->header, &this->bufferSize, SIZE_OF_STRUCT_MEMBER(IoChunk, header));
this->sizeLast = bufUsed(input);
}
// Output the chunk
do
{
// Output the entire buffer if possible
if (bufRemains(output) >= this->bufferSize - this->bufferOffset)
{
bufCatC(output, this->buffer, this->bufferOffset, this->bufferSize - this->bufferOffset);
// If the header was written then switch to the chunk
if (this->buffer == this->header)
{
this->buffer = bufPtrConst(input);
this->bufferSize = bufUsed(input);
this->bufferOffset = 0;
}
// Else done writing the chunk
else
this->buffer = NULL;
}
// Else output part of the buffer
else
{
const size_t outputSize = bufRemains(output);
bufCatC(output, this->buffer, this->bufferOffset, outputSize);
this->bufferOffset += outputSize;
}
}
while (ioChunkInputSame(this) && !bufFull(output));
}
// Else processing is complete
else
{
ASSERT(bufRemains(output) > 0);
// Write the terminating zero byte
*(bufPtr(output) + bufUsed(output)) = '\0';
bufUsedInc(output, 1);
this->done = true;
}
FUNCTION_LOG_RETURN_VOID();
}
/**********************************************************************************************************************************/
FN_EXTERN IoFilter *
ioChunkNew(void)
{
FUNCTION_LOG_VOID(logLevelTrace);
OBJ_NEW_BEGIN(IoChunk)
{
*this = (IoChunk){0};
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(
IO_FILTER,
ioFilterNewP(CHUNK_FILTER_TYPE, this, NULL, .done = ioChunkDone, .inOut = ioChunkProcess, .inputSame = ioChunkInputSame));
}

View File

@ -1,21 +0,0 @@
/***********************************************************************************************************************************
Chunk Filter
Split data up into chunks so it can be written (and later read) without knowing the eventual size of the data.
***********************************************************************************************************************************/
#ifndef COMMON_IO_FILTER_CHUNK_H
#define COMMON_IO_FILTER_CHUNK_H
#include "common/io/filter/filter.h"
/***********************************************************************************************************************************
Filter type constant
***********************************************************************************************************************************/
#define CHUNK_FILTER_TYPE STRID5("chunk", 0xb755030)
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoFilter *ioChunkNew(void);
#endif
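For reference, the framing being removed worked like this: each chunk was prefixed with a varint header (the first chunk's size, then the zigzag-encoded delta from the previous size plus one), and a zero byte terminated the list. The following standalone sketch (not pgbackrest code) decodes the stream the old unit test below used to verify, "ABC" followed by "DEF", hex 034142430144454600.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read an unsigned base-128 varint and advance the pointer */
static uint64_t
varIntRead(const uint8_t **in)
{
    uint64_t result = 0;

    for (int shift = 0;; shift += 7)
    {
        const uint8_t byte = *(*in)++;
        result |= (uint64_t)(byte & 0x7f) << shift;

        if ((byte & 0x80) == 0)
            break;
    }

    return result;
}

/* Standard zigzag decode: 0, 1, 2, 3, ... maps to 0, -1, 1, -2, ... */
static int64_t
zigZagDecode(const uint64_t value)
{
    return (int64_t)(value >> 1) ^ -(int64_t)(value & 1);
}

int
main(void)
{
    /* "ABC" then "DEF" as framed by the removed chunk filter */
    const uint8_t stream[] = {0x03, 'A', 'B', 'C', 0x01, 'D', 'E', 'F', 0x00};
    const uint8_t *in = stream;
    uint64_t sizeLast = 0;

    for (;;)
    {
        const uint64_t header = varIntRead(&in);

        /* A zero header terminates the chunk list */
        if (header == 0)
            break;

        /* The first header is the chunk size, later headers are zigzag size deltas offset by one */
        const uint64_t size = sizeLast == 0 ? header : (uint64_t)(zigZagDecode(header - 1) + (int64_t)sizeLast);

        printf("chunk of %" PRIu64 " byte(s): %.*s\n", size, (int)size, (const char *)in);
        in += size;
        sizeLast = size;
    }

    return 0;
}

Decoding the new block list requires none of this: a super block is simply the (optionally compressed/encrypted) concatenation of its blocks, and its size comes from the map.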

src/common/io/limitRead.c Normal file
View File

@ -0,0 +1,99 @@
/***********************************************************************************************************************************
Read Limited Data
***********************************************************************************************************************************/
#include "build.auto.h"
#include "common/debug.h"
#include "common/io/bufferRead.h"
#include "common/io/read.h"
#include "common/log.h"
#include "common/type/object.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct IoLimitRead
{
IoRead *read; // IoRead to read data from
uint64_t limit; // Limit of data to read
uint64_t current; // Current amount of data read
} IoLimitRead;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_IO_LIMIT_READ_TYPE \
IoLimitRead *
#define FUNCTION_LOG_IO_LIMIT_READ_FORMAT(value, buffer, bufferSize) \
objNameToLog(value, "IoLimitRead", buffer, bufferSize)
/***********************************************************************************************************************************
Read data up to the limit
***********************************************************************************************************************************/
static size_t
ioLimitRead(THIS_VOID, Buffer *const buffer, const bool block)
{
THIS(IoLimitRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_LIMIT_READ, this);
FUNCTION_LOG_PARAM(BUFFER, buffer);
FUNCTION_LOG_PARAM(BOOL, block);
FUNCTION_LOG_END();
ASSERT(this != NULL);
ASSERT(buffer != NULL);
// Continue reading until limit
const size_t result =
this->limit - this->current >= bufRemains(buffer) ? bufRemains(buffer) : (size_t)(this->limit - this->current);
bufLimitSet(buffer, bufUsed(buffer) + result);
ioRead(this->read, buffer);
// Update current read
this->current += result;
FUNCTION_LOG_RETURN(SIZE, result);
}
/***********************************************************************************************************************************
Has the read limit been reached?
***********************************************************************************************************************************/
static bool
ioLimitReadEof(THIS_VOID)
{
THIS(IoLimitRead);
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_LIMIT_READ, this);
FUNCTION_LOG_END();
ASSERT(this != NULL);
FUNCTION_LOG_RETURN(BOOL, this->limit == this->current);
}
/**********************************************************************************************************************************/
FN_EXTERN IoRead *
ioLimitReadNew(IoRead *const read, const uint64_t limit)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(IO_READ, read);
FUNCTION_LOG_PARAM(UINT64, limit);
FUNCTION_LOG_END();
ASSERT(read != NULL);
OBJ_NEW_BEGIN(IoLimitRead, .childQty = MEM_CONTEXT_QTY_MAX)
{
*this = (IoLimitRead)
{
.read = read,
.limit = limit,
};
}
OBJ_NEW_END();
FUNCTION_LOG_RETURN(IO_READ, ioReadNewP(this, .eof = ioLimitReadEof, .read = ioLimitRead));
}

View File

@ -1,16 +1,16 @@
/***********************************************************************************************************************************
Read Chunked I/O
Read Limited Data
Read data that has been chunked with the IoChunk filter.
Read only as much data as specified.
***********************************************************************************************************************************/
#ifndef COMMON_IO_CHUNKEDREAD_H
#define COMMON_IO_CHUNKEDREAD_H
#ifndef COMMON_IO_LIMITREAD_H
#define COMMON_IO_LIMITREAD_H
#include "common/io/read.h"
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN IoRead *ioChunkedReadNew(IoRead *read);
FN_EXTERN IoRead *ioLimitReadNew(IoRead *read, uint64_t limit);
#endif
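A minimal usage sketch, assuming it is compiled inside pgbackrest (the calls mirror the new IoLimitRead unit test later in this diff): wrap any IoRead so that at most limit bytes pass through, for example to read a single super block whose stored size is known from the block map. The example function name is hypothetical.

#include "common/io/bufferRead.h"
#include "common/io/limitRead.h"
#include "common/type/buffer.h"

/* Hypothetical example function, not part of the commit */
static void
limitReadExample(void)
{
    const Buffer *const input = BUFSTRZ("ABCDEF");

    /* Allow no more than five bytes to be read from the six-byte source */
    IoRead *const read = ioLimitReadNew(ioBufferReadNewOpen(input), 5);
    ioReadOpen(read);

    Buffer *const output = bufNew(5);
    ioRead(read, output);

    /* output now holds "ABCDE" and the reader reports eof because the limit has been reached */
}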

View File

@ -183,12 +183,10 @@ src_pgbackrest = [
'common/exec.c',
'common/fork.c',
'common/ini.c',
'common/io/chunkedRead.c',
'common/io/client.c',
'common/io/fd.c',
'common/io/fdRead.c',
'common/io/fdWrite.c',
'common/io/filter/chunk.c',
'common/io/filter/size.c',
'common/io/http/client.c',
'common/io/http/common.c',
@ -198,6 +196,7 @@ src_pgbackrest = [
'common/io/http/response.c',
'common/io/http/session.c',
'common/io/http/url.c',
'common/io/limitRead.c',
'common/io/server.c',
'common/io/session.c',
'common/io/socket/client.c',

View File

@ -919,14 +919,6 @@ src/common/io/bufferWrite.h:
class: core
type: c/h
src/common/io/chunkedRead.c:
class: core
type: c
src/common/io/chunkedRead.h:
class: core
type: c/h
src/common/io/client.c:
class: core
type: c
@ -971,14 +963,6 @@ src/common/io/filter/buffer.h:
class: core
type: c/h
src/common/io/filter/chunk.c:
class: core
type: c
src/common/io/filter/chunk.h:
class: core
type: c/h
src/common/io/filter/filter.c:
class: core
type: c
@ -1087,6 +1071,14 @@ src/common/io/io.h:
class: core
type: c/h
src/common/io/limitRead.c:
class: core
type: c
src/common/io/limitRead.h:
class: core
type: c/h
src/common/io/read.c:
class: core
type: c

View File

@ -295,17 +295,16 @@ unit:
coverage:
- common/io/bufferRead
- common/io/bufferWrite
- common/io/chunkedRead
- common/io/fd
- common/io/fdRead
- common/io/fdWrite
- common/io/filter/buffer
- common/io/filter/chunk
- common/io/filter/filter
- common/io/filter/group
- common/io/filter/sink
- common/io/filter/size
- common/io/io
- common/io/limitRead
- common/io/read
- common/io/write

View File

@ -6,7 +6,6 @@ Test Backup Command
#include "common/crypto/hash.h"
#include "common/io/bufferRead.h"
#include "common/io/bufferWrite.h"
#include "common/io/chunkedRead.h"
#include "postgres/interface/static.vendor.h"
#include "storage/helper.h"
#include "storage/posix/storage.h"
@ -1083,15 +1082,15 @@ testRun(void)
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, BUF(bufPtr(destination), bufUsed(destination) - (size_t)mapSize)),
"020031023200", // block 0
"3132", // block 0
"block list");
const Buffer *map = BUF(bufPtr(destination) + (bufUsed(destination) - (size_t)mapSize), (size_t)mapSize);
TEST_RESULT_STR_Z(
testBlockDelta(blockMapNewRead(ioBufferReadNewOpen(map), 3, 8), 3, 8),
"read {reference: 0, bundleId: 0, offset: 0, size: 6}\n"
" super block {max: 2, size: 6}\n"
"read {reference: 0, bundleId: 0, offset: 0, size: 2}\n"
" super block {max: 2, size: 2}\n"
" block {no: 0, offset: 0}\n",
"check delta");
@ -1116,21 +1115,21 @@ testRun(void)
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, BUF(bufPtr(destination), bufUsed(destination) - (size_t)mapSize)),
"02004101424300" // block 0
"02015801595a00" // block 1
"02013101323300", // block 2
"414243" // block 0
"58595a" // block 1
"313233", // block 2
"block list");
map = BUF(bufPtr(destination) + (bufUsed(destination) - (size_t)mapSize), (size_t)mapSize);
TEST_RESULT_STR_Z(
testBlockDelta(blockMapNewRead(ioBufferReadNewOpen(map), 3, 8), 3, 8),
"read {reference: 2, bundleId: 4, offset: 5, size: 21}\n"
" super block {max: 3, size: 7}\n"
"read {reference: 2, bundleId: 4, offset: 5, size: 9}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 0}\n"
" super block {max: 3, size: 7}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 3}\n"
" super block {max: 3, size: 7}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 6}\n",
"check delta");
@ -1156,23 +1155,23 @@ testRun(void)
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, BUF(bufPtr(destination), bufUsed(destination) - (size_t)mapSize)),
"03004143044300" // block 0
"02034000", // block 3
"414343" // block 0
"40", // block 3
"block list");
map = BUF(bufPtr(destination) + (bufUsed(destination) - (size_t)mapSize), (size_t)mapSize);
TEST_RESULT_STR_Z(
testBlockDelta(blockMapNewRead(ioBufferReadNewOpen(map), 3, 8), 3, 8),
"read {reference: 3, bundleId: 0, offset: 0, size: 11}\n"
" super block {max: 3, size: 7}\n"
"read {reference: 3, bundleId: 0, offset: 0, size: 4}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 0}\n"
" super block {max: 1, size: 4}\n"
" super block {max: 1, size: 1}\n"
" block {no: 0, offset: 9}\n"
"read {reference: 2, bundleId: 4, offset: 12, size: 14}\n"
" super block {max: 3, size: 7}\n"
"read {reference: 2, bundleId: 4, offset: 8, size: 6}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 3}\n"
" super block {max: 3, size: 7}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 6}\n",
"check delta");
@ -1198,20 +1197,20 @@ testRun(void)
TEST_RESULT_STR_Z(
strNewEncode(encodingHex, BUF(bufPtr(destination), bufUsed(destination) - (size_t)mapSize)),
"020041014243" // super block 0 / block 0
"01015801595a00" // super block 0 / block 1
"02013101323300", // block 2
"414243" // super block 0 / block 0
"58595a" // super block 0 / block 1
"313233", // block 2
"block list");
map = BUF(bufPtr(destination) + (bufUsed(destination) - (size_t)mapSize), (size_t)mapSize);
TEST_RESULT_STR_Z(
testBlockDelta(blockMapNewRead(ioBufferReadNewOpen(map), 3, 8), 3, 8),
"read {reference: 2, bundleId: 4, offset: 5, size: 20}\n"
" super block {max: 6, size: 13}\n"
"read {reference: 2, bundleId: 4, offset: 5, size: 9}\n"
" super block {max: 6, size: 6}\n"
" block {no: 0, offset: 0}\n"
" block {no: 1, offset: 3}\n"
" super block {max: 3, size: 7}\n"
" super block {max: 3, size: 3}\n"
" block {no: 0, offset: 6}\n",
"check delta");
@ -3718,7 +3717,7 @@ testRun(void)
"P01 DETAIL: backup file " TEST_PATH "/pg1/grow-to-block-incr (bundle 1/0, 16.0KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 1/16383, 8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-shrink (bundle 1/24575, 16KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (bundle 1/40996, 2B, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/PG_VERSION (bundle 1/40989, 2B, [PCT]) checksum [SHA1]\n"
"P00 INFO: execute non-exclusive backup stop and wait for all WAL segments to archive\n"
"P00 INFO: backup stop archive = 0000000105DBF06000000001, lsn = 5dbf060/300000\n"
"P00 DETAIL: wrote 'backup_label' file returned from backup stop function\n"
@ -3829,8 +3828,8 @@ testRun(void)
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-larger (1.4MB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-grow (128KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/grow-to-block-incr (bundle 1/0, 16KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 1/16418, 8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-shrink (bundle 1/24610, 16.0KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 1/16411, 8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-shrink (bundle 1/24603, 16.0KB, [PCT]) checksum [SHA1]\n"
"P00 DETAIL: reference pg_data/PG_VERSION to 20191103-165320F\n"
"P00 INFO: execute non-exclusive backup stop and wait for all WAL segments to archive\n"
"P00 INFO: backup stop archive = 0000000105DC213000000001, lsn = 5dc2130/300000\n"
@ -4035,8 +4034,8 @@ testRun(void)
"P00 INFO: backup start archive = 0000000105DC82D000000000, lsn = 5dc82d0/0\n"
"P00 INFO: check archive for segment 0000000105DC82D000000000\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-age-multiplier (bundle 1/0, 32KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 1/130, 8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-grow (bundle 1/218, 48KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/global/pg_control (bundle 1/128, 8KB, [PCT]) checksum [SHA1]\n"
"P01 DETAIL: backup file " TEST_PATH "/pg1/block-incr-grow (bundle 1/216, 48KB, [PCT]) checksum [SHA1]\n"
"P00 DETAIL: reference pg_data/PG_VERSION to 20191108-080000F\n"
"P00 INFO: execute non-exclusive backup stop and wait for all WAL segments to archive\n"
"P00 INFO: backup stop archive = 0000000105DC82D000000001, lsn = 5dc82d0/300000\n"

View File

@ -215,7 +215,7 @@ testRun(void)
"\"size\":65536,"
"\"checksum\":\"1adc95bebe9eea8c112d40cd04ab7a8d75c4f961\","
"\"repo\":{"
"\"size\":65610"
"\"size\":65594"
"},"
"\"bundle\":{"
"\"id\":1,"
@ -230,7 +230,7 @@ testRun(void)
"\"reference\":0,"
"\"read\":{"
"\"total\":1,"
"\"size\":65552"
"\"size\":65536"
"},"
"\"superBlock\":{"
"\"total\":2,"
@ -271,7 +271,7 @@ testRun(void)
"\n"
"file list:\n"
" - pg_data/base/1/2\n"
" size: 96KB, repo 64.1KB\n"
" size: 96KB, repo 64KB\n"
" checksum: d4976e362696a43fb09e7d4e780d7d9352a2ec2e\n"
" bundle: 1\n"
" block: size 8KB, map size 99B, checksum size 6B\n"
@ -376,7 +376,7 @@ testRun(void)
"\n"
"file list:\n"
" - pg_data/base/1/2\n"
" size: 96KB, repo 64.1KB\n"
" size: 96KB, repo 64KB\n"
" checksum: d4976e362696a43fb09e7d4e780d7d9352a2ec2e\n"
" bundle: 1\n"
" block: size 8KB, map size 99B, checksum size 6B\n"
@ -417,7 +417,7 @@ testRun(void)
"\"size\":98304,"
"\"checksum\":\"d4976e362696a43fb09e7d4e780d7d9352a2ec2e\","
"\"repo\":{"
"\"size\":65647"
"\"size\":65635"
"},"
"\"bundle\":{"
"\"id\":1,"
@ -470,7 +470,7 @@ testRun(void)
"\n"
"file list:\n"
" - pg_data/base/1/2\n"
" size: 96KB, repo 64.1KB\n"
" size: 96KB, repo 64KB\n"
" checksum: d4976e362696a43fb09e7d4e780d7d9352a2ec2e\n"
" bundle: 1\n"
" block: size 8KB, map size 99B, checksum size 6B\n"
@ -538,7 +538,7 @@ testRun(void)
"\"size\":98304,"
"\"checksum\":\"d4976e362696a43fb09e7d4e780d7d9352a2ec2e\","
"\"repo\":{"
"\"size\":65668"
"\"size\":65664"
"},"
"\"bundle\":{"
"\"id\":1,"

View File

@ -761,44 +761,27 @@ testRun(void)
}
// *****************************************************************************************************************************
if (testBegin("IoChunkedRead and ioChunkedWrite"))
if (testBegin("IoLimitRead"))
{
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("write chunks");
ioBufferSizeSet(3);
Buffer *destination = bufNew(256);
IoWrite *write = ioBufferWriteNew(destination);
ioFilterGroupAdd(ioWriteFilterGroup(write), ioChunkNew());
ioWriteOpen(write);
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("ABC")), "write");
TEST_RESULT_VOID(ioWrite(write, BUFSTRDEF("DEF")), "write");
TEST_RESULT_VOID(ioWriteClose(write), "close");
TEST_RESULT_STR_Z(strNewEncode(encodingHex, destination), "034142430144454600", "check");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("read chunks");
TEST_TITLE("read limit");
const Buffer *input = BUFSTRZ("ABCDEF");
ioBufferSizeSet(2);
IoRead *read = ioChunkedReadNew(ioBufferReadNewOpen(destination));
IoRead *read = ioLimitReadNew(ioBufferReadNewOpen(input), 5);
ioReadOpen(read);
Buffer *actual = bufNew(3);
TEST_RESULT_UINT(ioRead(read, actual), 3, "read");
TEST_RESULT_STR_Z(strNewBuf(actual), "ABC", "check");
Buffer *output = bufNew(2);
TEST_RESULT_UINT(ioRead(read, output), 2, "read");
TEST_RESULT_STR_Z(strNewBuf(output), "AB", "check");
actual = bufNew(1);
TEST_RESULT_UINT(ioRead(read, actual), 1, "read");
TEST_RESULT_STR_Z(strNewBuf(actual), "D", "check");
bufUsedZero(output);
TEST_RESULT_UINT(ioRead(read, output), 2, "read");
TEST_RESULT_STR_Z(strNewBuf(output), "CD", "check");
actual = bufNew(3);
TEST_RESULT_UINT(ioRead(read, actual), 2, "read");
TEST_RESULT_STR_Z(strNewBuf(actual), "EF", "check");
actual = bufNew(2);
TEST_RESULT_UINT(ioRead(read, actual), 0, "eof");
bufUsedZero(output);
TEST_RESULT_UINT(ioRead(read, output), 1, "read");
TEST_RESULT_STR_Z(strNewBuf(output), "E", "check");
}
FUNCTION_HARNESS_RETURN_VOID();