Mirror of https://github.com/pgbackrest/pgbackrest.git

Update default block size and super block values based on testing.

The block size is incremented when the size of the map becomes as large as a single block. This is arbitrary, but it appears to give a good balance of block size vs. map size.
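For example, assuming the 8-byte map record (checksum size + 1) noted in the updated code comment, the map for 88KiB blocks reaches the size of one block at (88 * 1024)^2 / 8 bytes = 968MiB, which is where the new default map steps up to 96KiB blocks.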

The full backup super block size is set to minimize loss of compression efficiency, since most blocks in the database will likely never be modified. For diff/incr backup super blocks a smaller size is acceptable because only modified blocks are stored: the savings from not storing unmodified blocks offsets the small loss in compression efficiency from the smaller super block, and the smaller super block allows more granular fetches during restore.
David Steele
2023-03-10 14:01:38 +07:00
parent 1119a53539
commit 6b409d049e
5 changed files with 26 additions and 22 deletions

View File

@@ -51,6 +51,7 @@
<commit subject="Use xxHash instead of SHA-1 for block incremental checksums."/>
<commit subject="Exclude backup set size from info for block incremental backups."/>
<commit subject="Rename BlockHash to BlockChecksum."/>
<commit subject="Update default block size and super block values based on testing."/>
<release-item-contributor-list>
<release-item-contributor id="david.steele"/>

View File

@@ -1873,8 +1873,8 @@ option:
section: global
group: repo
type: size
-default: 1MiB
-allow-range: [128KiB, 16MiB]
+default: 256KiB
+allow-range: [32KiB, 16MiB]
internal: true
command: repo-block
command-role:
@@ -1886,7 +1886,7 @@ option:
repo-block-size-super-full:
inherit: repo-block-size-super
-default: 4MiB
+default: 1MiB
repo-cipher-pass:
section: global

View File

@@ -270,19 +270,21 @@ backupInit(const InfoBackup *infoBackup)
/**********************************************************************************************************************************
Build block incremental maps
***********************************************************************************************************************************/
-// Size map
+// Size map. Block size is increased when the block map would be larger than a single block. The break can be calculated with this
+// formula: [block size in KiB] / (1024 / [block size in KiB] * [checksum size + 1]) * 1073741824.
static const ManifestBlockIncrSizeMap manifestBlockIncrSizeMapDefault[] =
{
-{.fileSize = 1024 * 1024 * 1024, .blockSize = 1024 * 1024},
-{.fileSize = 512 * 1024 * 1024, .blockSize = 768 * 1024},
-{.fileSize = 256 * 1024 * 1024, .blockSize = 512 * 1024},
-{.fileSize = 64 * 1024 * 1024, .blockSize = 384 * 1024},
-{.fileSize = 16 * 1024 * 1024, .blockSize = 256 * 1024},
-{.fileSize = 4 * 1024 * 1024, .blockSize = 192 * 1024},
-{.fileSize = 2 * 1024 * 1024, .blockSize = 128 * 1024},
-{.fileSize = 1024 * 1024, .blockSize = 64 * 1024},
-{.fileSize = 512 * 1024, .blockSize = 32 * 1024},
-{.fileSize = 128 * 1024, .blockSize = 16 * 1024},
+{.fileSize = 968 * 1024 * 1024, .blockSize = 96 * 1024},
+{.fileSize = 800 * 1024 * 1024, .blockSize = 88 * 1024},
+{.fileSize = 648 * 1024 * 1024, .blockSize = 80 * 1024},
+{.fileSize = 512 * 1024 * 1024, .blockSize = 72 * 1024},
+{.fileSize = 392 * 1024 * 1024, .blockSize = 64 * 1024},
+{.fileSize = 288 * 1024 * 1024, .blockSize = 56 * 1024},
+{.fileSize = 200 * 1024 * 1024, .blockSize = 48 * 1024},
+{.fileSize = 128 * 1024 * 1024, .blockSize = 40 * 1024},
+{.fileSize = 84256 * 1024, .blockSize = 32 * 1024}, // These do not come out evenly because the map record size is 7
+{.fileSize = 37448 * 1024, .blockSize = 24 * 1024}, // instead of 8 in the rows above
+{.fileSize = 9360 * 1024, .blockSize = 16 * 1024},
+{.fileSize = 16 * 1024, .blockSize = 8 * 1024},
};
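As a check on the formula above, here is a minimal standalone sketch (not part of the patch; the helper name is hypothetical) that computes the file size at which the map for a given block size grows to one block, assuming a map record of checksum size + 1 bytes:

#include <inttypes.h>
#include <stdio.h>

// File size (in bytes) at which the block map becomes as large as one block,
// i.e. (fileSize / blockSize) * recordSize == blockSize, so
// fileSize = blockSize * blockSize / recordSize. Hypothetical helper for
// illustration only.
static uint64_t
blockMapBreak(const uint64_t blockSize, const uint64_t recordSize)
{
    return blockSize * blockSize / recordSize;
}

int
main(void)
{
    // 88KiB blocks with 8-byte records break at 968MiB, where the map steps up to 96KiB blocks
    printf("%" PRIu64 " MiB\n", blockMapBreak(88 * 1024, 8) / (1024 * 1024));

    // 56KiB blocks with 8-byte records break at 392MiB, where the map steps up to 64KiB blocks
    printf("%" PRIu64 " MiB\n", blockMapBreak(56 * 1024, 8) / (1024 * 1024));

    return 0;
}

With 8-byte records this reproduces the larger rows exactly (88KiB -> 968MiB, 56KiB -> 392MiB); the smaller rows use 7-byte records and do not come out evenly, per the trailing comment in the map.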

View File

@@ -24,10 +24,10 @@ static const StringPub parseRuleValueStr[] =
PARSE_RULE_STRPUB("1MiB"), // val/str
PARSE_RULE_STRPUB("2"), // val/str
PARSE_RULE_STRPUB("20MiB"), // val/str
PARSE_RULE_STRPUB("256KiB"), // val/str
PARSE_RULE_STRPUB("2MiB"), // val/str
PARSE_RULE_STRPUB("3"), // val/str
PARSE_RULE_STRPUB("443"), // val/str
PARSE_RULE_STRPUB("4MiB"), // val/str
PARSE_RULE_STRPUB("5432"), // val/str
PARSE_RULE_STRPUB("60"), // val/str
PARSE_RULE_STRPUB("8432"), // val/str
@@ -74,10 +74,10 @@ typedef enum
parseRuleValStrQT_1MiB_QT, // val/str/enum
parseRuleValStrQT_2_QT, // val/str/enum
parseRuleValStrQT_20MiB_QT, // val/str/enum
+parseRuleValStrQT_256KiB_QT, // val/str/enum
parseRuleValStrQT_2MiB_QT, // val/str/enum
parseRuleValStrQT_3_QT, // val/str/enum
parseRuleValStrQT_443_QT, // val/str/enum
-parseRuleValStrQT_4MiB_QT, // val/str/enum
parseRuleValStrQT_5432_QT, // val/str/enum
parseRuleValStrQT_60_QT, // val/str/enum
parseRuleValStrQT_8432_QT, // val/str/enum
@@ -5054,14 +5054,14 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
// opt/repo-block-size-super
PARSE_RULE_OPTIONAL_ALLOW_RANGE // opt/repo-block-size-super
( // opt/repo-block-size-super
-PARSE_RULE_VAL_INT(parseRuleValInt131072), // opt/repo-block-size-super
+PARSE_RULE_VAL_INT(parseRuleValInt32768), // opt/repo-block-size-super
PARSE_RULE_VAL_INT(parseRuleValInt16777216), // opt/repo-block-size-super
), // opt/repo-block-size-super
// opt/repo-block-size-super
PARSE_RULE_OPTIONAL_DEFAULT // opt/repo-block-size-super
( // opt/repo-block-size-super
-PARSE_RULE_VAL_INT(parseRuleValInt1048576), // opt/repo-block-size-super
-PARSE_RULE_VAL_STR(parseRuleValStrQT_1MiB_QT), // opt/repo-block-size-super
+PARSE_RULE_VAL_INT(parseRuleValInt262144), // opt/repo-block-size-super
+PARSE_RULE_VAL_STR(parseRuleValStrQT_256KiB_QT), // opt/repo-block-size-super
), // opt/repo-block-size-super
), // opt/repo-block-size-super
), // opt/repo-block-size-super
@@ -5094,14 +5094,14 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
// opt/repo-block-size-super-full
PARSE_RULE_OPTIONAL_ALLOW_RANGE // opt/repo-block-size-super-full
( // opt/repo-block-size-super-full
-PARSE_RULE_VAL_INT(parseRuleValInt131072), // opt/repo-block-size-super-full
+PARSE_RULE_VAL_INT(parseRuleValInt32768), // opt/repo-block-size-super-full
PARSE_RULE_VAL_INT(parseRuleValInt16777216), // opt/repo-block-size-super-full
), // opt/repo-block-size-super-full
// opt/repo-block-size-super-full
PARSE_RULE_OPTIONAL_DEFAULT // opt/repo-block-size-super-full
( // opt/repo-block-size-super-full
-PARSE_RULE_VAL_INT(parseRuleValInt4194304), // opt/repo-block-size-super-full
-PARSE_RULE_VAL_STR(parseRuleValStrQT_4MiB_QT), // opt/repo-block-size-super-full
+PARSE_RULE_VAL_INT(parseRuleValInt1048576), // opt/repo-block-size-super-full
+PARSE_RULE_VAL_STR(parseRuleValStrQT_1MiB_QT), // opt/repo-block-size-super-full
), // opt/repo-block-size-super-full
), // opt/repo-block-size-super-full
), // opt/repo-block-size-super-full

View File

@@ -4050,6 +4050,7 @@ testRun(void)
hrnCfgArgRawZ(argList, cfgOptRepoBlockSizeMap, STRINGIFY(BLOCK_MAX_FILE_SIZE) "=" STRINGIFY(BLOCK_MAX_SIZE));
hrnCfgArgRawZ(argList, cfgOptRepoBlockSizeMap, STRINGIFY(BLOCK_MIN_FILE_SIZE) "=" STRINGIFY(BLOCK_MIN_SIZE));
hrnCfgArgRawZ(argList, cfgOptRepoBlockSizeMap, STRINGIFY(BLOCK_MID_FILE_SIZE) "=" STRINGIFY(BLOCK_MID_SIZE));
+hrnCfgArgRawZ(argList, cfgOptRepoBlockSizeSuper, "1MiB");
HRN_CFG_LOAD(cfgCmdBackup, argList);
// Grow file size to check block incr delta. This is large enough that it would get a new block size if it were a new