1
0
mirror of https://github.com/pgbackrest/pgbackrest.git synced 2024-12-12 10:04:14 +02:00

Add --repo-storage-tag option to create object tags.

This new option allows tags to be added to objects in S3, GCS, and Azure repositories.

This was fairly straightforward for S3 and Azure, but GCS does not allow tags for a simple upload using the JSON interface. If tags are required then the resumable interface must be used even if the file falls below the limit that usually triggers a resumable upload (i.e. size < repo-storage-upload-chunk-size).

This option is structured so that tags must be specified per-repo rather than globally for all repos. This seems logical since the tag keys and values may vary by service, e.g. S3 vs GCS.

These storage tags are independent of backup annotations since they are likely to be used for different purposes, e.g. billing, while the backup annotations are primarily intended for monitoring.
This commit is contained in:
David Steele 2023-09-14 08:22:21 -04:00 committed by GitHub
parent 3b9c31f6e3
commit 1b4e0cce5f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 358 additions and 58 deletions

View File

@ -1,6 +1,20 @@
<release date="XXXX-XX-XX" version="2.48dev" title="Under Development">
<release-core-list>
<release-feature-list>
<release-item>
<github-issue id="2150"/>
<github-pull-request id="2159"/>
<release-item-contributor-list>
<release-item-contributor id="david.steele"/>
<release-item-reviewer id="stephen.frost"/>
<release-item-reviewer id="stefan.fercot"/>
<release-item-reviewer id="timothee.peignier"/>
</release-item-contributor-list>
<p>Add <br-option>--repo-storage-tag</br-option> option to create object tags.</p>
</release-item>
<release-item>
<commit subject="Adjust Wait object to be more accurate when nested."/>
<commit subject="Aggregate error retries in ErrorRetry output."/>

View File

@ -910,6 +910,11 @@
<contributor-id type="github">ralfthewise</contributor-id>
</contributor>
<contributor id="timothee.peignier">
<contributor-name-display>Timoth&eacute;e Peignier</contributor-name-display>
<contributor-id type="github">cyberdelia</contributor-id>
</contributor>
<contributor id="todd.vernick">
<contributor-name-display>Todd Vernick</contributor-name-display>
<contributor-id type="github">gintoddic</contributor-id>

View File

@ -2529,6 +2529,7 @@
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:PutObjectTagging",
"s3:GetObject",
"s3:DeleteObject"
],

View File

@ -2489,6 +2489,19 @@ option:
repo?-azure-port: {}
repo?-s3-port: {}
repo-storage-tag:
section: global
group: repo
type: hash
required: false
command: repo-type
depend:
option: repo-type
list:
- azure
- gcs
- s3
repo-storage-upload-chunk-size:
section: global
group: repo

View File

@ -1127,6 +1127,18 @@
<example>9000</example>
</config-key>
<config-key id="repo-storage-tag" name="Repository Storage Tag(s)">
<summary>Repository storage tag(s).</summary>
<text>
<p>Specify tags that will be added to objects when the repository is an object store (e.g. S3). The option can be repeated to add multiple tags.</p>
<p>There is no provision in <backrest/> to modify these tags, so be sure to set them correctly before running <cmd>stanza-create</cmd> to ensure uniform tags across the entire repository.</p>
</text>
<example>key1=value1</example>
</config-key>
<config-key id="repo-storage-upload-chunk-size" name="Repository Storage Upload Chunk Size">
<summary>Repository storage upload chunk size.</summary>

View File

@ -22,6 +22,7 @@ FN_EXTERN HttpQuery *
httpQueryNew(HttpQueryNewParam param)
{
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(KEY_VALUE, param.kv);
FUNCTION_TEST_PARAM(STRING_LIST, param.redactList);
FUNCTION_TEST_END();
@ -29,7 +30,7 @@ httpQueryNew(HttpQueryNewParam param)
{
*this = (HttpQuery)
{
.kv = kvNew(),
.kv = param.kv != NULL ? kvDup(param.kv) : kvNew(),
.redactList = strLstDup(param.redactList),
};
}

View File

@ -20,6 +20,7 @@ Constructors
// Optional parameters for httpQueryNew(), passed via the httpQueryNewP() variadic-parameter macro wrapper
typedef struct HttpQueryNewParam
{
VAR_PARAM_HEADER;                                                   // Standard header for variadic param structs (project convention)
const KeyValue *kv; // Initial query key/value list (duplicated into the new query when provided; otherwise the query starts empty)
const StringList *redactList; // List of keys to redact values for (e.g. credentials) when the query is logged
} HttpQueryNewParam;

View File

@ -135,7 +135,7 @@ Option constants
#define CFGOPT_TYPE "type"
#define CFGOPT_VERBOSE "verbose"
#define CFG_OPTION_TOTAL 175
#define CFG_OPTION_TOTAL 176
/***********************************************************************************************************************************
Option value constants
@ -520,6 +520,7 @@ typedef enum
cfgOptRepoStorageCaPath,
cfgOptRepoStorageHost,
cfgOptRepoStoragePort,
cfgOptRepoStorageTag,
cfgOptRepoStorageUploadChunkSize,
cfgOptRepoStorageVerifyTls,
cfgOptRepoType,

View File

@ -8886,6 +8886,89 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
), // opt/repo-storage-port
), // opt/repo-storage-port
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTION_NAME("repo-storage-tag"), // opt/repo-storage-tag
PARSE_RULE_OPTION_TYPE(cfgOptTypeHash), // opt/repo-storage-tag
PARSE_RULE_OPTION_RESET(true), // opt/repo-storage-tag
PARSE_RULE_OPTION_REQUIRED(false), // opt/repo-storage-tag
PARSE_RULE_OPTION_SECTION(cfgSectionGlobal), // opt/repo-storage-tag
PARSE_RULE_OPTION_MULTI(true), // opt/repo-storage-tag
PARSE_RULE_OPTION_GROUP_MEMBER(true), // opt/repo-storage-tag
PARSE_RULE_OPTION_GROUP_ID(cfgOptGrpRepo), // opt/repo-storage-tag
// opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND_ROLE_MAIN_VALID_LIST // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdAnnotate) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdExpire) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdManifest) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoGet) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoLs) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) // opt/repo-storage-tag
), // opt/repo-storage-tag
// opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND_ROLE_ASYNC_VALID_LIST // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) // opt/repo-storage-tag
), // opt/repo-storage-tag
// opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND_ROLE_LOCAL_VALID_LIST // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdBackup) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) // opt/repo-storage-tag
), // opt/repo-storage-tag
// opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND_ROLE_REMOTE_VALID_LIST // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdAnnotate) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchiveGet) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdArchivePush) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdCheck) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdInfo) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdManifest) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoCreate) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoGet) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoLs) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoPut) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRepoRm) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdRestore) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaCreate) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaDelete) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdStanzaUpgrade) // opt/repo-storage-tag
PARSE_RULE_OPTION_COMMAND(cfgCmdVerify) // opt/repo-storage-tag
), // opt/repo-storage-tag
// opt/repo-storage-tag
PARSE_RULE_OPTIONAL // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTIONAL_GROUP // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_OPTIONAL_DEPEND // opt/repo-storage-tag
( // opt/repo-storage-tag
PARSE_RULE_VAL_OPT(cfgOptRepoType), // opt/repo-storage-tag
PARSE_RULE_VAL_STRID(parseRuleValStrIdAzure), // opt/repo-storage-tag
PARSE_RULE_VAL_STRID(parseRuleValStrIdGcs), // opt/repo-storage-tag
PARSE_RULE_VAL_STRID(parseRuleValStrIdS3), // opt/repo-storage-tag
), // opt/repo-storage-tag
), // opt/repo-storage-tag
), // opt/repo-storage-tag
), // opt/repo-storage-tag
// -----------------------------------------------------------------------------------------------------------------------------
PARSE_RULE_OPTION // opt/repo-storage-upload-chunk-size
( // opt/repo-storage-upload-chunk-size
PARSE_RULE_OPTION_NAME("repo-storage-upload-chunk-size"), // opt/repo-storage-upload-chunk-size
@ -10698,6 +10781,7 @@ static const uint8_t optionResolveOrder[] =
cfgOptRepoStorageCaPath, // opt-resolve-order
cfgOptRepoStorageHost, // opt-resolve-order
cfgOptRepoStoragePort, // opt-resolve-order
cfgOptRepoStorageTag, // opt-resolve-order
cfgOptRepoStorageUploadChunkSize, // opt-resolve-order
cfgOptRepoStorageVerifyTls, // opt-resolve-order
cfgOptTarget, // opt-resolve-order

View File

@ -79,7 +79,8 @@ storageAzureHelper(const unsigned int repoIdx, const bool write, StoragePathExpr
result = storageAzureNew(
cfgOptionIdxStr(cfgOptRepoPath, repoIdx), write, pathExpressionCallback,
cfgOptionIdxStr(cfgOptRepoAzureContainer, repoIdx), cfgOptionIdxStr(cfgOptRepoAzureAccount, repoIdx), keyType, key,
(size_t)cfgOptionIdxUInt64(cfgOptRepoStorageUploadChunkSize, repoIdx), endpoint, uriStyle, port, ioTimeoutMs(),
(size_t)cfgOptionIdxUInt64(cfgOptRepoStorageUploadChunkSize, repoIdx),
cfgOptionIdxKvNull(cfgOptRepoStorageTag, repoIdx), endpoint, uriStyle, port, ioTimeoutMs(),
cfgOptionIdxBool(cfgOptRepoStorageVerifyTls, repoIdx), cfgOptionIdxStrNull(cfgOptRepoStorageCaFile, repoIdx),
cfgOptionIdxStrNull(cfgOptRepoStorageCaPath, repoIdx));
}

View File

@ -22,8 +22,9 @@ Azure Storage
/***********************************************************************************************************************************
Azure http headers
***********************************************************************************************************************************/
STRING_STATIC(AZURE_HEADER_TAGS, "x-ms-tags");
STRING_STATIC(AZURE_HEADER_VERSION_STR, "x-ms-version");
STRING_STATIC(AZURE_HEADER_VERSION_VALUE_STR, "2019-02-02");
STRING_STATIC(AZURE_HEADER_VERSION_VALUE_STR, "2019-12-12");
/***********************************************************************************************************************************
Azure query tokens
@ -66,6 +67,7 @@ struct StorageAzure
const HttpQuery *sasKey; // SAS key
const String *host; // Host name
size_t blockSize; // Block size for multi-block upload
const String *tag; // Tags to be applied to objects
const String *pathPrefix; // Account/container prefix
uint64_t fileId; // Id to used to make file block identifiers unique
@ -192,6 +194,7 @@ storageAzureRequestAsync(StorageAzure *this, const String *verb, StorageAzureReq
FUNCTION_LOG_PARAM(HTTP_HEADER, param.header);
FUNCTION_LOG_PARAM(HTTP_QUERY, param.query);
FUNCTION_LOG_PARAM(BUFFER, param.content);
FUNCTION_LOG_PARAM(BOOL, param.tag);
FUNCTION_LOG_END();
ASSERT(this != NULL);
@ -221,6 +224,10 @@ storageAzureRequestAsync(StorageAzure *this, const String *verb, StorageAzureReq
strNewEncode(encodingBase64, cryptoHashOne(hashTypeMd5, param.content)));
}
// Set tags when requested and available
if (param.tag && this->tag != NULL)
httpHeaderPut(requestHeader, AZURE_HEADER_TAGS, this->tag);
// Encode path
const String *const path = httpUriEncode(param.path, true);
@ -288,10 +295,11 @@ storageAzureRequest(StorageAzure *this, const String *verb, StorageAzureRequestP
FUNCTION_LOG_PARAM(BUFFER, param.content);
FUNCTION_LOG_PARAM(BOOL, param.allowMissing);
FUNCTION_LOG_PARAM(BOOL, param.contentIo);
FUNCTION_LOG_PARAM(BOOL, param.tag);
FUNCTION_LOG_END();
HttpRequest *const request = storageAzureRequestAsyncP(
this, verb, .path = param.path, .header = param.header, .query = param.query, .content = param.content);
this, verb, .path = param.path, .header = param.header, .query = param.query, .content = param.content, .tag = param.tag);
HttpResponse *const result = storageAzureResponseP(request, .allowMissing = param.allowMissing, .contentIo = param.contentIo);
httpRequestFree(request);
@ -706,8 +714,8 @@ FN_EXTERN Storage *
storageAzureNew(
const String *const path, const bool write, StoragePathExpressionCallback pathExpressionFunction, const String *const container,
const String *const account, const StorageAzureKeyType keyType, const String *const key, const size_t blockSize,
const String *const endpoint, const StorageAzureUriStyle uriStyle, const unsigned int port, const TimeMSec timeout,
const bool verifyPeer, const String *const caFile, const String *const caPath)
const KeyValue *const tag, const String *const endpoint, const StorageAzureUriStyle uriStyle, const unsigned int port,
const TimeMSec timeout, const bool verifyPeer, const String *const caFile, const String *const caPath)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, path);
@ -718,6 +726,7 @@ storageAzureNew(
FUNCTION_LOG_PARAM(STRING_ID, keyType);
FUNCTION_TEST_PARAM(STRING, key);
FUNCTION_LOG_PARAM(SIZE, blockSize);
FUNCTION_LOG_PARAM(KEY_VALUE, tag);
FUNCTION_LOG_PARAM(STRING, endpoint);
FUNCTION_LOG_PARAM(ENUM, uriStyle);
FUNCTION_LOG_PARAM(UINT, port);
@ -748,6 +757,14 @@ storageAzureNew(
strNewFmt("/%s", strZ(container)) : strNewFmt("/%s/%s", strZ(account), strZ(container)),
};
// Create tag query string
if (tag != NULL)
{
HttpQuery *const query = httpQueryNewP(.kv = tag);
this->tag = httpQueryRenderP(query);
httpQueryFree(query);
}
// Store shared key or parse sas query
if (keyType == storageAzureKeyTypeShared)
this->sharedKey = bufNewDecode(encodingBase64, key);

View File

@ -34,8 +34,8 @@ Constructors
***********************************************************************************************************************************/
FN_EXTERN Storage *storageAzureNew(
const String *path, bool write, StoragePathExpressionCallback pathExpressionFunction, const String *container,
const String *account, StorageAzureKeyType keyType, const String *key, size_t blockSize, const String *endpoint,
StorageAzureUriStyle uriStyle, unsigned int port, TimeMSec timeout, bool verifyPeer, const String *caFile,
const String *caPath);
const String *account, StorageAzureKeyType keyType, const String *key, size_t blockSize, const KeyValue *tag,
const String *endpoint, StorageAzureUriStyle uriStyle, unsigned int port, TimeMSec timeout, bool verifyPeer,
const String *caFile, const String *caPath);
#endif

View File

@ -33,6 +33,7 @@ typedef struct StorageAzureRequestAsyncParam
const HttpHeader *header; // Request headers
const HttpQuery *query; // Query parameters
const Buffer *content; // Request content
bool tag; // Add tags when available?
} StorageAzureRequestAsyncParam;
#define storageAzureRequestAsyncP(this, verb, ...) \
@ -62,6 +63,7 @@ typedef struct StorageAzureRequestParam
const Buffer *content; // Request content
bool allowMissing; // Allow missing files (caller can check response code)
bool contentIo; // Is IoRead interface required to read content?
bool tag; // Add tags when available?
} StorageAzureRequestParam;
#define storageAzureRequestP(this, verb, ...) \

View File

@ -240,7 +240,7 @@ storageWriteAzureClose(THIS_VOID)
storageAzureRequestP(
this->storage, HTTP_VERB_PUT_STR, .path = this->interface.name,
.query = httpQueryAdd(httpQueryNewP(), AZURE_QUERY_COMP_STR, AZURE_QUERY_VALUE_BLOCK_LIST_STR),
.content = xmlDocumentBuf(blockXml));
.content = xmlDocumentBuf(blockXml), .tag = true);
}
// Else upload all the data in a single block
else
@ -248,7 +248,7 @@ storageWriteAzureClose(THIS_VOID)
storageAzureRequestP(
this->storage, HTTP_VERB_PUT_STR, .path = this->interface.name,
httpHeaderAdd(httpHeaderNew(NULL), AZURE_HEADER_BLOB_TYPE_STR, AZURE_HEADER_VALUE_BLOCK_BLOB_STR),
.content = this->blockBuffer);
.content = this->blockBuffer, .tag = true);
}
bufFree(this->blockBuffer);

View File

@ -24,9 +24,9 @@ storageGcsHelper(const unsigned int repoIdx, const bool write, StoragePathExpres
Storage *const result = storageGcsNew(
cfgOptionIdxStr(cfgOptRepoPath, repoIdx), write, pathExpressionCallback, cfgOptionIdxStr(cfgOptRepoGcsBucket, repoIdx),
(StorageGcsKeyType)cfgOptionIdxStrId(cfgOptRepoGcsKeyType, repoIdx), cfgOptionIdxStrNull(cfgOptRepoGcsKey, repoIdx),
(size_t)cfgOptionIdxUInt64(cfgOptRepoStorageUploadChunkSize, repoIdx), cfgOptionIdxStr(cfgOptRepoGcsEndpoint, repoIdx),
ioTimeoutMs(), cfgOptionIdxBool(cfgOptRepoStorageVerifyTls, repoIdx), cfgOptionIdxStrNull(cfgOptRepoStorageCaFile, repoIdx),
cfgOptionIdxStrNull(cfgOptRepoStorageCaPath, repoIdx));
(size_t)cfgOptionIdxUInt64(cfgOptRepoStorageUploadChunkSize, repoIdx), cfgOptionIdxKvNull(cfgOptRepoStorageTag, repoIdx),
cfgOptionIdxStr(cfgOptRepoGcsEndpoint, repoIdx), ioTimeoutMs(), cfgOptionIdxBool(cfgOptRepoStorageVerifyTls, repoIdx),
cfgOptionIdxStrNull(cfgOptRepoStorageCaFile, repoIdx), cfgOptionIdxStrNull(cfgOptRepoStorageCaPath, repoIdx));
FUNCTION_LOG_RETURN(STORAGE, result);
}

View File

@ -87,6 +87,7 @@ struct StorageGcs
const String *bucket; // Bucket to store data in
const String *endpoint; // Endpoint
size_t chunkSize; // Block size for resumable upload
const Buffer *tag; // Tags to be applied to objects
StorageGcsKeyType keyType; // Auth key type
const String *key; // Key (value depends on key type)
@ -391,6 +392,7 @@ storageGcsRequestAsync(StorageGcs *this, const String *verb, StorageGcsRequestAs
FUNCTION_LOG_PARAM(BOOL, param.noBucket);
FUNCTION_LOG_PARAM(BOOL, param.upload);
FUNCTION_LOG_PARAM(BOOL, param.noAuth);
FUNCTION_LOG_PARAM(BOOL, param.tag);
FUNCTION_LOG_PARAM(STRING, param.object);
FUNCTION_LOG_PARAM(HTTP_HEADER, param.header);
FUNCTION_LOG_PARAM(HTTP_QUERY, param.query);
@ -414,10 +416,20 @@ storageGcsRequestAsync(StorageGcs *this, const String *verb, StorageGcsRequestAs
if (param.object != NULL)
strCatFmt(path, "/%s", strZ(httpUriEncode(strSub(param.object, 1), false)));
// Create header list and add content length
// Create header list
HttpHeader *requestHeader =
param.header == NULL ? httpHeaderNew(this->headerRedactList) : httpHeaderDup(param.header, this->headerRedactList);
// Add tags
if (param.tag)
{
ASSERT(param.content == NULL);
ASSERT(this->tag != NULL);
httpHeaderPut(requestHeader, HTTP_HEADER_CONTENT_TYPE_STR, HTTP_HEADER_CONTENT_TYPE_JSON_STR);
param.content = this->tag;
}
// Set host
httpHeaderPut(requestHeader, HTTP_HEADER_HOST_STR, this->endpoint);
@ -488,6 +500,7 @@ storageGcsRequest(StorageGcs *const this, const String *const verb, const Storag
FUNCTION_LOG_PARAM(BOOL, param.noBucket);
FUNCTION_LOG_PARAM(BOOL, param.upload);
FUNCTION_LOG_PARAM(BOOL, param.noAuth);
FUNCTION_LOG_PARAM(BOOL, param.tag);
FUNCTION_LOG_PARAM(STRING, param.object);
FUNCTION_LOG_PARAM(HTTP_HEADER, param.header);
FUNCTION_LOG_PARAM(HTTP_QUERY, param.query);
@ -498,8 +511,8 @@ storageGcsRequest(StorageGcs *const this, const String *const verb, const Storag
FUNCTION_LOG_END();
HttpRequest *const request = storageGcsRequestAsyncP(
this, verb, .noBucket = param.noBucket, .upload = param.upload, .noAuth = param.noAuth, .object = param.object,
.header = param.header, .query = param.query, .content = param.content);
this, verb, .noBucket = param.noBucket, .upload = param.upload, .noAuth = param.noAuth, .tag = param.tag,
.object = param.object, .header = param.header, .query = param.query, .content = param.content);
HttpResponse *const result = storageGcsResponseP(
request, .allowMissing = param.allowMissing, .allowIncomplete = param.allowIncomplete, .contentIo = param.contentIo);
@ -836,7 +849,7 @@ storageGcsNewWrite(THIS_VOID, const String *file, StorageInterfaceNewWriteParam
ASSERT(param.group == NULL);
ASSERT(param.timeModified == 0);
FUNCTION_LOG_RETURN(STORAGE_WRITE, storageWriteGcsNew(this, file, this->chunkSize));
FUNCTION_LOG_RETURN(STORAGE_WRITE, storageWriteGcsNew(this, file, this->chunkSize, this->tag != NULL));
}
/**********************************************************************************************************************************/
@ -953,8 +966,9 @@ static const StorageInterface storageInterfaceGcs =
FN_EXTERN Storage *
storageGcsNew(
const String *const path, const bool write, StoragePathExpressionCallback pathExpressionFunction, const String *const bucket,
const StorageGcsKeyType keyType, const String *const key, const size_t chunkSize, const String *const endpoint,
const TimeMSec timeout, const bool verifyPeer, const String *const caFile, const String *const caPath)
const StorageGcsKeyType keyType, const String *const key, const size_t chunkSize, const KeyValue *const tag,
const String *const endpoint, const TimeMSec timeout, const bool verifyPeer, const String *const caFile,
const String *const caPath)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, path);
@ -964,6 +978,7 @@ storageGcsNew(
FUNCTION_LOG_PARAM(STRING_ID, keyType);
FUNCTION_TEST_PARAM(STRING, key);
FUNCTION_LOG_PARAM(SIZE, chunkSize);
FUNCTION_LOG_PARAM(KEY_VALUE, tag);
FUNCTION_LOG_PARAM(STRING, endpoint);
FUNCTION_LOG_PARAM(TIME_MSEC, timeout);
FUNCTION_LOG_PARAM(BOOL, verifyPeer);
@ -987,6 +1002,32 @@ storageGcsNew(
.chunkSize = chunkSize,
};
// Create tag JSON buffer
if (write && tag != NULL)
{
MEM_CONTEXT_TEMP_BEGIN()
{
JsonWrite *const tagJson = jsonWriteObjectBegin(
jsonWriteKeyStrId(jsonWriteObjectBegin(jsonWriteNewP()), STRID5("metadata", 0xd0240d0ad0)));
const StringList *const keyList = strLstSort(strLstNewVarLst(kvKeyList(tag)), sortOrderAsc);
for (unsigned int keyIdx = 0; keyIdx < strLstSize(keyList); keyIdx++)
{
const String *const key = strLstGet(keyList, keyIdx);
jsonWriteStr(jsonWriteKey(tagJson, key), varStr(kvGet(tag, VARSTR(key))));
}
const String *const tagStr = jsonWriteResult(jsonWriteObjectEnd(jsonWriteObjectEnd(tagJson)));
MEM_CONTEXT_PRIOR_BEGIN()
{
this->tag = bufDup(BUFSTR(tagStr));
}
MEM_CONTEXT_PRIOR_END();
}
MEM_CONTEXT_TEMP_END();
}
// Handle auth key types
switch (keyType)
{

View File

@ -26,7 +26,7 @@ Constructors
***********************************************************************************************************************************/
FN_EXTERN Storage *storageGcsNew(
const String *path, bool write, StoragePathExpressionCallback pathExpressionFunction, const String *bucket,
StorageGcsKeyType keyType, const String *key, size_t blockSize, const String *endpoint, TimeMSec timeout, bool verifyPeer,
const String *caFile, const String *caPath);
StorageGcsKeyType keyType, const String *key, size_t blockSize, const KeyValue *tag, const String *endpoint, TimeMSec timeout,
bool verifyPeer, const String *caFile, const String *caPath);
#endif

View File

@ -50,6 +50,7 @@ typedef struct StorageGcsRequestAsyncParam
bool noBucket; // Exclude bucket from the URI?
bool upload; // Is an object upload?
bool noAuth; // Exclude authentication header?
bool tag; // Add tags when available?
const String *object; // Object to include in URI
const HttpHeader *header; // Request headers
const HttpQuery *query; // Query parameters
@ -81,6 +82,7 @@ typedef struct StorageGcsRequestParam
bool noBucket; // Exclude bucket from the URI?
bool upload; // Is an object upload?
bool noAuth; // Exclude authentication header?
bool tag; // Add tags when available?
const String *object; // Object to include in URI
const HttpHeader *header; // Request headers
const HttpQuery *query; // Query parameters

View File

@ -30,6 +30,7 @@ typedef struct StorageWriteGcs
HttpRequest *request; // Async chunk upload request
size_t chunkSize; // Size of chunks for resumable upload
bool tag; // Are tags available?
Buffer *chunkBuffer; // Block buffer (stores data until chunkSize is reached)
const String *uploadId; // Id for resumable upload
uint64_t uploadTotal; // Total bytes uploaded
@ -159,8 +160,6 @@ storageWriteGcsBlockAsync(StorageWriteGcs *this, bool done)
ASSERT(this != NULL);
ASSERT(this->chunkBuffer != NULL);
ASSERT(bufSize(this->chunkBuffer) > 0);
ASSERT(!done || this->uploadId != NULL);
MEM_CONTEXT_TEMP_BEGIN()
{
@ -175,7 +174,8 @@ storageWriteGcsBlockAsync(StorageWriteGcs *this, bool done)
// Get the upload id
if (this->uploadId == NULL)
{
HttpResponse *response = storageGcsRequestP(this->storage, HTTP_VERB_POST_STR, .upload = true, .query = query);
HttpResponse *response = storageGcsRequestP(
this->storage, HTTP_VERB_POST_STR, .upload = true, .tag = this->tag, .query = query);
MEM_CONTEXT_OBJ_BEGIN(this)
{
@ -284,7 +284,7 @@ storageWriteGcsClose(THIS_VOID)
MEM_CONTEXT_TEMP_BEGIN()
{
// If a resumable upload was started then finish that way
if (this->uploadId != NULL)
if (this->uploadId != NULL || this->tag)
{
// Write what is left in the chunk buffer
storageWriteGcsBlockAsync(this, true);
@ -322,12 +322,13 @@ storageWriteGcsClose(THIS_VOID)
/**********************************************************************************************************************************/
FN_EXTERN StorageWrite *
storageWriteGcsNew(StorageGcs *const storage, const String *const name, const size_t chunkSize)
storageWriteGcsNew(StorageGcs *const storage, const String *const name, const size_t chunkSize, const bool tag)
{
FUNCTION_LOG_BEGIN(logLevelTrace);
FUNCTION_LOG_PARAM(STORAGE_GCS, storage);
FUNCTION_LOG_PARAM(STRING, name);
FUNCTION_LOG_PARAM(UINT64, chunkSize);
FUNCTION_LOG_PARAM(BOOL, tag);
FUNCTION_LOG_END();
ASSERT(storage != NULL);
@ -339,6 +340,7 @@ storageWriteGcsNew(StorageGcs *const storage, const String *const name, const si
{
.storage = storage,
.chunkSize = chunkSize,
.tag = tag,
.interface = (StorageWriteInterface)
{

View File

@ -10,6 +10,6 @@ GCS Storage File Write
/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
FN_EXTERN StorageWrite *storageWriteGcsNew(StorageGcs *storage, const String *name, size_t chunkSize);
FN_EXTERN StorageWrite *storageWriteGcsNew(StorageGcs *storage, const String *name, size_t chunkSize, bool tag);
#endif

View File

@ -86,7 +86,8 @@ storageS3Helper(const unsigned int repoIdx, const bool write, StoragePathExpress
(StorageS3UriStyle)cfgOptionIdxStrId(cfgOptRepoS3UriStyle, repoIdx), cfgOptionIdxStr(cfgOptRepoS3Region, repoIdx),
keyType, cfgOptionIdxStrNull(cfgOptRepoS3Key, repoIdx), cfgOptionIdxStrNull(cfgOptRepoS3KeySecret, repoIdx),
cfgOptionIdxStrNull(cfgOptRepoS3Token, repoIdx), cfgOptionIdxStrNull(cfgOptRepoS3KmsKeyId, repoIdx), role,
webIdToken, (size_t)cfgOptionIdxUInt64(cfgOptRepoStorageUploadChunkSize, repoIdx), host, port, ioTimeoutMs(),
webIdToken, (size_t)cfgOptionIdxUInt64(cfgOptRepoStorageUploadChunkSize, repoIdx),
cfgOptionIdxKvNull(cfgOptRepoStorageTag, repoIdx), host, port, ioTimeoutMs(),
cfgOptionIdxBool(cfgOptRepoStorageVerifyTls, repoIdx), cfgOptionIdxStrNull(cfgOptRepoStorageCaFile, repoIdx),
cfgOptionIdxStrNull(cfgOptRepoStorageCaPath, repoIdx));
}

View File

@ -33,6 +33,7 @@ STRING_STATIC(S3_HEADER_TOKEN_STR, "x-amz-secur
STRING_STATIC(S3_HEADER_SRVSDENC_STR, "x-amz-server-side-encryption");
STRING_STATIC(S3_HEADER_SRVSDENC_KMS_STR, "aws:kms");
STRING_STATIC(S3_HEADER_SRVSDENC_KMSKEYID_STR, "x-amz-server-side-encryption-aws-kms-key-id");
STRING_STATIC(S3_HEADER_TAGGING, "x-amz-tagging");
/***********************************************************************************************************************************
S3 query tokens
@ -94,6 +95,7 @@ struct StorageS3
String *securityToken; // Security token, if any
const String *kmsKeyId; // Server-side encryption key
size_t partSize; // Part size for multi-part upload
const String *tag; // Tags to be applied to objects
unsigned int deleteMax; // Maximum objects that can be deleted in one request
StorageS3UriStyle uriStyle; // Path or host style URIs
const String *bucketEndpoint; // Set to {bucket}.{endpoint}
@ -453,6 +455,7 @@ storageS3RequestAsync(StorageS3 *this, const String *verb, const String *path, S
FUNCTION_LOG_PARAM(HTTP_QUERY, param.query);
FUNCTION_LOG_PARAM(BUFFER, param.content);
FUNCTION_LOG_PARAM(BOOL, param.sseKms);
FUNCTION_LOG_PARAM(BOOL, param.tag);
FUNCTION_LOG_END();
ASSERT(this != NULL);
@ -486,6 +489,10 @@ storageS3RequestAsync(StorageS3 *this, const String *verb, const String *path, S
httpHeaderPut(requestHeader, S3_HEADER_SRVSDENC_KMSKEYID_STR, this->kmsKeyId);
}
// Set tags when requested and available
if (param.tag && this->tag != NULL)
httpHeaderPut(requestHeader, S3_HEADER_TAGGING, this->tag);
// When using path-style URIs the bucket name needs to be prepended
if (this->uriStyle == storageS3UriStylePath)
path = strNewFmt("/%s%s", strZ(this->bucket), strZ(path));
@ -592,10 +599,12 @@ storageS3Request(StorageS3 *this, const String *verb, const String *path, Storag
FUNCTION_LOG_PARAM(BOOL, param.allowMissing);
FUNCTION_LOG_PARAM(BOOL, param.contentIo);
FUNCTION_LOG_PARAM(BOOL, param.sseKms);
FUNCTION_LOG_PARAM(BOOL, param.tag);
FUNCTION_LOG_END();
HttpRequest *const request = storageS3RequestAsyncP(
this, verb, path, .header = param.header, .query = param.query, .content = param.content, .sseKms = param.sseKms);
this, verb, path, .header = param.header, .query = param.query, .content = param.content, .sseKms = param.sseKms,
.tag = param.tag);
HttpResponse *const result = storageS3ResponseP(
request, .allowMissing = param.allowMissing, .contentIo = param.contentIo);
@ -1097,8 +1106,8 @@ storageS3New(
const String *const endPoint, const StorageS3UriStyle uriStyle, const String *const region, const StorageS3KeyType keyType,
const String *const accessKey, const String *const secretAccessKey, const String *const securityToken,
const String *const kmsKeyId, const String *const credRole, const String *const webIdToken, const size_t partSize,
const String *host, const unsigned int port, const TimeMSec timeout, const bool verifyPeer, const String *const caFile,
const String *const caPath)
const KeyValue *const tag, const String *host, const unsigned int port, const TimeMSec timeout, const bool verifyPeer,
const String *const caFile, const String *const caPath)
{
FUNCTION_LOG_BEGIN(logLevelDebug);
FUNCTION_LOG_PARAM(STRING, path);
@ -1116,6 +1125,7 @@ storageS3New(
FUNCTION_TEST_PARAM(STRING, credRole);
FUNCTION_TEST_PARAM(STRING, webIdToken);
FUNCTION_LOG_PARAM(SIZE, partSize);
FUNCTION_LOG_PARAM(KEY_VALUE, tag);
FUNCTION_LOG_PARAM(STRING, host);
FUNCTION_LOG_PARAM(UINT, port);
FUNCTION_LOG_PARAM(TIME_MSEC, timeout);
@ -1149,6 +1159,14 @@ storageS3New(
.signingKeyDate = YYYYMMDD_STR,
};
// Create tag query string
if (write && tag != NULL)
{
HttpQuery *const query = httpQueryNewP(.kv = tag);
this->tag = httpQueryRenderP(query);
httpQueryFree(query);
}
// Create the HTTP client used to service requests
if (host == NULL)
host = this->bucketEndpoint;

View File

@ -37,7 +37,7 @@ FN_EXTERN Storage *storageS3New(
const String *path, bool write, StoragePathExpressionCallback pathExpressionFunction, const String *bucket,
const String *endPoint, StorageS3UriStyle uriStyle, const String *region, StorageS3KeyType keyType, const String *accessKey,
const String *secretAccessKey, const String *securityToken, const String *kmsKeyId, const String *credRole,
const String *webIdToken, size_t partSize, const String *host, unsigned int port, TimeMSec timeout, bool verifyPeer,
const String *caFile, const String *caPath);
const String *webIdToken, size_t partSize, const KeyValue *tag, const String *host, unsigned int port, TimeMSec timeout,
bool verifyPeer, const String *caFile, const String *caPath);
#endif

View File

@ -23,6 +23,7 @@ typedef struct StorageS3RequestAsyncParam
const HttpQuery *query; // Query parameters
const Buffer *content; // Request content
bool sseKms; // Enable server-side encryption?
bool tag; // Add tags when available?
} StorageS3RequestAsyncParam;
#define storageS3RequestAsyncP(this, verb, path, ...) \
@ -53,6 +54,7 @@ typedef struct StorageS3RequestParam
bool allowMissing; // Allow missing files (caller can check response code)
bool contentIo; // Is IoRead interface required to read content?
bool sseKms; // Enable server-side encryption?
bool tag; // Add tags when available?
} StorageS3RequestParam;
#define storageS3RequestP(this, verb, path, ...) \

View File

@ -127,7 +127,8 @@ storageWriteS3PartAsync(StorageWriteS3 *this)
httpResponseContent(
storageS3RequestP(
this->storage, HTTP_VERB_POST_STR, this->interface.name,
.query = httpQueryAdd(httpQueryNewP(), S3_QUERY_UPLOADS_STR, EMPTY_STR), .sseKms = true))));
.query = httpQueryAdd(httpQueryNewP(), S3_QUERY_UPLOADS_STR, EMPTY_STR), .sseKms = true,
.tag = true))));
// Store the upload id
MEM_CONTEXT_OBJ_BEGIN(this)
@ -254,7 +255,8 @@ storageWriteS3Close(THIS_VOID)
else
{
storageS3RequestP(
this->storage, HTTP_VERB_PUT_STR, this->interface.name, .content = this->partBuffer, .sseKms = true);
this->storage, HTTP_VERB_PUT_STR, this->interface.name, .content = this->partBuffer, .sseKms = true,
.tag = true);
}
bufFree(this->partBuffer);

View File

@ -599,9 +599,9 @@ unit:
- storage/s3/read
- storage/s3/storage
- storage/s3/write
- storage/helper
include:
- storage/helper
- storage/storage
- storage/write

View File

@ -329,6 +329,7 @@ testRun(void)
" --repo-storage-ca-path repository storage CA path\n"
" --repo-storage-host repository storage host\n"
" --repo-storage-port repository storage port [default=443]\n"
" --repo-storage-tag repository storage tag(s)\n"
" --repo-storage-upload-chunk-size repository storage upload chunk size\n"
" --repo-storage-verify-tls repository storage certificate verify\n"
" [default=y]\n"

View File

@ -182,6 +182,11 @@ testRun(void)
TEST_RESULT_VOID(FUNCTION_LOG_OBJECT_FORMAT(query2, httpQueryToLog, logBuf, sizeof(logBuf)), "httpQueryToLog");
TEST_RESULT_Z(logBuf, "{a/: '+b', c: 'd='}", "check log");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("new query from kv");
TEST_RESULT_STR_Z(httpQueryRenderP(httpQueryNewP(.kv = query->kv)), "key1=value%201%3F&key2=value2a", "new query");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("merge queries");

View File

@ -33,6 +33,7 @@ typedef struct TestRequestParam
const char *content;
const char *blobType;
const char *range;
const char *tag;
} TestRequestParam;
#define testRequestP(write, verb, path, ...) \
@ -94,9 +95,13 @@ testRequest(IoWrite *write, const char *verb, const char *path, TestRequestParam
if (param.blobType != NULL)
strCatFmt(request, "x-ms-blob-type:%s\r\n", param.blobType);
// Add tags
if (param.tag != NULL)
strCatFmt(request, "x-ms-tags:%s\r\n", param.tag);
// Add version
if (driver->sharedKey != NULL)
strCatZ(request, "x-ms-version:2019-02-02\r\n");
strCatZ(request, "x-ms-version:2019-12-12\r\n");
// Complete headers
strCatZ(request, "\r\n");
@ -393,7 +398,7 @@ testRun(void)
(StorageAzure *)storageDriver(
storageAzureNew(
STRDEF("/repo"), false, NULL, TEST_CONTAINER_STR, TEST_ACCOUNT_STR, storageAzureKeyTypeShared,
TEST_KEY_SHARED_STR, 16, STRDEF("blob.core.windows.net"), storageAzureUriStyleHost, 443, 1000, true, NULL,
TEST_KEY_SHARED_STR, 16, NULL, STRDEF("blob.core.windows.net"), storageAzureUriStyleHost, 443, 1000, true, NULL,
NULL)),
"new azure storage - shared key");
@ -407,7 +412,7 @@ testRun(void)
TEST_RESULT_Z(
logBuf,
"{content-length: '0', host: 'account.blob.core.windows.net', date: 'Sun, 21 Jun 2020 12:46:19 GMT'"
", x-ms-version: '2019-02-02', authorization: 'SharedKey account:edqgT7EhsiIN3q6Al2HCZlpXr2D5cJFavr2ZCkhG9R8='}",
", x-ms-version: '2019-12-12', authorization: 'SharedKey account:wZCOnSPB1KkkdjaQMcThkkKyUlfS0pPjwaIfd1cUh4Y='}",
"check headers");
// -------------------------------------------------------------------------------------------------------------------------
@ -423,8 +428,8 @@ testRun(void)
TEST_RESULT_Z(
logBuf,
"{content-length: '44', content-md5: 'b64f49553d5c441652e95697a2c5949e', host: 'account.blob.core.windows.net'"
", date: 'Sun, 21 Jun 2020 12:46:19 GMT', x-ms-version: '2019-02-02'"
", authorization: 'SharedKey account:5qAnroLtbY8IWqObx8+UVwIUysXujsfWZZav7PrBON0='}",
", date: 'Sun, 21 Jun 2020 12:46:19 GMT', x-ms-version: '2019-12-12'"
", authorization: 'SharedKey account:Adr+lyGByiEpKrKPyhY3c1uLBDgB7hw0XW5Do6u79Nw='}",
"check headers");
// -------------------------------------------------------------------------------------------------------------------------
@ -435,7 +440,7 @@ testRun(void)
(StorageAzure *)storageDriver(
storageAzureNew(
STRDEF("/repo"), false, NULL, TEST_CONTAINER_STR, TEST_ACCOUNT_STR, storageAzureKeyTypeSas, TEST_KEY_SAS_STR,
16, STRDEF("blob.core.usgovcloudapi.net"), storageAzureUriStyleHost, 443, 1000, true, NULL, NULL)),
16, NULL, STRDEF("blob.core.usgovcloudapi.net"), storageAzureUriStyleHost, 443, 1000, true, NULL, NULL)),
"new azure storage - sas key");
query = httpQueryAdd(httpQueryNewP(), STRDEF("a"), STRDEF("b"));
@ -474,6 +479,8 @@ testRun(void)
hrnCfgArgRawBool(argList, cfgOptRepoStorageVerifyTls, TEST_IN_CONTAINER);
hrnCfgEnvRawZ(cfgOptRepoAzureAccount, TEST_ACCOUNT);
hrnCfgEnvRawZ(cfgOptRepoAzureKey, TEST_KEY_SHARED);
hrnCfgArgRawZ(argList, cfgOptRepoStorageTag, "Key1=Value1");
hrnCfgArgRawZ(argList, cfgOptRepoStorageTag, " Key 2= Value 2");
HRN_CFG_LOAD(cfgCmdArchivePush, argList);
Storage *storage = NULL;
@ -547,7 +554,7 @@ testRun(void)
"content-length: 0\n"
"date: <redacted>\n"
"host: %s\n"
"x-ms-version: 2019-02-02\n"
"x-ms-version: 2019-12-12\n"
"*** Response Headers ***:\n"
"content-length: 7\n"
"*** Response Content ***:\n"
@ -557,7 +564,9 @@ testRun(void)
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write error");
testRequestP(service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD");
testRequestP(
service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service, .code = 403);
TEST_ERROR_FMT(
@ -572,15 +581,20 @@ testRun(void)
"date: <redacted>\n"
"host: %s\n"
"x-ms-blob-type: BlockBlob\n"
"x-ms-version: 2019-02-02",
"x-ms-tags: %%20Key%%202=%%20Value%%202&Key1=Value1\n"
"x-ms-version: 2019-12-12",
strZ(hrnServerHost()));
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in one part (with retry)");
testRequestP(service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD");
testRequestP(
service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service, .code = 503);
testRequestP(service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD");
testRequestP(
service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service);
StorageWrite *write = NULL;
@ -601,7 +615,9 @@ testRun(void)
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write zero-length file");
testRequestP(service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "");
testRequestP(
service, HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service);
TEST_ASSIGN(write, storageNewWriteP(storage, STRDEF("file.txt")), "new write");
@ -625,7 +641,8 @@ testRun(void)
"<BlockList>"
"<Uncommitted>0AAAAAAACCCCCCCCx0000000</Uncommitted>"
"<Uncommitted>0AAAAAAACCCCCCCCx0000001</Uncommitted>"
"</BlockList>\n");
"</BlockList>\n",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service);
// Test needs a predictable file id
@ -637,6 +654,9 @@ testRun(void)
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with something left over on close");
// Stop writing tags
driver->tag = NULL;
testRequestP(
service, HTTP_VERB_PUT, "/file.txt?blockid=0AAAAAAACCCCCCCDx0000000&comp=block", .content = "1234567890123456");
testResponseP(service);

View File

@ -75,6 +75,7 @@ typedef struct TestRequestParam
const char *object;
const char *query;
const char *contentRange;
const char *contentType;
const char *content;
const char *range;
} TestRequestParam;
@ -110,6 +111,10 @@ testRequest(IoWrite *write, const char *verb, TestRequestParam param)
if (param.contentRange != NULL)
strCatFmt(request, "content-range:bytes %s\r\n", param.contentRange);
// Add content-type
if (param.contentType != NULL)
strCatFmt(request, "content-type:%s\r\n", param.contentType);
// Add host
strCatFmt(request, "host:%s\r\n", strZ(hrnServerHost()));
@ -245,7 +250,7 @@ testRun(void)
(StorageGcs *)storageDriver(
storageGcsNew(
STRDEF("/repo"), false, NULL, TEST_BUCKET_STR, storageGcsKeyTypeService, TEST_KEY_FILE_STR, TEST_CHUNK_SIZE,
TEST_ENDPOINT_STR, TEST_TIMEOUT, true, NULL, NULL)),
NULL, TEST_ENDPOINT_STR, TEST_TIMEOUT, true, NULL, NULL)),
"read-only gcs storage - service key");
TEST_RESULT_STR_Z(httpUrlHost(storage->authUrl), "test.com", "check host");
TEST_RESULT_STR_Z(httpUrlPath(storage->authUrl), "/token", "check path");
@ -270,7 +275,7 @@ testRun(void)
(StorageGcs *)storageDriver(
storageGcsNew(
STRDEF("/repo"), true, NULL, TEST_BUCKET_STR, storageGcsKeyTypeService, TEST_KEY_FILE_STR, TEST_CHUNK_SIZE,
TEST_ENDPOINT_STR, TEST_TIMEOUT, true, NULL, NULL)),
NULL, TEST_ENDPOINT_STR, TEST_TIMEOUT, true, NULL, NULL)),
"read/write gcs storage - service key");
TEST_RESULT_STR_Z(
@ -434,6 +439,8 @@ testRun(void)
StringList *argListAuto = strLstDup(argList);
hrnCfgArgRawStrId(argListAuto, cfgOptRepoGcsKeyType, storageGcsKeyTypeAuto);
hrnCfgArgRawZ(argListAuto, cfgOptRepoStorageTag, "Key1=Value1");
hrnCfgArgRawZ(argListAuto, cfgOptRepoStorageTag, " Key 2= Value 2");
HRN_CFG_LOAD(cfgCmdArchivePush, argListAuto);
TEST_ASSIGN(storage, storageRepoGet(0, true), "get repo storage");
@ -446,6 +453,10 @@ testRun(void)
// Tests need the chunk size to be 16
((StorageGcs *)storageDriver(storage))->chunkSize = 16;
// Store tags and set to NULL
const Buffer *tag = ((StorageGcs *)storageDriver(storage))->tag;
((StorageGcs *)storageDriver(storage))->tag = NULL;
hrnServerScriptAccept(service);
// -----------------------------------------------------------------------------------------------------------------
@ -582,9 +593,31 @@ testRun(void)
TEST_ERROR(storagePutP(write, NULL), FormatError, "expected size 55 for '/file.txt' but actual is 0");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with nothing left over on close");
TEST_TITLE("write zero-length file (with tags)");
testRequestP(service, HTTP_VERB_POST, .upload = true, .query = "name=file.txt&uploadType=resumable");
testRequestP(
service, HTTP_VERB_POST, .upload = true, .query = "name=file.txt&uploadType=resumable",
.contentType = "application/json", .content = "{\"metadata\":{\" Key 2\":\" Value 2\",\"Key1\":\"Value1\"}}");
testResponseP(service, .header = "x-guploader-uploadid:ulid3");
testRequestP(
service, HTTP_VERB_PUT, .upload = true, .noAuth = true,
.query = "fields=md5Hash%2Csize&name=file.txt&uploadType=resumable&upload_id=ulid3", .contentRange = "*/0");
testResponseP(service, .content = "{\"md5Hash\":\"1B2M2Y8AsgTpgAmY7PhCfg==\",\"size\":\"0\"}");
((StorageGcs *)storageDriver(storage))->tag = tag;
TEST_ASSIGN(write, storageNewWriteP(storage, STRDEF("file.txt")), "new write");
TEST_RESULT_VOID(storagePutP(write, NULL), "write");
((StorageGcs *)storageDriver(storage))->tag = NULL;
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with nothing left over on close (with tags)");
testRequestP(
service, HTTP_VERB_POST, .upload = true, .query = "name=file.txt&uploadType=resumable",
.contentType = "application/json", .content = "{\"metadata\":{\" Key 2\":\" Value 2\",\"Key1\":\"Value1\"}}");
testResponseP(service, .header = "x-guploader-uploadid:ulid1");
testRequestP(
@ -604,9 +637,13 @@ testRun(void)
.query = "fields=md5Hash%2Csize&name=file.txt&uploadType=resumable&upload_id=ulid1", .contentRange = "*/32");
testResponseP(service, .content = "{\"md5Hash\":\"dnF5x6K/8ZZRzpfSlMMM+w==\",\"size\":\"32\"}");
((StorageGcs *)storageDriver(storage))->tag = tag;
TEST_ASSIGN(write, storageNewWriteP(storage, STRDEF("file.txt")), "new write");
TEST_RESULT_VOID(storagePutP(write, BUFSTRDEF("12345678901234567890123456789012")), "write");
((StorageGcs *)storageDriver(storage))->tag = NULL;
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with something left over on close");

View File

@ -31,6 +31,7 @@ typedef struct TestRequestParam
const char *kms;
const char *ttl;
const char *token;
const char *tag;
} TestRequestParam;
#define testRequestP(write, s3, verb, path, ...) \
@ -81,6 +82,9 @@ testRequest(IoWrite *write, Storage *s3, const char *verb, const char *path, Tes
if (param.kms != NULL)
strCatZ(request, ";x-amz-server-side-encryption;x-amz-server-side-encryption-aws-kms-key-id");
if (param.tag != NULL)
strCatZ(request, ";x-amz-tagging");
strCatZ(request, ",Signature=????????????????????????????????????????????????????????????????\r\n");
}
@ -132,6 +136,10 @@ testRequest(IoWrite *write, Storage *s3, const char *verb, const char *path, Tes
strCatFmt(request, "x-amz-server-side-encryption-aws-kms-key-id:%s\r\n", param.kms);
}
// Add tags
if (param.tag != NULL)
strCatFmt(request, "x-amz-tagging:%s\r\n", param.tag);
// Add metadata token
if (param.token != NULL)
strCatFmt(request, "x-aws-ec2-metadata-token:%s\r\n", param.token);
@ -484,6 +492,8 @@ testRun(void)
hrnCfgArgRaw(argList, cfgOptRepoS3Role, credRole);
hrnCfgArgRawStrId(argList, cfgOptRepoS3KeyType, storageS3KeyTypeAuto);
hrnCfgArgRawZ(argList, cfgOptRepoS3KmsKeyId, "kmskey1");
hrnCfgArgRawZ(argList, cfgOptRepoStorageTag, "Key1=Value1");
hrnCfgArgRawZ(argList, cfgOptRepoStorageTag, " Key 2= Value 2");
HRN_CFG_LOAD(cfgCmdArchivePush, argList);
s3 = storageRepoGet(0, true);
@ -703,7 +713,7 @@ testRun(void)
testRequestP(
service, s3, HTTP_VERB_PUT, "/file.txt", .content = "ABCD", .accessKey = "xx", .securityToken = "zz",
.kms = "kmskey1");
.kms = "kmskey1", .tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service);
// Make a copy of the signing key to verify that it gets changed when the keys are updated
@ -735,7 +745,9 @@ testRun(void)
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write zero-length file");
testRequestP(service, s3, HTTP_VERB_PUT, "/file.txt", .content = "", .kms = "kmskey1");
testRequestP(
service, s3, HTTP_VERB_PUT, "/file.txt", .content = "", .kms = "kmskey1",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(service);
TEST_ASSIGN(write, storageNewWriteP(s3, STRDEF("file.txt")), "new write");
@ -744,7 +756,9 @@ testRun(void)
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with nothing left over on close");
testRequestP(service, s3, HTTP_VERB_POST, "/file.txt?uploads=", .kms = "kmskey1");
testRequestP(
service, s3, HTTP_VERB_POST, "/file.txt?uploads=", .kms = "kmskey1",
.tag = "%20Key%202=%20Value%202&Key1=Value1");
testResponseP(
service,
.content =
@ -781,6 +795,9 @@ testRun(void)
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("error in success response of multipart upload");
// Stop writing tags
driver->tag = NULL;
testRequestP(service, s3, HTTP_VERB_POST, "/file.txt?uploads=", .kms = "kmskey1");
testResponseP(
service,