1
0
mirror of https://github.com/pgbackrest/pgbackrest.git synced 2025-10-30 23:37:45 +02:00

Azure support for repository storage.

Azure and Azure-compatible object stores can now be used for repository storage.

Currently only shared key authentication is supported but SAS will be added soon.
This commit is contained in:
David Steele
2020-07-02 16:24:34 -04:00
parent 3e2c8874f7
commit 3f4371d7a2
43 changed files with 3328 additions and 58 deletions

View File

@@ -74,9 +74,9 @@ Tablespaces are fully supported and on restore tablespaces can be remapped to an
File and directory links are supported for any file or directory in the PostgreSQL cluster. When restoring it is possible to restore all links to their original locations, remap some or all links, or restore some or all links as normal files or directories within the cluster directory.
### S3-Compatible Object Store Support
### S3 and Azure Compatible Object Store Support
pgBackRest repositories can be located in S3-compatible object stores to allow for virtually unlimited capacity and retention.
pgBackRest repositories can be located in S3 and Azure compatible object stores to allow for virtually unlimited capacity and retention.
### Encryption

View File

@@ -211,6 +211,17 @@ use constant CFGOPT_REPO_HOST_CONFIG_PATH => CFGOPT_RE
use constant CFGOPT_REPO_HOST_PORT => CFGOPT_REPO_HOST . '-port';
use constant CFGOPT_REPO_HOST_USER => CFGOPT_REPO_HOST . '-user';
# Repository Azure
use constant CFGDEF_REPO_AZURE => CFGDEF_PREFIX_REPO . '-azure';
use constant CFGOPT_REPO_AZURE_ACCOUNT => CFGDEF_REPO_AZURE . '-account';
use constant CFGOPT_REPO_AZURE_CA_FILE => CFGDEF_REPO_AZURE . '-ca-file';
use constant CFGOPT_REPO_AZURE_CA_PATH => CFGDEF_REPO_AZURE . '-ca-path';
use constant CFGOPT_REPO_AZURE_CONTAINER => CFGDEF_REPO_AZURE . '-container';
use constant CFGOPT_REPO_AZURE_HOST => CFGDEF_REPO_AZURE . '-host';
use constant CFGOPT_REPO_AZURE_KEY => CFGDEF_REPO_AZURE . '-key';
use constant CFGOPT_REPO_AZURE_PORT => CFGDEF_REPO_AZURE . '-port';
use constant CFGOPT_REPO_AZURE_VERIFY_TLS => CFGDEF_REPO_AZURE . '-verify-tls';
# Repository S3
use constant CFGDEF_REPO_S3 => CFGDEF_PREFIX_REPO . '-s3';
use constant CFGOPT_REPO_S3_KEY => CFGDEF_REPO_S3 . '-key';
@@ -296,6 +307,7 @@ use constant CFGOPTVAL_BACKUP_TYPE_INCR => 'incr';
# Repo type
#-----------------------------------------------------------------------------------------------------------------------------------
use constant CFGOPTVAL_REPO_TYPE_AZURE => 'azure';
use constant CFGOPTVAL_REPO_TYPE_CIFS => 'cifs';
use constant CFGOPTVAL_REPO_TYPE_POSIX => 'posix';
use constant CFGOPTVAL_REPO_TYPE_S3 => 's3';
@@ -1748,6 +1760,82 @@ my %hConfigDefine =
}
},
&CFGOPT_REPO_AZURE_ACCOUNT =>
{
&CFGDEF_SECTION => CFGDEF_SECTION_GLOBAL,
&CFGDEF_TYPE => CFGDEF_TYPE_STRING,
&CFGDEF_PREFIX => CFGDEF_PREFIX_REPO,
&CFGDEF_INDEX_TOTAL => CFGDEF_INDEX_REPO,
&CFGDEF_SECURE => true,
&CFGDEF_REQUIRED => true,
&CFGDEF_DEPEND =>
{
&CFGDEF_DEPEND_OPTION => CFGOPT_REPO_TYPE,
&CFGDEF_DEPEND_LIST => [CFGOPTVAL_REPO_TYPE_AZURE],
},
&CFGDEF_COMMAND => CFGOPT_REPO_TYPE,
},
&CFGOPT_REPO_AZURE_CA_FILE =>
{
&CFGDEF_INHERIT => CFGOPT_REPO_AZURE_HOST,
},
&CFGOPT_REPO_AZURE_CA_PATH =>
{
&CFGDEF_TYPE => CFGDEF_TYPE_PATH,
&CFGDEF_INHERIT => CFGOPT_REPO_AZURE_HOST,
},
&CFGOPT_REPO_AZURE_CONTAINER =>
{
&CFGDEF_TYPE => CFGDEF_TYPE_STRING,
&CFGDEF_PREFIX => CFGDEF_PREFIX_REPO,
&CFGDEF_INDEX_TOTAL => CFGDEF_INDEX_REPO,
&CFGDEF_SECTION => CFGDEF_SECTION_GLOBAL,
&CFGDEF_DEPEND => CFGOPT_REPO_AZURE_ACCOUNT,
&CFGDEF_COMMAND => CFGOPT_REPO_TYPE,
},
&CFGOPT_REPO_AZURE_HOST =>
{
&CFGDEF_SECTION => CFGDEF_SECTION_GLOBAL,
&CFGDEF_TYPE => CFGDEF_TYPE_STRING,
&CFGDEF_PREFIX => CFGDEF_PREFIX_REPO,
&CFGDEF_INDEX_TOTAL => CFGDEF_INDEX_REPO,
&CFGDEF_REQUIRED => false,
&CFGDEF_DEPEND => CFGOPT_REPO_AZURE_ACCOUNT,
&CFGDEF_COMMAND => CFGOPT_REPO_TYPE,
},
&CFGOPT_REPO_AZURE_KEY =>
{
&CFGDEF_INHERIT => CFGOPT_REPO_AZURE_ACCOUNT,
},
&CFGOPT_REPO_AZURE_PORT =>
{
&CFGDEF_SECTION => CFGDEF_SECTION_GLOBAL,
&CFGDEF_TYPE => CFGDEF_TYPE_INTEGER,
&CFGDEF_PREFIX => CFGDEF_PREFIX_REPO,
&CFGDEF_INDEX_TOTAL => CFGDEF_INDEX_REPO,
&CFGDEF_DEFAULT => 443,
&CFGDEF_ALLOW_RANGE => [1, 65535],
&CFGDEF_DEPEND => CFGOPT_REPO_AZURE_ACCOUNT,
&CFGDEF_COMMAND => CFGOPT_REPO_TYPE,
},
&CFGOPT_REPO_AZURE_VERIFY_TLS =>
{
&CFGDEF_SECTION => CFGDEF_SECTION_GLOBAL,
&CFGDEF_TYPE => CFGDEF_TYPE_BOOLEAN,
&CFGDEF_PREFIX => CFGDEF_PREFIX_REPO,
&CFGDEF_INDEX_TOTAL => CFGDEF_INDEX_REPO,
&CFGDEF_DEFAULT => true,
&CFGDEF_DEPEND => CFGOPT_REPO_AZURE_ACCOUNT,
&CFGDEF_COMMAND => CFGOPT_REPO_TYPE,
},
&CFGOPT_REPO_S3_BUCKET =>
{
&CFGDEF_TYPE => CFGDEF_TYPE_STRING,
@@ -1926,6 +2014,7 @@ my %hConfigDefine =
&CFGDEF_DEFAULT => CFGOPTVAL_REPO_TYPE_POSIX,
&CFGDEF_ALLOW_LIST =>
[
&CFGOPTVAL_REPO_TYPE_AZURE,
&CFGOPTVAL_REPO_TYPE_CIFS,
&CFGOPTVAL_REPO_TYPE_POSIX,
&CFGOPTVAL_REPO_TYPE_S3,

View File

@@ -628,6 +628,10 @@ sub backrestConfig
delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL}{&CFGOPT_LOG_LEVEL_STDERR});
delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL}{&CFGOPT_LOG_TIMESTAMP});
# Don't show repo1-azure-host option. Since Azure behaves differently with Azurite (which we use for local testing) and
# the actual service we can't just fake /etc/hosts like we do for S3.
delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-host'});
if (keys(%{$$oConfigClean{&CFGDEF_SECTION_GLOBAL}}) == 0)
{
delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL});

View File

@@ -26,3 +26,16 @@ openssl req -new -sha256 -nodes -out s3-server.csr -key s3-server.key -config s3
openssl x509 -req -in s3-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
-out s3-server.crt -days 99999 -extensions v3_req -extfile s3.cnf
```
## Azure Certificate
Mimic an Azure certificate for the `*.blob.core.windows.net` hosts to generate Azure documentation.
```
cd [pgbackrest-root]/doc/resource/fake-cert
openssl ecparam -genkey -name prime256v1 | openssl ec -out azure-server.key
openssl req -new -sha256 -nodes -out azure-server.csr -key azure-server.key -config azure.cnf
openssl x509 -req -in azure-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
-out azure-server.crt -days 99999 -extensions v3_req -extfile azure.cnf
```

View File

@@ -0,0 +1,14 @@
-----BEGIN CERTIFICATE-----
MIICJjCCAc2gAwIBAgIUdW+DRN7XbILssJmdxycMz90EEwUwCgYIKoZIzj0EAwIw
VzELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA0FsbDEMMAoGA1UEBwwDQWxsMRMwEQYD
VQQKDApwZ0JhY2tSZXN0MRcwFQYDVQQDDA5wZ2JhY2tyZXN0Lm9yZzAgFw0yMDA2
MjkxOTM0MjhaGA8yMjk0MDQxMzE5MzQyOFowdzELMAkGA1UEBhMCVVMxDDAKBgNV
BAgMA0FsbDEMMAoGA1UEBwwDQWxsMRMwEQYDVQQKDApwZ0JhY2tSZXN0MRwwGgYD
VQQLDBNVbml0IFRlc3RpbmcgRG9tYWluMRkwFwYDVQQDDBBjb3JlLndpbmRvd3Mu
bmV0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEqQy14z/cTAwvIDUCgU+5ATJh
5hsvMaUrYfuCEFC9tx7+zeqrEbtWOqO1dQVnCfZr38lwrTDzJvZJKqh4rTlWoKNV
MFMwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwOQYDVR0RBDIwMIIVYmxvYi5jb3Jl
LndpbmRvd3MubmV0ghcqLmJsb2IuY29yZS53aW5kb3dzLm5ldDAKBggqhkjOPQQD
AgNHADBEAiB5RbKWvkzISbAHRqkg4egKcitsijqZsPJgpj4X91ercwIgBJmMNKVP
ELrECSmLFbJQCIZJAMcbzmLxZNcnsRaMUG8=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIEGn3zrwzQ8+ZP6i+eye3iqQybiBK4ap+JAQ0uNGEMP1oAoGCCqGSM49
AwEHoUQDQgAEqQy14z/cTAwvIDUCgU+5ATJh5hsvMaUrYfuCEFC9tx7+zeqrEbtW
OqO1dQVnCfZr38lwrTDzJvZJKqh4rTlWoA==
-----END EC PRIVATE KEY-----

View File

@@ -0,0 +1,23 @@
[req]
default_bits = 4096
prompt = no
default_md = sha256
req_extensions = v3_req
distinguished_name = dn
[ dn ]
C=US
ST=All
L=All
O=pgBackRest
OU=Unit Testing Domain
CN = core.windows.net
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = blob.core.windows.net
DNS.2 = *.blob.core.windows.net

View File

@@ -125,10 +125,10 @@
<p>File and directory links are supported for any file or directory in the <postgres/> cluster. When restoring it is possible to restore all links to their original locations, remap some or all links, or restore some or all links as normal files or directories within the cluster directory.</p>
</section>
<section id="s3-support">
<title>S3-Compatible Object Store Support</title>
<section id="object-store-support">
<title>S3 and Azure Compatible Object Store Support</title>
<p><backrest/> repositories can be located in <proper>S3-compatible</proper> object stores to allow for virtually unlimited capacity and retention.</p>
<p><backrest/> repositories can be located in <proper>S3</proper> and <proper>Azure</proper> compatible object stores to allow for virtually unlimited capacity and retention.</p>
</section>
<section id="encryption">

View File

@@ -309,6 +309,80 @@
The <setting>repo-retention-*</setting> options define how long backups will be retained. Expiration only occurs when the count of complete backups exceeds the allowed retention. In other words, if <setting>repo-retention-full-type</setting> is set to <setting>count</setting> (default) and <setting>repo-retention-full</setting> is set to 2, then there must be 3 complete backups before the oldest will be expired. If <setting>repo-retention-full-type</setting> is set to <setting>time</setting> then <setting>repo-retention-full</setting> represents days so there must be at least that many days worth of full backups before expiration can occur. Make sure you always have enough space for retention + 1 backups.</text>
<config-key-list>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-account" name="Azure Repository Account">
<summary>Azure repository account.</summary>
<text>Azure account used to store the repository.</text>
<example>pg-backup</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-ca-file" name="Azure Repository TLS CA File">
<summary>Azure repository TLS CA file.</summary>
<text>Use a CA file other than the system default.</text>
<example>/etc/pki/tls/certs/ca-bundle.crt</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-ca-path" name="Azure Repository TLS CA Path">
<summary>Azure repository TLS CA path.</summary>
<text>Use a CA path other than the system default.</text>
<example>/etc/pki/tls/certs</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-container" name="Azure Repository Container">
<summary>Azure repository container.</summary>
<text>Azure container used to store the repository.
<backrest/> repositories can be stored in the container root by setting <br-option>repo-path=/</br-option> but it is usually best to specify a prefix, such as <path>/repo</path>, so logs and other Azure-generated content can also be stored in the container.</text>
<example>pg-backup</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-host" name="Azure Repository Host">
<summary>Azure repository host.</summary>
<text>Connect to a host other than the default. This is typically used for testing.</text>
<example>127.0.0.1</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-key" name="Azure Repository Shared Key">
<summary>Azure repository shared key.</summary>
<text>Azure shared key used to access the container.</text>
<example>T+9+aov82qNhrcXSNGZCzm9mjd4d75/oxxOr6r1JVpgTLA==</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-port" name="Azure Repository Server Port">
<summary>Azure repository server port.</summary>
<text>Port to use when connecting to the default server (or host if specified). This is typically used for testing.</text>
<example>10000</example>
</config-key>
<!-- ======================================================================================================= -->
<config-key id="repo-azure-verify-tls" name="Azure Repository Server Certificate Verify">
<summary>Azure repository server certificate verify.</summary>
<text>Disables verification of the Azure server certificate. This should only be used for testing or other scenarios where a certificate has been self-signed.</text>
<example>n</example>
</config-key>
<!-- CONFIG - REPO SECTION - REPO-HOST KEY -->
<config-key id="repo-host" name="Repository Host">
<summary>Repository host when operating remotely via SSH.</summary>

View File

@@ -14,6 +14,18 @@
<release-list>
<release date="XXXX-XX-XX" version="2.28dev" title="UNDER DEVELOPMENT">
<release-core-list>
<release-feature-list>
<release-item>
<release-item-contributor-list>
<release-item-reviewer id="cynthia.shang"/>
<!-- Actually a tester, but we don't have a tag for that yet -->
<release-item-reviewer id="don.seiler"/>
</release-item-contributor-list>
<p>Azure support for repository storage.</p>
</release-item>
</release-feature-list>
<release-improvement-list>
<release-item>
<p>Asynchronous S3 multipart upload.</p>
@@ -8405,6 +8417,11 @@
<contributor-name-display>Dmitry Didovicher</contributor-name-display>
</contributor>
<contributor id="don.seiler">
<contributor-name-display>Don Seiler</contributor-name-display>
<contributor-id type="github">dtseiler</contributor-id>
</contributor>
<contributor id="donicrosby">
<contributor-name-display>donicrosby</contributor-name-display>
<contributor-id type="github">donicrosby</contributor-id>

View File

@@ -134,6 +134,14 @@
<variable key="pg-switch-wal" if="{[pg-version]} &lt; 10">pg_switch_xlog</variable>
<variable key="pg-switch-wal" if="{[pg-version]} &gt;= 10">pg_switch_wal</variable>
<!-- Azure Settings -->
<variable key="azure-all">n</variable> <!-- Build all the documentation with Azure? -->
<variable key="azure-local">y</variable>
<variable key="azure-account">pgbackrest</variable>
<variable key="azure-container">demo-container</variable>
<variable key="azure-repo">demo-repo</variable>
<variable key="azure-key">YXpLZXk=</variable>
<!-- S3 Settings -->
<variable key="s3-all">n</variable> <!-- Build all the documentation with S3? -->
<variable key="s3-local">y</variable>
@@ -153,6 +161,9 @@
<variable key="host-mount">{[host-repo-path]}:{[pgbackrest-repo-path]}</variable>
<variable key="image-repo">pgbackrest/test</variable>
<variable key="host-azure-id">azure</variable>
<variable key="host-azure">azure-server</variable>
<variable key="host-s3-id">s3</variable>
<variable key="host-s3">s3-server</variable>
@@ -605,6 +616,37 @@
</execute-list>
</block-define>
<!-- ======================================================================================================================= -->
<block-define id="azure-setup">
<p><backrest/> supports locating repositories in <proper>Azure-compatible</proper> object stores. The container used to store the repository must be created in advance &amp;mdash; <backrest/> will not do it automatically. The repository can be located in the container root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the container without conflicts.</p>
<backrest-config host="{[azure-setup-host]}" file="{[backrest-config-demo]}" owner="{[azure-setup-config-owner]}">
<title>Configure <proper>Azure</proper></title>
<backrest-config-option section="global" key="repo1-type">azure</backrest-config-option>
<backrest-config-option section="global" key="repo1-path">/{[azure-repo]}</backrest-config-option>
<backrest-config-option section="global" key="repo1-azure-account">{[azure-account]}</backrest-config-option>
<backrest-config-option section="global" key="repo1-azure-key">{[azure-key]}</backrest-config-option>
<backrest-config-option section="global" key="repo1-azure-container">{[azure-container]}</backrest-config-option>
<backrest-config-option if="'{[azure-local]}' eq 'y'" section="global" key="repo1-azure-host">blob.core.windows.net</backrest-config-option>
<backrest-config-option section="global" key="process-max">4</backrest-config-option>
</backrest-config>
<execute-list if="'{[azure-local]}' eq 'y'" host="{[azure-setup-host]}" show="n">
<title>Create the container</title>
<!-- Set host entries to redirect to local azure server -->
<execute user="root" user-force="y" show="n">
<exe-cmd>echo "{[host-azure-ip]} blob.core.windows.net" | tee -a /etc/hosts</exe-cmd>
</execute>
<execute user="{[azure-setup-user]}" if="'{[azure-setup-create-container]}' eq 'y'" show='n'>
<exe-cmd>{[project-exe]} repo-create</exe-cmd>
</execute>
</execute-list>
</block-define>
<!-- ======================================================================================================================= -->
<block-define id="s3-setup">
<p><backrest/> supports locating repositories in <proper>S3-compatible</proper> object stores. The bucket used to store the repository must be created in advance &amp;mdash; <backrest/> will not do it automatically. The repository can be located in the bucket root (<path>/</path>) but it's usually best to place it in a subpath so object store logs or other data can also be stored in the bucket without conflicts.</p>
@@ -643,6 +685,9 @@
<section id="introduction">
<title>Introduction</title>
<!-- Create Azure server first to allow it time to boot before being used -->
<host-add if="'{[azure-local]}' eq 'y'" id="{[host-azure-id]}" name="{[host-azure]}" user="root" image="mcr.microsoft.com/azure-storage/azurite" os="{[os-type]}" option="-v {[fake-cert-path]}/azure-server.crt:/root/public.crt:ro -v {[fake-cert-path]}/azure-server.key:/root/private.key:ro -e AZURITE_ACCOUNTS='{[azure-account]}:{[azure-key]}'" param="azurite-blob --blobPort 443 --blobHost 0.0.0.0 --cert=/root/public.crt --key=/root/private.key" update-hosts="n"/>
<!-- Create S3 server first to allow it time to boot before being used -->
<host-add if="'{[s3-local]}' eq 'y'" id="{[host-s3-id]}" name="{[host-s3]}" user="root" image="minio/minio:RELEASE.2020-05-06T23-23-25Z" os="{[os-type]}" option="-v {[fake-cert-path]}/s3-server.crt:/root/.minio/certs/public.crt:ro -v {[fake-cert-path]}/s3-server.key:/root/.minio/certs/private.key:ro -e MINIO_REGION={[s3-region]} -e MINIO_DOMAIN={[s3-endpoint]} -e MINIO_BROWSER=off -e MINIO_ACCESS_KEY={[s3-key]} -e MINIO_SECRET_KEY={[s3-key-secret]}" param="server /data --address :443" update-hosts="n"/>
@@ -966,6 +1011,18 @@
</backrest-config>
</section>
<!-- =================================================================================================================== -->
<section id="azure-support" if="'{[azure-all]}' eq 'y'">
<title>Azure-Compatible Object Store Support</title>
<block id="azure-setup">
<block-variable-replace key="azure-setup-host">{[host-pg1]}</block-variable-replace>
<block-variable-replace key="azure-setup-user">postgres</block-variable-replace>
<block-variable-replace key="azure-setup-config-owner">postgres:postgres</block-variable-replace>
<block-variable-replace key="azure-setup-create-container">y</block-variable-replace>
</block>
</section>
<!-- =================================================================================================================== -->
<section id="s3-support" if="'{[s3-all]}' eq 'y'">
<title>S3-Compatible Object Store Support</title>
@@ -2077,6 +2134,41 @@
</execute-list>
</section>
<!-- ======================================================================================================================= -->
<section id="azure-support" if="'{[azure-all]}' ne 'y'" depend="/quickstart/configure-archiving">
<title>Azure-Compatible Object Store Support</title>
<block id="azure-setup">
<block-variable-replace key="azure-setup-host">{[host-pg1]}</block-variable-replace>
<block-variable-replace key="azure-setup-user">postgres</block-variable-replace>
<block-variable-replace key="azure-setup-config-owner">postgres:postgres</block-variable-replace>
<block-variable-replace key="azure-setup-create-container">y</block-variable-replace>
</block>
<p>Commands are run exactly as if the repository were stored on a local disk.</p>
<execute-list host="{[host-pg1]}">
<title>Create the stanza</title>
<execute user="postgres" output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-log-level-console=info stanza-create</exe-cmd>
<exe-highlight>completed successfully</exe-highlight>
</execute>
</execute-list>
<p>File creation time in object stores is relatively slow so commands benefit by increasing <br-option>process-max</br-option> to parallelize file creation.</p>
<execute-list host="{[host-pg1]}">
<title>Backup the {[postgres-cluster-demo]} cluster</title>
<execute user="postgres" output="y">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]}
--log-level-console=info backup</exe-cmd>
<exe-highlight>no prior backup exists|full backup size</exe-highlight>
</execute>
</execute-list>
</section>
<!-- SECTION => S3-SUPPORT -->
<section id="s3-support" if="'{[s3-all]}' ne 'y'" depend="/quickstart/configure-archiving">
<title>S3-Compatible Object Store Support</title>
@@ -2344,6 +2436,15 @@
</execute>
</execute-list> -->
<p if="'{[azure-all]}' eq 'y'">Configure Azure-compatible object store if required.</p>
<block id="azure-setup" if="'{[azure-all]}' eq 'y'">
<block-variable-replace key="azure-setup-host">{[host-repo1]}</block-variable-replace>
<block-variable-replace key="azure-setup-user">{[br-user]}</block-variable-replace>
<block-variable-replace key="azure-setup-config-owner">{[br-user]}:{[br-group]}</block-variable-replace>
<block-variable-replace key="azure-setup-create-container">n</block-variable-replace>
</block>
<p if="'{[s3-all]}' eq 'y'">Configure S3-compatible object store if required.</p>
<block id="s3-setup" if="'{[s3-all]}' eq 'y'">
@@ -2358,6 +2459,20 @@
<execute-list host="{[host-repo1]}">
<title>Create the stanza</title>
<!-- Delete the azure stanza so the server can be reused -->
<execute if="'{[azure-all]}' eq 'y'" user="{[br-user]}" output="n" show="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} stop</exe-cmd>
</execute>
<execute if="'{[azure-all]}' eq 'y'" user="{[br-user]}" output="n" show="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} {[dash]}-force stanza-delete</exe-cmd>
</execute>
<execute if="'{[azure-all]}' eq 'y'" user="{[br-user]}" output="n" show="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} start</exe-cmd>
</execute>
<!-- Create the stanza -->
<execute user="{[br-user]}" output="y" filter="n">
<exe-cmd>{[project-exe]} {[dash]}-stanza={[postgres-cluster-demo]} stanza-create</exe-cmd>
</execute>

View File

@@ -144,6 +144,9 @@ SRCS = \
protocol/parallel.c \
protocol/parallelJob.c \
protocol/server.c \
storage/azure/read.c \
storage/azure/storage.c \
storage/azure/write.c \
storage/cifs/storage.c \
storage/posix/read.c \
storage/posix/storage.c \

View File

@@ -8,6 +8,7 @@ Repository Create Command
#include "common/memContext.h"
#include "config/config.h"
#include "storage/helper.h"
#include "storage/azure/storage.intern.h"
#include "storage/s3/storage.intern.h"
/**********************************************************************************************************************************/
@@ -19,7 +20,15 @@ cmdRepoCreate(void)
MEM_CONTEXT_TEMP_BEGIN()
{
if (strEq(storageType(storageRepo()), STORAGE_S3_TYPE_STR))
{
storageS3RequestP((StorageS3 *)storageDriver(storageRepoWrite()), HTTP_VERB_PUT_STR, FSLASH_STR);
}
else if (strEq(storageType(storageRepo()), STORAGE_AZURE_TYPE_STR))
{
storageAzureRequestP(
(StorageAzure *)storageDriver(storageRepoWrite()), HTTP_VERB_PUT_STR,
.query = httpQueryAdd(httpQueryNew(), AZURE_QUERY_RESTYPE_STR, AZURE_QUERY_VALUE_CONTAINER_STR));
}
}
MEM_CONTEXT_TEMP_END();

View File

@@ -25,6 +25,7 @@ STRING_EXTERN(HTTP_HEADER_AUTHORIZATION_STR, HTTP_HEADER_
STRING_EXTERN(HTTP_HEADER_CONTENT_LENGTH_STR, HTTP_HEADER_CONTENT_LENGTH);
STRING_EXTERN(HTTP_HEADER_CONTENT_MD5_STR, HTTP_HEADER_CONTENT_MD5);
STRING_EXTERN(HTTP_HEADER_ETAG_STR, HTTP_HEADER_ETAG);
STRING_EXTERN(HTTP_HEADER_DATE_STR, HTTP_HEADER_DATE);
STRING_EXTERN(HTTP_HEADER_HOST_STR, HTTP_HEADER_HOST);
STRING_EXTERN(HTTP_HEADER_LAST_MODIFIED_STR, HTTP_HEADER_LAST_MODIFIED);

View File

@@ -43,6 +43,8 @@ HTTP Constants
STRING_DECLARE(HTTP_HEADER_CONTENT_LENGTH_STR);
#define HTTP_HEADER_CONTENT_MD5 "content-md5"
STRING_DECLARE(HTTP_HEADER_CONTENT_MD5_STR);
#define HTTP_HEADER_DATE "date"
STRING_DECLARE(HTTP_HEADER_DATE_STR);
#define HTTP_HEADER_ETAG "etag"
STRING_DECLARE(HTTP_HEADER_ETAG_STR);
#define HTTP_HEADER_HOST "host"

View File

@@ -427,6 +427,14 @@ STRING_EXTERN(CFGOPT_RAW_STR, CFGOPT_RAW);
STRING_EXTERN(CFGOPT_RECOVERY_OPTION_STR, CFGOPT_RECOVERY_OPTION);
STRING_EXTERN(CFGOPT_RECURSE_STR, CFGOPT_RECURSE);
STRING_EXTERN(CFGOPT_REMOTE_TYPE_STR, CFGOPT_REMOTE_TYPE);
STRING_EXTERN(CFGOPT_REPO1_AZURE_ACCOUNT_STR, CFGOPT_REPO1_AZURE_ACCOUNT);
STRING_EXTERN(CFGOPT_REPO1_AZURE_CA_FILE_STR, CFGOPT_REPO1_AZURE_CA_FILE);
STRING_EXTERN(CFGOPT_REPO1_AZURE_CA_PATH_STR, CFGOPT_REPO1_AZURE_CA_PATH);
STRING_EXTERN(CFGOPT_REPO1_AZURE_CONTAINER_STR, CFGOPT_REPO1_AZURE_CONTAINER);
STRING_EXTERN(CFGOPT_REPO1_AZURE_HOST_STR, CFGOPT_REPO1_AZURE_HOST);
STRING_EXTERN(CFGOPT_REPO1_AZURE_KEY_STR, CFGOPT_REPO1_AZURE_KEY);
STRING_EXTERN(CFGOPT_REPO1_AZURE_PORT_STR, CFGOPT_REPO1_AZURE_PORT);
STRING_EXTERN(CFGOPT_REPO1_AZURE_VERIFY_TLS_STR, CFGOPT_REPO1_AZURE_VERIFY_TLS);
STRING_EXTERN(CFGOPT_REPO1_CIPHER_PASS_STR, CFGOPT_REPO1_CIPHER_PASS);
STRING_EXTERN(CFGOPT_REPO1_CIPHER_TYPE_STR, CFGOPT_REPO1_CIPHER_TYPE);
STRING_EXTERN(CFGOPT_REPO1_HARDLINK_STR, CFGOPT_REPO1_HARDLINK);
@@ -1634,6 +1642,70 @@ static ConfigOptionData configOptionData[CFG_OPTION_TOTAL] = CONFIG_OPTION_LIST
CONFIG_OPTION_DEFINE_ID(cfgDefOptRemoteType)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_ACCOUNT)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureAccount)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_CA_FILE)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureCaFile)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_CA_PATH)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureCaPath)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_CONTAINER)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureContainer)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_HOST)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureHost)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_KEY)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureKey)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_PORT)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzurePort)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(
CONFIG_OPTION_NAME(CFGOPT_REPO1_AZURE_VERIFY_TLS)
CONFIG_OPTION_INDEX(0)
CONFIG_OPTION_DEFINE_ID(cfgDefOptRepoAzureVerifyTls)
)
//------------------------------------------------------------------------------------------------------------------------------
CONFIG_OPTION
(

View File

@@ -341,6 +341,22 @@ Option constants
STRING_DECLARE(CFGOPT_RECURSE_STR);
#define CFGOPT_REMOTE_TYPE "remote-type"
STRING_DECLARE(CFGOPT_REMOTE_TYPE_STR);
#define CFGOPT_REPO1_AZURE_ACCOUNT "repo1-azure-account"
STRING_DECLARE(CFGOPT_REPO1_AZURE_ACCOUNT_STR);
#define CFGOPT_REPO1_AZURE_CA_FILE "repo1-azure-ca-file"
STRING_DECLARE(CFGOPT_REPO1_AZURE_CA_FILE_STR);
#define CFGOPT_REPO1_AZURE_CA_PATH "repo1-azure-ca-path"
STRING_DECLARE(CFGOPT_REPO1_AZURE_CA_PATH_STR);
#define CFGOPT_REPO1_AZURE_CONTAINER "repo1-azure-container"
STRING_DECLARE(CFGOPT_REPO1_AZURE_CONTAINER_STR);
#define CFGOPT_REPO1_AZURE_HOST "repo1-azure-host"
STRING_DECLARE(CFGOPT_REPO1_AZURE_HOST_STR);
#define CFGOPT_REPO1_AZURE_KEY "repo1-azure-key"
STRING_DECLARE(CFGOPT_REPO1_AZURE_KEY_STR);
#define CFGOPT_REPO1_AZURE_PORT "repo1-azure-port"
STRING_DECLARE(CFGOPT_REPO1_AZURE_PORT_STR);
#define CFGOPT_REPO1_AZURE_VERIFY_TLS "repo1-azure-verify-tls"
STRING_DECLARE(CFGOPT_REPO1_AZURE_VERIFY_TLS_STR);
#define CFGOPT_REPO1_CIPHER_PASS "repo1-cipher-pass"
STRING_DECLARE(CFGOPT_REPO1_CIPHER_PASS_STR);
#define CFGOPT_REPO1_CIPHER_TYPE "repo1-cipher-type"
@@ -440,7 +456,7 @@ Option constants
#define CFGOPT_TYPE "type"
STRING_DECLARE(CFGOPT_TYPE_STR);
#define CFG_OPTION_TOTAL 193
#define CFG_OPTION_TOTAL 201
/***********************************************************************************************************************************
Command enum
@@ -618,6 +634,14 @@ typedef enum
cfgOptRecoveryOption,
cfgOptRecurse,
cfgOptRemoteType,
cfgOptRepoAzureAccount,
cfgOptRepoAzureCaFile,
cfgOptRepoAzureCaPath,
cfgOptRepoAzureContainer,
cfgOptRepoAzureHost,
cfgOptRepoAzureKey,
cfgOptRepoAzurePort,
cfgOptRepoAzureVerifyTls,
cfgOptRepoCipherPass,
cfgOptRepoCipherType,
cfgOptRepoHardlink,

View File

@@ -2891,6 +2891,429 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-account")
CFGDEFDATA_OPTION_REQUIRED(true)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeString)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(true)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository account.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Azure account used to store the repository."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-ca-file")
CFGDEFDATA_OPTION_REQUIRED(false)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeString)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(false)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository TLS CA file.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Use a CA file other than the system default."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-ca-path")
CFGDEFDATA_OPTION_REQUIRED(false)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypePath)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(false)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository TLS CA path.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Use a CA path other than the system default."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-container")
CFGDEFDATA_OPTION_REQUIRED(true)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeString)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(false)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository container.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Azure container used to store the repository.\n"
"\n"
"pgBackRest repositories can be stored in the container root by setting repo-path=/ but it is usually best to specify "
"a prefix, such as /repo, so logs and other Azure-generated content can also be stored in the container."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-host")
CFGDEFDATA_OPTION_REQUIRED(false)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeString)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(false)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository host.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Connect to a host other than the default. This is typically used for testing."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-key")
CFGDEFDATA_OPTION_REQUIRED(true)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeString)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(true)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository shared key.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Azure shared key used to access the container."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-port")
CFGDEFDATA_OPTION_REQUIRED(true)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeInteger)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(false)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository server port.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Port to use when connecting to the default server (or host if specified). This is typically used for testing."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_ALLOW_RANGE(1, 65535)
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_DEFAULT("443")
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
CFGDEFDATA_OPTION_NAME("repo-azure-verify-tls")
CFGDEFDATA_OPTION_REQUIRED(true)
CFGDEFDATA_OPTION_SECTION(cfgDefSectionGlobal)
CFGDEFDATA_OPTION_TYPE(cfgDefOptTypeBoolean)
CFGDEFDATA_OPTION_INTERNAL(false)
CFGDEFDATA_OPTION_INDEX_TOTAL(1)
CFGDEFDATA_OPTION_SECURE(false)
CFGDEFDATA_OPTION_HELP_SECTION("repository")
CFGDEFDATA_OPTION_HELP_SUMMARY("Azure repository server certificate verify.")
CFGDEFDATA_OPTION_HELP_DESCRIPTION
(
"Disables verification of the Azure server certificate. This should only be used for testing or other scenarios where "
"a certificate has been self-signed."
)
CFGDEFDATA_OPTION_COMMAND_LIST
(
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchiveGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdArchivePush)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdBackup)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdCheck)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdExpire)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdInfo)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoGet)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoLs)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoPut)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRepoRm)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdRestore)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaCreate)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaDelete)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStanzaUpgrade)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStart)
CFGDEFDATA_OPTION_COMMAND(cfgDefCmdStop)
)
CFGDEFDATA_OPTION_OPTIONAL_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_DEPEND_LIST
(
cfgDefOptRepoType,
"azure"
)
CFGDEFDATA_OPTION_OPTIONAL_DEFAULT("1")
CFGDEFDATA_OPTION_OPTIONAL_PREFIX("repo")
)
)
// -----------------------------------------------------------------------------------------------------------------------------
CFGDEFDATA_OPTION
(
@@ -4415,6 +4838,7 @@ static ConfigDefineOptionData configDefineOptionData[] = CFGDEFDATA_OPTION_LIST
(
CFGDEFDATA_OPTION_OPTIONAL_ALLOW_LIST
(
"azure",
"cifs",
"posix",
"s3"

View File

@@ -112,6 +112,14 @@ typedef enum
cfgDefOptRecoveryOption,
cfgDefOptRecurse,
cfgDefOptRemoteType,
cfgDefOptRepoAzureAccount,
cfgDefOptRepoAzureCaFile,
cfgDefOptRepoAzureCaPath,
cfgDefOptRepoAzureContainer,
cfgDefOptRepoAzureHost,
cfgDefOptRepoAzureKey,
cfgDefOptRepoAzurePort,
cfgDefOptRepoAzureVerifyTls,
cfgDefOptRepoCipherPass,
cfgDefOptRepoCipherType,
cfgDefOptRepoHardlink,

View File

@@ -1853,6 +1853,105 @@ static const struct option optionList[] =
.val = PARSE_OPTION_FLAG | cfgOptRemoteType,
},
// repo-azure-account option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_ACCOUNT,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureAccount,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_ACCOUNT,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureAccount,
},
// repo-azure-ca-file option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_CA_FILE,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureCaFile,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_CA_FILE,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureCaFile,
},
// repo-azure-ca-path option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_CA_PATH,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureCaPath,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_CA_PATH,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureCaPath,
},
// repo-azure-container option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_CONTAINER,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureContainer,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_CONTAINER,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureContainer,
},
// repo-azure-host option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_HOST,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureHost,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_HOST,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureHost,
},
// repo-azure-key option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_KEY,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureKey,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_KEY,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureKey,
},
// repo-azure-port option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_PORT,
.has_arg = required_argument,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzurePort,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_PORT,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzurePort,
},
// repo-azure-verify-tls option
// -----------------------------------------------------------------------------------------------------------------------------
{
.name = CFGOPT_REPO1_AZURE_VERIFY_TLS,
.val = PARSE_OPTION_FLAG | cfgOptRepoAzureVerifyTls,
},
{
.name = "no-" CFGOPT_REPO1_AZURE_VERIFY_TLS,
.val = PARSE_OPTION_FLAG | PARSE_NEGATE_FLAG | cfgOptRepoAzureVerifyTls,
},
{
.name = "reset-" CFGOPT_REPO1_AZURE_VERIFY_TLS,
.val = PARSE_OPTION_FLAG | PARSE_RESET_FLAG | cfgOptRepoAzureVerifyTls,
},
// repo-cipher-pass option and deprecations
// -----------------------------------------------------------------------------------------------------------------------------
{
@@ -2740,6 +2839,14 @@ static const ConfigOption optionResolveOrder[] =
cfgOptPgHostUser + 6,
cfgOptPgHostUser + 7,
cfgOptRecoveryOption,
cfgOptRepoAzureAccount,
cfgOptRepoAzureCaFile,
cfgOptRepoAzureCaPath,
cfgOptRepoAzureContainer,
cfgOptRepoAzureHost,
cfgOptRepoAzureKey,
cfgOptRepoAzurePort,
cfgOptRepoAzureVerifyTls,
cfgOptRepoCipherPass,
cfgOptRepoHost,
cfgOptRepoHostCmd,

160
src/storage/azure/read.c Normal file
View File

@@ -0,0 +1,160 @@
/***********************************************************************************************************************************
Azure Storage Read
***********************************************************************************************************************************/
#include "build.auto.h"
#include <fcntl.h>
#include <unistd.h>
#include "common/debug.h"
#include "common/io/http/client.h"
#include "common/io/read.intern.h"
#include "common/log.h"
#include "common/memContext.h"
#include "common/type/object.h"
#include "storage/azure/read.h"
#include "storage/read.intern.h"
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
#define STORAGE_READ_AZURE_TYPE StorageReadAzure
#define STORAGE_READ_AZURE_PREFIX storageReadAzure
typedef struct StorageReadAzure
{
MemContext *memContext; // Object mem context
StorageReadInterface interface; // Interface
StorageAzure *storage; // Storage that created this object
HttpResponse *httpResponse; // HTTP response
} StorageReadAzure;
/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_STORAGE_READ_AZURE_TYPE \
StorageReadAzure *
#define FUNCTION_LOG_STORAGE_READ_AZURE_FORMAT(value, buffer, bufferSize) \
objToLog(value, "StorageReadAzure", buffer, bufferSize)
/***********************************************************************************************************************************
Open the file
***********************************************************************************************************************************/
static bool
storageReadAzureOpen(THIS_VOID)
{
    THIS(StorageReadAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_READ_AZURE, this);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(this->httpResponse == NULL);

    // Issue the GET for the object. The response is created in the object's context so it outlives this call and can be
    // streamed from later by read().
    MEM_CONTEXT_BEGIN(this->memContext)
    {
        this->httpResponse = storageAzureRequestP(
            this->storage, HTTP_VERB_GET_STR, .uri = this->interface.name, .allowMissing = true, .contentIo = true);
    }
    MEM_CONTEXT_END();

    // The open succeeded when the server reported success
    bool opened = httpResponseCodeOk(this->httpResponse);

    // A missing file is an error unless the caller asked to ignore it
    if (!opened && !this->interface.ignoreMissing)
        THROW_FMT(FileMissingError, "unable to open '%s': No such file or directory", strPtr(this->interface.name));

    FUNCTION_LOG_RETURN(BOOL, opened);
}
/***********************************************************************************************************************************
Read from a file
***********************************************************************************************************************************/
static size_t
storageReadAzure(THIS_VOID, Buffer *buffer, bool block)
{
    THIS(StorageReadAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_READ_AZURE, this);
        FUNCTION_LOG_PARAM(BUFFER, buffer);
        FUNCTION_LOG_PARAM(BOOL, block);
    FUNCTION_LOG_END();

    ASSERT(this != NULL && this->httpResponse != NULL);
    ASSERT(httpResponseIoRead(this->httpResponse) != NULL);
    ASSERT(buffer != NULL && !bufFull(buffer));

    // Reading is delegated entirely to the response content io opened by storageReadAzureOpen()
    IoRead *contentRead = httpResponseIoRead(this->httpResponse);
    size_t actualBytes = ioRead(contentRead, buffer);

    FUNCTION_LOG_RETURN(SIZE, actualBytes);
}
/***********************************************************************************************************************************
Has file reached EOF?
***********************************************************************************************************************************/
static bool
storageReadAzureEof(THIS_VOID)
{
THIS(StorageReadAzure);
FUNCTION_TEST_BEGIN();
FUNCTION_TEST_PARAM(STORAGE_READ_AZURE, this);
FUNCTION_TEST_END();
ASSERT(this != NULL && this->httpResponse != NULL);
ASSERT(httpResponseIoRead(this->httpResponse) != NULL);
FUNCTION_TEST_RETURN(ioReadEof(httpResponseIoRead(this->httpResponse)));
}
/**********************************************************************************************************************************/
StorageRead *
storageReadAzureNew(StorageAzure *storage, const String *name, bool ignoreMissing)
{
    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, storage);
        FUNCTION_LOG_PARAM(STRING, name);
        FUNCTION_LOG_PARAM(BOOL, ignoreMissing);
    FUNCTION_LOG_END();

    ASSERT(storage != NULL);
    ASSERT(name != NULL);

    StorageRead *this = NULL;

    // The driver gets its own mem context so the http response created later in open() has a stable home
    MEM_CONTEXT_NEW_BEGIN("StorageReadAzure")
    {
        StorageReadAzure *driver = memNew(sizeof(StorageReadAzure));

        *driver = (StorageReadAzure)
        {
            .memContext = MEM_CONTEXT_NEW(),
            .storage = storage,

            // Wire the generic read interface to the Azure callbacks defined above. Note that this driver does not
            // implement close() -- the response io is freed with the driver's mem context.
            .interface = (StorageReadInterface)
            {
                .type = STORAGE_AZURE_TYPE_STR,
                .name = strDup(name),
                .ignoreMissing = ignoreMissing,

                .ioInterface = (IoReadInterface)
                {
                    .eof = storageReadAzureEof,
                    .open = storageReadAzureOpen,
                    .read = storageReadAzure,
                },
            },
        };

        // Wrap the driver in the generic StorageRead object that callers actually use
        this = storageReadNew(driver, &driver->interface);
    }
    MEM_CONTEXT_NEW_END();

    FUNCTION_LOG_RETURN(STORAGE_READ, this);
}

15
src/storage/azure/read.h Normal file
View File

@@ -0,0 +1,15 @@
/***********************************************************************************************************************************
Azure Storage Read
***********************************************************************************************************************************/
#ifndef STORAGE_AZURE_READ_H
#define STORAGE_AZURE_READ_H

#include "storage/azure/storage.intern.h"
#include "storage/read.h"

/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
// Construct a StorageRead object that streams a single Azure blob. When ignoreMissing is true a missing blob is not an error.
StorageRead *storageReadAzureNew(StorageAzure *storage, const String *name, bool ignoreMissing);

#endif

735
src/storage/azure/storage.c Normal file
View File

@@ -0,0 +1,735 @@
/***********************************************************************************************************************************
Azure Storage
***********************************************************************************************************************************/
#include "build.auto.h"
#include <string.h>
#include "common/crypto/common.h"
#include "common/crypto/hash.h"
#include "common/encode.h"
#include "common/debug.h"
#include "common/io/http/client.h"
#include "common/io/http/common.h"
#include "common/log.h"
#include "common/memContext.h"
#include "common/regExp.h"
#include "common/type/object.h"
#include "common/type/xml.h"
#include "storage/azure/read.h"
#include "storage/azure/storage.intern.h"
#include "storage/azure/write.h"
/***********************************************************************************************************************************
Storage type
***********************************************************************************************************************************/
STRING_EXTERN(STORAGE_AZURE_TYPE_STR, STORAGE_AZURE_TYPE);
/***********************************************************************************************************************************
Azure default hosts
***********************************************************************************************************************************/
#define AZURE_HOST "core.windows.net"
#define AZURE_BLOB_HOST "blob." AZURE_HOST
/***********************************************************************************************************************************
Azure http headers
***********************************************************************************************************************************/
STRING_STATIC(AZURE_HEADER_VERSION_STR, "x-ms-version");
STRING_STATIC(AZURE_HEADER_VERSION_VALUE_STR, "2019-02-02");
/***********************************************************************************************************************************
Azure query tokens
***********************************************************************************************************************************/
STRING_STATIC(AZURE_QUERY_MARKER_STR, "marker");
STRING_EXTERN(AZURE_QUERY_COMP_STR, AZURE_QUERY_COMP);
STRING_STATIC(AZURE_QUERY_DELIMITER_STR, "delimiter");
STRING_STATIC(AZURE_QUERY_PREFIX_STR, "prefix");
STRING_EXTERN(AZURE_QUERY_RESTYPE_STR, AZURE_QUERY_RESTYPE);
STRING_STATIC(AZURE_QUERY_VALUE_LIST_STR, "list");
STRING_EXTERN(AZURE_QUERY_VALUE_CONTAINER_STR, AZURE_QUERY_VALUE_CONTAINER);
/***********************************************************************************************************************************
XML tags
***********************************************************************************************************************************/
STRING_STATIC(AZURE_XML_TAG_BLOB_PREFIX_STR, "BlobPrefix");
STRING_STATIC(AZURE_XML_TAG_BLOB_STR, "Blob");
STRING_STATIC(AZURE_XML_TAG_BLOBS_STR, "Blobs");
STRING_STATIC(AZURE_XML_TAG_CONTENT_LENGTH_STR, "Content-Length");
STRING_STATIC(AZURE_XML_TAG_LAST_MODIFIED_STR, "Last-Modified");
STRING_STATIC(AZURE_XML_TAG_NEXT_MARKER_STR, "NextMarker");
STRING_STATIC(AZURE_XML_TAG_NAME_STR, "Name");
STRING_STATIC(AZURE_XML_TAG_PROPERTIES_STR, "Properties");
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
struct StorageAzure
{
STORAGE_COMMON_MEMBER;
MemContext *memContext;
HttpClient *httpClient; // Http client to service requests
StringList *headerRedactList; // List of headers to redact from logging
const String *container; // Container to store data in
const String *account; // Account
const String *key; // Shared Secret Key
const String *host; // Host name
size_t blockSize; // Block size for multi-block upload
const String *uriPrefix; // Account/container prefix
uint64_t fileId; // Id to used to make file block identifiers unique
};
/***********************************************************************************************************************************
Generate authorization header and add it to the supplied header list
Based on the documentation at https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
***********************************************************************************************************************************/
static void
storageAzureAuth(
    StorageAzure *this, const String *verb, const String *uri, const HttpQuery *query, const String *dateTime,
    HttpHeader *httpHeader)
{
    FUNCTION_TEST_BEGIN();
        FUNCTION_TEST_PARAM(STORAGE_AZURE, this);
        FUNCTION_TEST_PARAM(STRING, verb);
        FUNCTION_TEST_PARAM(STRING, uri);
        FUNCTION_TEST_PARAM(HTTP_QUERY, query);
        FUNCTION_TEST_PARAM(STRING, dateTime);
        FUNCTION_TEST_PARAM(KEY_VALUE, httpHeader);
    FUNCTION_TEST_END();

    ASSERT(this != NULL);
    ASSERT(verb != NULL);
    ASSERT(uri != NULL);
    ASSERT(dateTime != NULL);
    ASSERT(httpHeader != NULL);
    // Content-Length must already be present since it is part of the string to sign
    ASSERT(httpHeaderGet(httpHeader, HTTP_HEADER_CONTENT_LENGTH_STR) != NULL);

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Set headers required on every request: date, host, and the service version this driver targets
        httpHeaderPut(httpHeader, HTTP_HEADER_DATE_STR, dateTime);
        httpHeaderPut(httpHeader, HTTP_HEADER_HOST_STR, this->host);
        httpHeaderPut(httpHeader, AZURE_HEADER_VERSION_STR, AZURE_HEADER_VERSION_VALUE_STR);

        // Canonicalized headers: each x-ms-* header as "key:value\n". NOTE(review): the shared key spec requires these
        // lexicographically sorted -- presumably httpHeaderList() returns keys sorted; confirm.
        String *headerCanonical = strNew("");
        StringList *headerKeyList = httpHeaderList(httpHeader);

        for (unsigned int headerKeyIdx = 0; headerKeyIdx < strLstSize(headerKeyList); headerKeyIdx++)
        {
            const String *headerKey = strLstGet(headerKeyList, headerKeyIdx);

            // Only x-ms-* headers participate in the canonicalized header list
            if (!strBeginsWithZ(headerKey, "x-ms-"))
                continue;

            strCatFmt(headerCanonical, "%s:%s\n", strPtr(headerKey), strPtr(httpHeaderGet(httpHeader, headerKey)));
        }

        // Canonicalized query: each "key:value" pair on its own line, preceded by a newline
        String *queryCanonical = strNew("");

        if (query != NULL)
        {
            StringList *queryKeyList = httpQueryList(query);
            ASSERT(strLstSize(queryKeyList) > 0);

            for (unsigned int queryKeyIdx = 0; queryKeyIdx < strLstSize(queryKeyList); queryKeyIdx++)
            {
                const String *queryKey = strLstGet(queryKeyList, queryKeyIdx);

                strCatFmt(queryCanonical, "\n%s:%s", strPtr(queryKey), strPtr(httpQueryGet(query, queryKey)));
            }
        }

        // Build the string to sign. A zero Content-Length must be represented as an empty field, and Content-MD5 is
        // included only when the request has content.
        const String *contentLength = httpHeaderGet(httpHeader, HTTP_HEADER_CONTENT_LENGTH_STR);
        const String *contentMd5 = httpHeaderGet(httpHeader, HTTP_HEADER_CONTENT_MD5_STR);

        const String *stringToSign = strNewFmt(
            "%s\n"                                                  // verb
            "\n"                                                    // content-encoding
            "\n"                                                    // content-language
            "%s\n"                                                  // content-length
            "%s\n"                                                  // content-md5
            "\n"                                                    // content-type
            "%s\n"                                                  // date
            "\n"                                                    // If-Modified-Since
            "\n"                                                    // If-Match
            "\n"                                                    // If-None-Match
            "\n"                                                    // If-Unmodified-Since
            "\n"                                                    // range
            "%s"                                                    // Canonicalized headers
            "/%s%s"                                                 // Canonicalized account/uri
            "%s",                                                   // Canonicalized query
            strPtr(verb), strEq(contentLength, ZERO_STR) ? "" : strPtr(contentLength), contentMd5 == NULL ? "" : strPtr(contentMd5),
            strPtr(dateTime), strPtr(headerCanonical), strPtr(this->account), strPtr(uri), strPtr(queryCanonical));

        // The shared key is stored base64-encoded -- decode it to the raw HMAC key
        Buffer *keyBin = bufNew(decodeToBinSize(encodeBase64, strPtr(this->key)));
        decodeToBin(encodeBase64, strPtr(this->key), bufPtr(keyBin));
        bufUsedSet(keyBin, bufSize(keyBin));

        // Sign with HMAC-SHA256 and base64-encode: 32 hash bytes encode to 44 chars + terminator = 45
        char authHmacBase64[45];
        encodeToStr(
            encodeBase64, bufPtr(cryptoHmacOne(HASH_TYPE_SHA256_STR, keyBin, BUFSTR(stringToSign))),
            HASH_TYPE_SHA256_SIZE, authHmacBase64);

        // Emit the Authorization header in "SharedKey <account>:<signature>" form
        httpHeaderPut(
            httpHeader, HTTP_HEADER_AUTHORIZATION_STR, strNewFmt("SharedKey %s:%s", strPtr(this->account), authHmacBase64));
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_TEST_RETURN_VOID();
}
/***********************************************************************************************************************************
Process Azure request
***********************************************************************************************************************************/
HttpRequest *
storageAzureRequestAsync(StorageAzure *this, const String *verb, StorageAzureRequestAsyncParam param)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, verb);
        FUNCTION_LOG_PARAM(STRING, param.uri);
        FUNCTION_LOG_PARAM(HTTP_HEADER, param.header);
        FUNCTION_LOG_PARAM(HTTP_QUERY, param.query);
        FUNCTION_LOG_PARAM(BUFFER, param.content);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(verb != NULL);

    HttpRequest *result = NULL;

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Prepend the account/container prefix so callers only deal with repository-relative uris
        param.uri = param.uri == NULL ? this->uriPrefix : strNewFmt("%s%s", strPtr(this->uriPrefix), strPtr(param.uri));

        // Create the header list (duplicating any caller-supplied headers) with the redact list applied for logging
        HttpHeader *requestHeader = param.header == NULL ?
            httpHeaderNew(this->headerRedactList) : httpHeaderDup(param.header, this->headerRedactList);

        // Content-Length is required by the shared key signature even when there is no content
        httpHeaderAdd(
            requestHeader, HTTP_HEADER_CONTENT_LENGTH_STR,
            param.content == NULL || bufUsed(param.content) == 0 ? ZERO_STR : strNewFmt("%zu", bufUsed(param.content)));

        // Calculate content-md5 header if there is content
        if (param.content != NULL)
        {
            char md5Hash[HASH_TYPE_MD5_SIZE_HEX];
            // NOTE(review): HASH_TYPE_M5_SIZE looks like a typo for HASH_TYPE_MD5_SIZE -- confirm which constant is declared
            encodeToStr(encodeBase64, bufPtr(cryptoHashOne(HASH_TYPE_MD5_STR, param.content)), HASH_TYPE_M5_SIZE, md5Hash);
            httpHeaderAdd(requestHeader, HTTP_HEADER_CONTENT_MD5_STR, STR(md5Hash));
        }

        // Generate authorization header
        storageAzureAuth(this, verb, httpUriEncode(param.uri, true), param.query, httpDateFromTime(time(NULL)), requestHeader);

        // Send the request in the prior context so the caller owns the HttpRequest
        MEM_CONTEXT_PRIOR_BEGIN()
        {
            result = httpRequestNewP(
                this->httpClient, verb, param.uri, .query = param.query, .header = requestHeader, .content = param.content);
        }
        MEM_CONTEXT_PRIOR_END();
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_LOG_RETURN(HTTP_REQUEST, result);
}
/***********************************************************************************************************************************
Wait for an async Azure request to complete and validate the response
***********************************************************************************************************************************/
HttpResponse *
storageAzureResponse(HttpRequest *request, StorageAzureResponseParam param)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(HTTP_REQUEST, request);
        FUNCTION_LOG_PARAM(BOOL, param.allowMissing);
        FUNCTION_LOG_PARAM(BOOL, param.contentIo);
    FUNCTION_LOG_END();

    ASSERT(request != NULL);

    HttpResponse *response = NULL;

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Retrieve the response, leaving the content unread when the caller wants an IoRead interface
        response = httpRequest(request, !param.contentIo);

        // A 404 is tolerated only when the caller explicitly allows missing files; any other failure is an error
        bool missingOk = param.allowMissing && httpResponseCode(response) == HTTP_RESPONSE_CODE_NOT_FOUND;

        if (!httpResponseCodeOk(response) && !missingOk)
            httpRequestError(request, response);

        // Hand the response back to the caller's context
        httpResponseMove(response, memContextPrior());
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_LOG_RETURN(HTTP_RESPONSE, response);
}
// Synchronous convenience wrapper -- issue the async request and immediately wait for and validate the response
HttpResponse *
storageAzureRequest(StorageAzure *this, const String *verb, StorageAzureRequestParam param)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, verb);
        FUNCTION_LOG_PARAM(STRING, param.uri);
        FUNCTION_LOG_PARAM(HTTP_HEADER, param.header);
        FUNCTION_LOG_PARAM(HTTP_QUERY, param.query);
        FUNCTION_LOG_PARAM(BUFFER, param.content);
        FUNCTION_LOG_PARAM(BOOL, param.allowMissing);
        FUNCTION_LOG_PARAM(BOOL, param.contentIo);
    FUNCTION_LOG_END();

    // Forward request params to the async request and response params to the response check
    FUNCTION_LOG_RETURN(
        HTTP_RESPONSE,
        storageAzureResponseP(
            storageAzureRequestAsyncP(
                this, verb, .uri = param.uri, .header = param.header, .query = param.query, .content = param.content),
            .allowMissing = param.allowMissing, .contentIo = param.contentIo));
}
/***********************************************************************************************************************************
General function for listing files to be used by other list routines

Pages through the container's list API, invoking callback once per subpath (BlobPrefix element) and once per file (Blob element).
Subpaths are only reported when recurse is false since the delimiter query parameter is what produces BlobPrefix elements.
***********************************************************************************************************************************/
static void
storageAzureListInternal(
    StorageAzure *this, const String *path, const String *expression, bool recurse,
    void (*callback)(StorageAzure *this, void *callbackData, const String *name, StorageType type, const XmlNode *xml),
    void *callbackData)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, path);
        FUNCTION_LOG_PARAM(STRING, expression);
        FUNCTION_LOG_PARAM(BOOL, recurse);
        FUNCTION_LOG_PARAM(FUNCTIONP, callback);
        FUNCTION_LOG_PARAM_P(VOID, callbackData);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(path != NULL);

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Continuation marker from the prior page (NULL on the first request)
        const String *marker = NULL;

        // Build the base prefix by stripping off the initial /
        const String *basePrefix;

        if (strSize(path) == 1)
            basePrefix = EMPTY_STR;
        else
            basePrefix = strNewFmt("%s/", strPtr(strSub(path, 1)));

        // Get the expression prefix when possible to limit initial results
        const String *expressionPrefix = regExpPrefix(expression);

        // If there is an expression prefix then use it to build the query prefix, otherwise query prefix is base prefix
        const String *queryPrefix;

        if (expressionPrefix == NULL)
            queryPrefix = basePrefix;
        else
        {
            if (strEmpty(basePrefix))
                queryPrefix = expressionPrefix;
            else
                queryPrefix = strNewFmt("%s%s", strPtr(basePrefix), strPtr(expressionPrefix));
        }

        // Loop as long as a continuation token returned
        do
        {
            // Use an inner mem context here because we could potentially be retrieving millions of files so it is a good idea to
            // free memory at regular intervals
            MEM_CONTEXT_TEMP_BEGIN()
            {
                HttpQuery *query = httpQueryNew();

                // Add continuation token from the prior loop if any
                if (marker != NULL)
                    httpQueryAdd(query, AZURE_QUERY_MARKER_STR, marker);

                // Add the delimiter to not recurse
                if (!recurse)
                    httpQueryAdd(query, AZURE_QUERY_DELIMITER_STR, FSLASH_STR);

                // Add resource type
                httpQueryAdd(query, AZURE_QUERY_RESTYPE_STR, AZURE_QUERY_VALUE_CONTAINER_STR);

                // Add list comp
                httpQueryAdd(query, AZURE_QUERY_COMP_STR, AZURE_QUERY_VALUE_LIST_STR);

                // Don't specify empty prefix because it is the default
                if (!strEmpty(queryPrefix))
                    httpQueryAdd(query, AZURE_QUERY_PREFIX_STR, queryPrefix);

                // Issue the list request and parse the XML response body
                XmlNode *xmlRoot = xmlDocumentRoot(
                    xmlDocumentNewBuf(httpResponseContent(storageAzureRequestP(this, HTTP_VERB_GET_STR, .query = query))));

                // Get subpath list (BlobPrefix elements, present only when a delimiter was specified)
                XmlNode *blobs = xmlNodeChild(xmlRoot, AZURE_XML_TAG_BLOBS_STR, true);
                XmlNodeList *blobPrefixList = xmlNodeChildList(blobs, AZURE_XML_TAG_BLOB_PREFIX_STR);

                for (unsigned int blobPrefixIdx = 0; blobPrefixIdx < xmlNodeLstSize(blobPrefixList); blobPrefixIdx++)
                {
                    const XmlNode *subPathNode = xmlNodeLstGet(blobPrefixList, blobPrefixIdx);

                    // Get subpath name
                    const String *subPath = xmlNodeContent(xmlNodeChild(subPathNode, AZURE_XML_TAG_NAME_STR, true));

                    // Strip off base prefix and final /
                    subPath = strSubN(subPath, strSize(basePrefix), strSize(subPath) - strSize(basePrefix) - 1);

                    // Add to list
                    callback(this, callbackData, subPath, storageTypePath, NULL);
                }

                // Get file list
                XmlNodeList *fileList = xmlNodeChildList(blobs, AZURE_XML_TAG_BLOB_STR);

                for (unsigned int fileIdx = 0; fileIdx < xmlNodeLstSize(fileList); fileIdx++)
                {
                    const XmlNode *fileNode = xmlNodeLstGet(fileList, fileIdx);

                    // Get file name
                    const String *file = xmlNodeContent(xmlNodeChild(fileNode, AZURE_XML_TAG_NAME_STR, true));

                    // Strip off the base prefix when present
                    file = strEmpty(basePrefix) ? file : strSub(file, strSize(basePrefix));

                    // Add to list -- pass the Properties node so the callback can extract size/time
                    callback(
                        this, callbackData, file, storageTypeFile, xmlNodeChild(fileNode, AZURE_XML_TAG_PROPERTIES_STR, true));
                }

                // Get the continuation token and store it in the outer temp context so it survives the inner context free
                MEM_CONTEXT_PRIOR_BEGIN()
                {
                    marker = xmlNodeContent(xmlNodeChild(xmlRoot, AZURE_XML_TAG_NEXT_MARKER_STR, false));
                }
                MEM_CONTEXT_PRIOR_END();
            }
            MEM_CONTEXT_TEMP_END();
        }
        while (!strEq(marker, EMPTY_STR));                          // An empty NextMarker means the listing is complete
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_LOG_RETURN_VOID();
}
/**********************************************************************************************************************************/
static StorageInfo
storageAzureInfo(THIS_VOID, const String *file, StorageInfoLevel level, StorageInterfaceInfoParam param)
{
    THIS(StorageAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, file);
        FUNCTION_LOG_PARAM(ENUM, level);
        (void)param;                                                // No parameters are used
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(file != NULL);

    // Issue a HEAD request for the file, tolerating 404 so a missing file is reported via the exists flag
    HttpResponse *response = storageAzureRequestP(this, HTTP_VERB_HEAD_STR, .uri = file, .allowMissing = true);

    StorageInfo result = {.level = level, .exists = httpResponseCodeOk(response)};

    // Fill in size/time from the response headers when the file exists and basic info was requested
    if (result.exists && result.level >= storageInfoLevelBasic)
    {
        const HttpHeader *responseHeader = httpResponseHeader(response);

        result.type = storageTypeFile;
        result.size = cvtZToUInt64(strPtr(httpHeaderGet(responseHeader, HTTP_HEADER_CONTENT_LENGTH_STR)));
        result.timeModified = httpDateToTime(httpHeaderGet(responseHeader, HTTP_HEADER_LAST_MODIFIED_STR));
    }

    FUNCTION_LOG_RETURN(STORAGE_INFO, result);
}
/**********************************************************************************************************************************/
// Data bundle passed through storageAzureListInternal() to storageAzureInfoListCallback()
typedef struct StorageAzureInfoListData
{
    StorageInfoLevel level;                                         // Level of info to set
    StorageInfoListCallback callback;                               // User-supplied callback function
    void *callbackData;                                             // User-supplied callback data
} StorageAzureInfoListData;
// Translate a single list entry from storageAzureListInternal() into a StorageInfo and forward it to the user callback
static void
storageAzureInfoListCallback(StorageAzure *this, void *callbackData, const String *name, StorageType type, const XmlNode *xml)
{
    FUNCTION_TEST_BEGIN();
        FUNCTION_TEST_PARAM(STORAGE_AZURE, this);
        FUNCTION_TEST_PARAM_P(VOID, callbackData);
        FUNCTION_TEST_PARAM(STRING, name);
        FUNCTION_TEST_PARAM(ENUM, type);
        FUNCTION_TEST_PARAM(XML_NODE, xml);
    FUNCTION_TEST_END();

    (void)this;                                                     // Unused but still logged above for debugging

    ASSERT(callbackData != NULL);
    ASSERT(name != NULL);

    // Unpack the level/callback bundle supplied by storageAzureInfoList()
    StorageAzureInfoListData *data = (StorageAzureInfoListData *)callbackData;

    // Anything returned by the list API exists by definition
    StorageInfo info =
    {
        .name = name,
        .level = data->level,
        .exists = true,
    };

    if (data->level >= storageInfoLevelBasic)
    {
        info.type = type;

        // Add additional info for files from the Properties XML node (paths carry no properties)
        if (type == storageTypeFile)
        {
            ASSERT(xml != NULL);

            info.size = cvtZToUInt64(strPtr(xmlNodeContent(xmlNodeChild(xml, AZURE_XML_TAG_CONTENT_LENGTH_STR, true))));
            info.timeModified = httpDateToTime(xmlNodeContent(xmlNodeChild(xml, AZURE_XML_TAG_LAST_MODIFIED_STR, true)));
        }
    }

    // Forward the info to the user-supplied callback
    data->callback(data->callbackData, &info);

    FUNCTION_TEST_RETURN_VOID();
}
static bool
storageAzureInfoList(
    THIS_VOID, const String *path, StorageInfoLevel level, StorageInfoListCallback callback, void *callbackData,
    StorageInterfaceInfoListParam param)
{
    THIS(StorageAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, path);
        FUNCTION_LOG_PARAM(ENUM, level);
        FUNCTION_LOG_PARAM(FUNCTIONP, callback);
        FUNCTION_LOG_PARAM_P(VOID, callbackData);
        FUNCTION_LOG_PARAM(STRING, param.expression);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(path != NULL);
    ASSERT(callback != NULL);

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Bundle the user's callback with the requested info level so the internal list callback can forward results
        StorageAzureInfoListData callbackBundle =
        {
            .level = level,
            .callback = callback,
            .callbackData = callbackData,
        };

        // List only the immediate contents of the path (no recursion)
        storageAzureListInternal(this, path, param.expression, false, storageAzureInfoListCallback, &callbackBundle);
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_LOG_RETURN(BOOL, true);
}
/**********************************************************************************************************************************/
// Construct a read object for the given file
static StorageRead *
storageAzureNewRead(THIS_VOID, const String *file, bool ignoreMissing, StorageInterfaceNewReadParam param)
{
    THIS(StorageAzure);

    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, file);
        FUNCTION_LOG_PARAM(BOOL, ignoreMissing);
        (void)param;                                                // No parameters are used
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(file != NULL);

    FUNCTION_LOG_RETURN(STORAGE_READ, storageReadAzureNew(this, file, ignoreMissing));
}
/**********************************************************************************************************************************/
// Construct a write object for the given file. Each write gets the next fileId so block ids generated by this process are unique.
static StorageWrite *
storageAzureNewWrite(THIS_VOID, const String *file, StorageInterfaceNewWriteParam param)
{
    THIS(StorageAzure);

    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, file);
        (void)param;                                                // No parameters are used
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(file != NULL);

    // These write options are either required or unsupported by the driver so assert they arrive with the expected values
    ASSERT(param.createPath);
    ASSERT(param.user == NULL);
    ASSERT(param.group == NULL);
    ASSERT(param.timeModified == 0);

    FUNCTION_LOG_RETURN(STORAGE_WRITE, storageWriteAzureNew(this, file, this->fileId++, this->blockSize));
}
/**********************************************************************************************************************************/
// Data bundle passed through storageAzureListInternal() to storageAzurePathRemoveCallback()
typedef struct StorageAzurePathRemoveData
{
    const String *path;                                             // Path being removed (used to build absolute file names)
} StorageAzurePathRemoveData;
// Remove a single file reported by the list -- paths are skipped since they are virtual on object stores
static void
storageAzurePathRemoveCallback(StorageAzure *this, void *callbackData, const String *name, StorageType type, const XmlNode *xml)
{
    FUNCTION_TEST_BEGIN();
        FUNCTION_TEST_PARAM(STORAGE_AZURE, this);
        FUNCTION_TEST_PARAM_P(VOID, callbackData);
        FUNCTION_TEST_PARAM(STRING, name);
        FUNCTION_TEST_PARAM(ENUM, type);
        (void)xml;                                                  // Unused since no additional data needed for files
    FUNCTION_TEST_END();

    ASSERT(this != NULL);
    ASSERT(callbackData != NULL);
    ASSERT(name != NULL);

    // Only delete files since paths don't really exist
    if (type == storageTypeFile)
    {
        StorageAzurePathRemoveData *removeData = (StorageAzurePathRemoveData *)callbackData;

        // Build the absolute file name, avoiding a double slash when the removal path is the root
        const char *pathPrefix = strEq(removeData->path, FSLASH_STR) ? "" : strPtr(removeData->path);

        storageInterfaceRemoveP(this, strNewFmt("%s/%s", pathPrefix, strPtr(name)));
    }

    FUNCTION_TEST_RETURN_VOID();
}
static bool
storageAzurePathRemove(THIS_VOID, const String *path, bool recurse, StorageInterfacePathRemoveParam param)
{
    THIS(StorageAzure);

    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, path);
        FUNCTION_LOG_PARAM(BOOL, recurse);
        (void)param;                                                // No parameters are used
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(path != NULL);

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Always list recursively -- removing a virtual path means removing every file stored under it
        StorageAzurePathRemoveData removeData = {.path = path};

        storageAzureListInternal(this, path, NULL, true, storageAzurePathRemoveCallback, &removeData);
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_LOG_RETURN(BOOL, true);
}
/**********************************************************************************************************************************/
static void
storageAzureRemove(THIS_VOID, const String *file, StorageInterfaceRemoveParam param)
{
    THIS(StorageAzure);

    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, this);
        FUNCTION_LOG_PARAM(STRING, file);
        FUNCTION_LOG_PARAM(BOOL, param.errorOnMissing);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(file != NULL);
    ASSERT(!param.errorOnMissing);                                  // Driver does not support erroring on missing files

    // Delete the blob, tolerating 404 since the file may already be gone. Fix: pass file with the explicit .uri designator --
    // it was previously passed positionally into the param struct, which only landed in uri by accident of field order and
    // would break silently if StorageAzureRequestParam were ever reordered.
    storageAzureRequestP(this, HTTP_VERB_DELETE_STR, .uri = file, .allowMissing = true);

    FUNCTION_LOG_RETURN_VOID();
}
/**********************************************************************************************************************************/
// Function pointers implementing the generic storage interface for the Azure driver. Interface functions not set here (e.g. path
// operations other than remove) are left NULL because paths are virtual on object stores.
static const StorageInterface storageInterfaceAzure =
{
    .info = storageAzureInfo,
    .infoList = storageAzureInfoList,
    .newRead = storageAzureNewRead,
    .newWrite = storageAzureNewWrite,
    .pathRemove = storageAzurePathRemove,
    .remove = storageAzureRemove,
};
Storage *
storageAzureNew(
    const String *path, bool write, StoragePathExpressionCallback pathExpressionFunction, const String *container,
    const String *account, const String *key, size_t blockSize, const String *host, unsigned int port, TimeMSec timeout,
    bool verifyPeer, const String *caFile, const String *caPath)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);
        FUNCTION_LOG_PARAM(STRING, path);
        FUNCTION_LOG_PARAM(BOOL, write);
        FUNCTION_LOG_PARAM(FUNCTIONP, pathExpressionFunction);
        FUNCTION_LOG_PARAM(STRING, container);
        FUNCTION_TEST_PARAM(STRING, account);                       // Test macro so the account is not logged
        FUNCTION_TEST_PARAM(STRING, key);                           // Test macro so the key is not logged
        FUNCTION_LOG_PARAM(SIZE, blockSize);
        FUNCTION_LOG_PARAM(STRING, host);
        FUNCTION_LOG_PARAM(UINT, port);
        FUNCTION_LOG_PARAM(TIME_MSEC, timeout);
        FUNCTION_LOG_PARAM(BOOL, verifyPeer);
        FUNCTION_LOG_PARAM(STRING, caFile);
        FUNCTION_LOG_PARAM(STRING, caPath);
    FUNCTION_LOG_END();

    ASSERT(path != NULL);
    ASSERT(container != NULL);
    ASSERT(account != NULL);
    ASSERT(key != NULL);
    ASSERT(blockSize != 0);

    Storage *this = NULL;

    MEM_CONTEXT_NEW_BEGIN("StorageAzure")
    {
        StorageAzure *driver = memNew(sizeof(StorageAzure));

        *driver = (StorageAzure)
        {
            .memContext = MEM_CONTEXT_NEW(),
            .interface = storageInterfaceAzure,
            .container = strDup(container),
            .account = strDup(account),
            .key = strDup(key),
            .blockSize = blockSize,

            // Default to the account's blob endpoint when no host is given. Fix: dup the caller's host so the driver owns the
            // string -- container/account/key are all dup'd and storing the caller's String directly risks a dangling reference
            // when the caller's mem context is freed.
            .host = host == NULL ? strNewFmt("%s." AZURE_BLOB_HOST, strPtr(account)) : strDup(host),

            // Custom hosts (e.g. emulators) are not account-specific vhosts so the account must be part of the uri
            .uriPrefix = host == NULL ?
                strNewFmt("/%s", strPtr(container)) : strNewFmt("/%s/%s", strPtr(account), strPtr(container)),
        };

        // Create the http client used to service requests
        driver->httpClient = httpClientNew(driver->host, port, timeout, verifyPeer, caFile, caPath);

        // Create list of redacted headers so secrets never appear in logs
        driver->headerRedactList = strLstNew();
        strLstAdd(driver->headerRedactList, HTTP_HEADER_AUTHORIZATION_STR);
        strLstAdd(driver->headerRedactList, HTTP_HEADER_DATE_STR);

        // Generate starting file id used to keep block ids unique across processes
        cryptoRandomBytes((unsigned char *)&driver->fileId, sizeof(driver->fileId));

        this = storageNew(STORAGE_AZURE_TYPE_STR, path, 0, 0, write, pathExpressionFunction, driver, driver->interface);
    }
    MEM_CONTEXT_NEW_END();

    FUNCTION_LOG_RETURN(STORAGE, this);
}

View File

@@ -0,0 +1,28 @@
/***********************************************************************************************************************************
Azure Storage

Repository storage driver for Azure and Azure-compatible blob stores.
***********************************************************************************************************************************/
#ifndef STORAGE_AZURE_STORAGE_H
#define STORAGE_AZURE_STORAGE_H

#include "storage/storage.intern.h"

/***********************************************************************************************************************************
Storage type
***********************************************************************************************************************************/
#define STORAGE_AZURE_TYPE                                          "azure"
    STRING_DECLARE(STORAGE_AZURE_TYPE_STR);

/***********************************************************************************************************************************
Defaults
***********************************************************************************************************************************/
// Minimum size of a block for multi-block upload (4MiB)
#define STORAGE_AZURE_BLOCKSIZE_MIN                                 ((size_t)4 * 1024 * 1024)

/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
Storage *storageAzureNew(
    const String *path, bool write, StoragePathExpressionCallback pathExpressionFunction, const String *container,
    const String *account, const String *key, size_t blockSize, const String *host, unsigned int port, TimeMSec timeout,
    bool verifyPeer, const String *caFile, const String *caPath);

#endif

View File

@@ -0,0 +1,80 @@
/***********************************************************************************************************************************
Azure Storage Internal
***********************************************************************************************************************************/
#ifndef STORAGE_AZURE_STORAGE_INTERN_H
#define STORAGE_AZURE_STORAGE_INTERN_H

/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct StorageAzure StorageAzure;

#include "common/io/http/request.h"
#include "storage/azure/storage.h"

/***********************************************************************************************************************************
Azure query tokens
***********************************************************************************************************************************/
#define AZURE_QUERY_COMP                                            "comp"
    STRING_DECLARE(AZURE_QUERY_COMP_STR);
#define AZURE_QUERY_RESTYPE                                         "restype"
    STRING_DECLARE(AZURE_QUERY_RESTYPE_STR);
#define AZURE_QUERY_VALUE_CONTAINER                                 "container"
    STRING_DECLARE(AZURE_QUERY_VALUE_CONTAINER_STR);

/***********************************************************************************************************************************
Perform an Azure Request
***********************************************************************************************************************************/
// Perform an async request -- resolve the returned HttpRequest with storageAzureResponse()
typedef struct StorageAzureRequestAsyncParam
{
    VAR_PARAM_HEADER;
    const String *uri;                                              // Request URI
    const HttpHeader *header;                                       // Request headers
    const HttpQuery *query;                                         // Query parameters
    const Buffer *content;                                          // Request content
} StorageAzureRequestAsyncParam;

#define storageAzureRequestAsyncP(this, verb, ...)                                                                                 \
    storageAzureRequestAsync(this, verb, (StorageAzureRequestAsyncParam){VAR_PARAM_INIT, __VA_ARGS__})

HttpRequest *storageAzureRequestAsync(StorageAzure *this, const String *verb, StorageAzureRequestAsyncParam param);

// Get async response -- errors unless the response is OK (or 404 when allowMissing is set)
typedef struct StorageAzureResponseParam
{
    VAR_PARAM_HEADER;
    bool allowMissing;                                              // Allow missing files (caller can check response code)
    bool contentIo;                                                 // Is IoRead interface required to read content?
} StorageAzureResponseParam;

#define storageAzureResponseP(request, ...)                                                                                        \
    storageAzureResponse(request, (StorageAzureResponseParam){VAR_PARAM_INIT, __VA_ARGS__})

HttpResponse *storageAzureResponse(HttpRequest *request, StorageAzureResponseParam param);

// Perform a synchronous request (async request immediately followed by response)
typedef struct StorageAzureRequestParam
{
    VAR_PARAM_HEADER;
    const String *uri;                                              // Request URI
    const HttpHeader *header;                                       // Request headers
    const HttpQuery *query;                                         // Query parameters
    const Buffer *content;                                          // Request content
    bool allowMissing;                                              // Allow missing files (caller can check response code)
    bool contentIo;                                                 // Is IoRead interface required to read content?
} StorageAzureRequestParam;

#define storageAzureRequestP(this, verb, ...)                                                                                      \
    storageAzureRequest(this, verb, (StorageAzureRequestParam){VAR_PARAM_INIT, __VA_ARGS__})

HttpResponse *storageAzureRequest(StorageAzure *this, const String *verb, StorageAzureRequestParam param);

/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_STORAGE_AZURE_TYPE                                                                                            \
    StorageAzure *
#define FUNCTION_LOG_STORAGE_AZURE_FORMAT(value, buffer, bufferSize)                                                               \
    objToLog(value, "StorageAzure", buffer, bufferSize)

#endif

314
src/storage/azure/write.c Normal file
View File

@@ -0,0 +1,314 @@
/***********************************************************************************************************************************
Azure Storage File Write
***********************************************************************************************************************************/
#include "build.auto.h"
#include <string.h>
#include "common/debug.h"
#include "common/encode.h"
#include "common/io/write.intern.h"
#include "common/log.h"
#include "common/memContext.h"
#include "common/type/object.h"
#include "common/type/xml.h"
#include "storage/azure/write.h"
#include "storage/write.intern.h"
/***********************************************************************************************************************************
Azure HTTP headers
***********************************************************************************************************************************/
STRING_STATIC(AZURE_HEADER_BLOB_TYPE_STR, "x-ms-blob-type");
STRING_STATIC(AZURE_HEADER_VALUE_BLOCK_BLOB_STR, "BlockBlob");
/***********************************************************************************************************************************
Azure query tokens
***********************************************************************************************************************************/
STRING_STATIC(AZURE_QUERY_BLOCK_ID_STR, "blockid");
STRING_STATIC(AZURE_QUERY_VALUE_BLOCK_STR, "block");
STRING_STATIC(AZURE_QUERY_VALUE_BLOCK_LIST_STR, "blocklist");
/***********************************************************************************************************************************
XML tags
***********************************************************************************************************************************/
STRING_STATIC(AZURE_XML_TAG_BLOCK_LIST_STR, "BlockList");
STRING_STATIC(AZURE_XML_TAG_UNCOMMITTED_STR, "Uncommitted");
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
typedef struct StorageWriteAzure
{
    MemContext *memContext;                                         // Object mem context
    StorageWriteInterface interface;                                // Interface
    StorageAzure *storage;                                          // Storage that created this object
    HttpRequest *request;                                           // Async block upload request (NULL when none outstanding)

    uint64_t fileId;                                                // Id used to make file block identifiers unique
    size_t blockSize;                                               // Size of blocks for multi-block upload
    Buffer *blockBuffer;                                            // Block buffer (stores data until blockSize is reached)
    StringList *blockIdList;                                        // List of uploaded block ids (NULL until first block upload)
} StorageWriteAzure;

/***********************************************************************************************************************************
Macros for function logging
***********************************************************************************************************************************/
#define FUNCTION_LOG_STORAGE_WRITE_AZURE_TYPE                                                                                      \
    StorageWriteAzure *
#define FUNCTION_LOG_STORAGE_WRITE_AZURE_FORMAT(value, buffer, bufferSize)                                                         \
    objToLog(value, "StorageWriteAzure", buffer, bufferSize)
/***********************************************************************************************************************************
Open the file
***********************************************************************************************************************************/
static void
storageWriteAzureOpen(THIS_VOID)
{
    THIS(StorageWriteAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_WRITE_AZURE, this);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(this->blockBuffer == NULL);                              // Open must not be called twice

    // Allocate the block buffer in the object's context so it survives across write calls until close
    MEM_CONTEXT_BEGIN(this->memContext)
    {
        this->blockBuffer = bufNew(this->blockSize);
    }
    MEM_CONTEXT_END();

    FUNCTION_LOG_RETURN_VOID();
}
/***********************************************************************************************************************************
Flush bytes to upload block
***********************************************************************************************************************************/
// Wait for the prior async block upload (if any) to complete and verify it succeeded
static void
storageWriteAzureBlock(StorageWriteAzure *this)
{
    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_WRITE_AZURE, this);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);

    // If there is an outstanding async request then wait for the response. Since the part id has already been stored there is
    // nothing to do except make sure the request did not error.
    if (this->request != NULL)
    {
        storageAzureResponseP(this->request);

        // Free the request and clear it so close/next block knows nothing is outstanding
        httpRequestFree(this->request);
        this->request = NULL;
    }

    FUNCTION_LOG_RETURN_VOID();
}
// Start an async upload of the current block buffer as an uncommitted block
static void
storageWriteAzureBlockAsync(StorageWriteAzure *this)
{
    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_WRITE_AZURE, this);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(this->blockBuffer != NULL);
    ASSERT(bufSize(this->blockBuffer) > 0);

    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Complete prior async request, if any, so only one block upload is in flight at a time
        storageWriteAzureBlock(this);

        // Create the block id list on first use (its presence signals a multi-block upload at close)
        if (this->blockIdList == NULL)
        {
            MEM_CONTEXT_BEGIN(this->memContext)
            {
                this->blockIdList = strLstNew();
            }
            MEM_CONTEXT_END();
        }

        // Generate block id. Combine the block number with the provided file id to create a (hopefully) unique block id that won't
        // overlap with any other process. This is to prevent another process from overwriting our blocks. If two processes are
        // writing against the same file then there may be problems anyway but we need to at least ensure the result is consistent,
        // i.e. we get all of one file or all of the other depending on who writes last.
        const String *blockId = strNewFmt("%016" PRIX64 "x%07u", this->fileId, strLstSize(this->blockIdList));

        // Upload the block and add to block list
        HttpQuery *query = httpQueryNew();
        httpQueryAdd(query, AZURE_QUERY_COMP_STR, AZURE_QUERY_VALUE_BLOCK_STR);
        httpQueryAdd(query, AZURE_QUERY_BLOCK_ID_STR, blockId);

        // Keep the request in the object's context so it survives this temp context until the next block/close resolves it
        MEM_CONTEXT_BEGIN(this->memContext)
        {
            this->request = storageAzureRequestAsyncP(
                this->storage, HTTP_VERB_PUT_STR, .uri = this->interface.name, .query = query, .content = this->blockBuffer);
        }
        MEM_CONTEXT_END();

        strLstAdd(this->blockIdList, blockId);
    }
    MEM_CONTEXT_TEMP_END();

    FUNCTION_LOG_RETURN_VOID();
}
/***********************************************************************************************************************************
Write to internal buffer
***********************************************************************************************************************************/
static void
storageWriteAzure(THIS_VOID, const Buffer *buffer)
{
    THIS(StorageWriteAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_WRITE_AZURE, this);
        FUNCTION_LOG_PARAM(BUFFER, buffer);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);
    ASSERT(this->blockBuffer != NULL);

    // Copy the caller's buffer into fixed-size blocks, uploading each block as it fills
    size_t bytesWritten = 0;

    while (bytesWritten < bufUsed(buffer))
    {
        // Copy the lesser of the remaining input and the remaining space in the block buffer
        size_t bytesRemaining = bufUsed(buffer) - bytesWritten;
        size_t bytesCopy = bufRemains(this->blockBuffer) < bytesRemaining ? bufRemains(this->blockBuffer) : bytesRemaining;

        bufCatSub(this->blockBuffer, buffer, bytesWritten, bytesCopy);
        bytesWritten += bytesCopy;

        // When the block buffer is full upload it asynchronously and reset the buffer for the next block
        if (bufRemains(this->blockBuffer) == 0)
        {
            storageWriteAzureBlockAsync(this);
            bufUsedZero(this->blockBuffer);
        }
    }

    FUNCTION_LOG_RETURN_VOID();
}
/***********************************************************************************************************************************
Close the file
***********************************************************************************************************************************/
static void
storageWriteAzureClose(THIS_VOID)
{
    THIS(StorageWriteAzure);

    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_WRITE_AZURE, this);
    FUNCTION_LOG_END();

    ASSERT(this != NULL);

    // Close if the file has not already been closed
    if (this->blockBuffer != NULL)
    {
        MEM_CONTEXT_TEMP_BEGIN()
        {
            // If a multi-block upload was started we need to finish that way
            if (this->blockIdList != NULL)
            {
                // If there is anything left in the block buffer then write it
                if (bufUsed(this->blockBuffer) > 0)
                    storageWriteAzureBlockAsync(this);

                // Complete prior async request, if any
                storageWriteAzureBlock(this);

                // Generate the xml block list naming every uploaded (uncommitted) block in order
                XmlDocument *blockXml = xmlDocumentNew(AZURE_XML_TAG_BLOCK_LIST_STR);

                for (unsigned int blockIdx = 0; blockIdx < strLstSize(this->blockIdList); blockIdx++)
                {
                    xmlNodeContentSet(
                        xmlNodeAdd(xmlDocumentRoot(blockXml), AZURE_XML_TAG_UNCOMMITTED_STR),
                        strLstGet(this->blockIdList, blockIdx));
                }

                // Finalize the multi-block upload by committing the block list
                storageAzureRequestP(
                    this->storage, HTTP_VERB_PUT_STR, .uri = this->interface.name,
                    .query = httpQueryAdd(httpQueryNew(), AZURE_QUERY_COMP_STR, AZURE_QUERY_VALUE_BLOCK_LIST_STR),
                    .content = xmlDocumentBuf(blockXml));
            }
            // Else upload all the data in a single block. Fix: use the explicit .header designator -- the header was previously
            // passed positionally into the param struct, which only landed in the header field by accident of field order and
            // would break silently if StorageAzureRequestParam were ever reordered.
            else
            {
                storageAzureRequestP(
                    this->storage, HTTP_VERB_PUT_STR, .uri = this->interface.name,
                    .header = httpHeaderAdd(httpHeaderNew(NULL), AZURE_HEADER_BLOB_TYPE_STR, AZURE_HEADER_VALUE_BLOCK_BLOB_STR),
                    .content = this->blockBuffer);
            }

            // Free the buffer and mark the file closed
            bufFree(this->blockBuffer);
            this->blockBuffer = NULL;
        }
        MEM_CONTEXT_TEMP_END();
    }

    FUNCTION_LOG_RETURN_VOID();
}
/**********************************************************************************************************************************/
StorageWrite *
storageWriteAzureNew(StorageAzure *storage, const String *name, uint64_t fileId, size_t blockSize)
{
    FUNCTION_LOG_BEGIN(logLevelTrace);
        FUNCTION_LOG_PARAM(STORAGE_AZURE, storage);
        FUNCTION_LOG_PARAM(STRING, name);
        FUNCTION_LOG_PARAM(UINT64, fileId);
        FUNCTION_LOG_PARAM(SIZE, blockSize);                        // Fix: SIZE (not UINT64) since blockSize is a size_t,
                                                                    // matching the logging in storageAzureNew()
    FUNCTION_LOG_END();

    ASSERT(storage != NULL);
    ASSERT(name != NULL);

    StorageWrite *this = NULL;

    MEM_CONTEXT_NEW_BEGIN("StorageWriteAzure")
    {
        StorageWriteAzure *driver = memNew(sizeof(StorageWriteAzure));

        *driver = (StorageWriteAzure)
        {
            .memContext = MEM_CONTEXT_NEW(),
            .storage = storage,
            .fileId = fileId,
            .blockSize = blockSize,

            .interface = (StorageWriteInterface)
            {
                .type = STORAGE_AZURE_TYPE_STR,
                .name = strDup(name),

                // These features are implicit on (or meaningless for) object storage so report them as always enabled
                .atomic = true,
                .createPath = true,
                .syncFile = true,
                .syncPath = true,

                .ioInterface = (IoWriteInterface)
                {
                    .close = storageWriteAzureClose,
                    .open = storageWriteAzureOpen,
                    .write = storageWriteAzure,
                },
            },
        };

        this = storageWriteNew(driver, &driver->interface);
    }
    MEM_CONTEXT_NEW_END();

    FUNCTION_LOG_RETURN(STORAGE_WRITE, this);
}

15
src/storage/azure/write.h Normal file
View File

@@ -0,0 +1,15 @@
/***********************************************************************************************************************************
Azure Storage File Write
***********************************************************************************************************************************/
#ifndef STORAGE_AZURE_WRITE_H
#define STORAGE_AZURE_WRITE_H

#include "storage/azure/storage.intern.h"
#include "storage/write.h"

/***********************************************************************************************************************************
Constructors
***********************************************************************************************************************************/
// Construct a write object for a block blob. fileId is used to make block ids unique and blockSize is the size of each block in a
// multi-block upload.
StorageWrite *storageWriteAzureNew(StorageAzure *storage, const String *name, uint64_t fileId, size_t blockSize);

#endif

View File

@@ -12,6 +12,7 @@ Storage Helper
#include "config/define.h"
#include "config/config.h"
#include "protocol/helper.h"
#include "storage/azure/storage.h"
#include "storage/cifs/storage.h"
#include "storage/posix/storage.h"
#include "storage/remote/storage.h"
@@ -350,6 +351,16 @@ storageRepoGet(const String *type, bool write)
STORAGE_MODE_FILE_DEFAULT, STORAGE_MODE_PATH_DEFAULT, write, storageRepoPathExpression,
protocolRemoteGet(protocolStorageTypeRepo, 1), cfgOptionUInt(cfgOptCompressLevelNetwork));
}
// Use Azure storage
else if (strEqZ(type, STORAGE_AZURE_TYPE))
{
result = storageAzureNew(
cfgOptionStr(cfgOptRepoPath), write, storageRepoPathExpression, cfgOptionStr(cfgOptRepoAzureContainer),
cfgOptionStr(cfgOptRepoAzureAccount), cfgOptionStr(cfgOptRepoAzureKey), STORAGE_AZURE_BLOCKSIZE_MIN,
cfgOptionStrNull(cfgOptRepoAzureHost), cfgOptionUInt(cfgOptRepoAzurePort), ioTimeoutMs(),
cfgOptionBool(cfgOptRepoAzureVerifyTls), cfgOptionStrNull(cfgOptRepoAzureCaFile),
cfgOptionStrNull(cfgOptRepoAzureCaPath));
}
// Use CIFS storage
else if (strEqZ(type, STORAGE_CIFS_TYPE))
{

View File

@@ -398,6 +398,20 @@ unit:
- name: storage
test:
# ----------------------------------------------------------------------------------------------------------------------------
- name: azure
total: 3
coverage:
storage/azure/read: full
storage/azure/storage: full
storage/azure/write: full
storage/helper: full
include:
- storage/storage
- storage/write
# ----------------------------------------------------------------------------------------------------------------------------
- name: cifs
total: 1

View File

@@ -1,10 +1,10 @@
run 002 - rmt 1, cmp lz4, error connect, storage s3, enc 0
==========================================================
run 002 - rmt 1, cmp lz4, error connect, storage azure, enc 0
=============================================================
stanza-create db - create required data for stanza (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-path=/ --repo1-type=azure --stanza=db
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]

View File

@@ -1,10 +1,10 @@
run 002 - remote 1, storage s3, enc 1, cmp lz4
==============================================
run 002 - remote 1, storage azure, enc 1, cmp lz4
=================================================
stanza-create db - fail on missing control file (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online --log-level-file=[LOG-LEVEL-FILE] stanza-create
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 ERROR: [055]: raised from remote-0 protocol on 'db-primary': unable to open missing file '[TEST_PATH]/db-primary/db/base/global/pg_control' for read
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
@@ -14,7 +14,7 @@ P00 INFO: stanza-create command end: aborted with exception [055]
stanza-upgrade db - fail on stanza not initialized since archive.info is missing (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 ERROR: [055]: unable to load info file '/archive/db/archive.info' or '/archive/db/archive.info.copy':
FileMissingError: unable to open '/archive/db/archive.info': No such file or directory
FileMissingError: unable to open '/archive/db/archive.info.copy': No such file or directory
@@ -30,7 +30,7 @@ P00 INFO: stanza-upgrade command end: aborted with exception [055]
stanza-create db - successfully create the stanza (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]
@@ -81,7 +81,7 @@ backrest-checksum="[CHECKSUM]"
stanza-create db - do not fail on rerun of stanza-create - info files exist and DB section ok (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-create
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 INFO: stanza 'db' already exists and is valid
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
@@ -133,7 +133,7 @@ backrest-checksum="[CHECKSUM]"
stanza-create db - fail on database mismatch and warn force option deprecated (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online --force stanza-create
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --force --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-create command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --force --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 WARN: option --force is no longer supported
P00 ERROR: [028]: backup and archive info files exist but do not match the database
HINT: is this the correct stanza?
@@ -188,7 +188,7 @@ backrest-checksum="[CHECKSUM]"
stanza-upgrade db - already up to date (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 INFO: stanza 'db' is already up to date
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
@@ -257,7 +257,7 @@ P00 ERROR: [044]: PostgreSQL version 9.4, system-id 1000000000000000094 do not
stanza-upgrade db - successful upgrade creates additional history (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]
@@ -319,7 +319,7 @@ P00 INFO: archive-get command end: completed successfully
full backup - create first full backup (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=2 --no-online --type=full --stanza=db backup
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db --start-fast --type=full
P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db --start-fast --type=full
P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG (16MB, 33%) checksum 488ba4b8b98acc510bce86b8f16e3c1ed9886a29
P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 (16MB, 66%) checksum 488ba4b8b98acc510bce86b8f16e3c1ed9886a29
P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000001 (16MB, 99%) checksum e40de8cea99dd469c3efb47f5f33a73c7390fb9c
@@ -332,7 +332,7 @@ P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]
P00 INFO: backup command end: completed successfully
P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]
@@ -386,16 +386,15 @@ log-path=[TEST_PATH]/backup/log
log-subprocess=[LOG-SUBPROCESS]
log-timestamp=n
protocol-timeout=60
repo1-azure-account=azAccount
repo1-azure-container=azContainer
repo1-azure-host=azure
repo1-azure-key=YXpLZXk=
repo1-azure-verify-tls=n
repo1-cipher-pass=x
repo1-cipher-type=aes-256-cbc
repo1-path=/
repo1-s3-bucket=pgbackrest-dev
repo1-s3-endpoint=s3.amazonaws.com
repo1-s3-key=accessKey1
repo1-s3-key-secret=verySecretKey1
repo1-s3-region=us-east-1
repo1-s3-verify-ssl=n
repo1-type=s3
repo1-type=azure
[global:backup]
archive-copy=y
@@ -404,7 +403,7 @@ start-fast=y
stanza-upgrade db - successfully upgrade (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]
@@ -462,7 +461,7 @@ backrest-checksum="[CHECKSUM]"
stanza-upgrade db - upgrade fails with mismatched db-ids (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db --no-online stanza-upgrade
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-upgrade command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 ERROR: [028]: backup info file and archive info file do not match
archive: id = 2, version = 9.5, system-id = 1000000000000000095
backup : id = 3, version = 9.5, system-id = 1000000000000000095
@@ -525,7 +524,7 @@ backrest-checksum="[CHECKSUM]"
diff backup - diff changed to full backup (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --repo1-retention-full=2 --no-online --type=diff --stanza=db backup
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db --start-fast --type=diff
P00 INFO: backup command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level=3 --compress-level-network=1 --compress-type=lz4 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --no-online --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db --start-fast --type=diff
P00 WARN: no prior backup exists, diff backup has been changed to full
P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/RECOVERYXLOG (16MB, 33%) checksum 488ba4b8b98acc510bce86b8f16e3c1ed9886a29
P01 INFO: backup file db-primary:[TEST_PATH]/db-primary/db/base/pg_xlog/000000010000000100000002 (16MB, 66%) checksum 488ba4b8b98acc510bce86b8f16e3c1ed9886a29
@@ -539,7 +538,7 @@ P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]
P00 INFO: backup command end: completed successfully
P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: expire command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --config=[TEST_PATH]/backup/pgbackrest.conf --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-retention-full=2 --repo1-type=azure --stanza=db
P00 INFO: remove archive path: /archive/db/9.3-1
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
@@ -594,16 +593,15 @@ log-path=[TEST_PATH]/backup/log
log-subprocess=[LOG-SUBPROCESS]
log-timestamp=n
protocol-timeout=60
repo1-azure-account=azAccount
repo1-azure-container=azContainer
repo1-azure-host=azure
repo1-azure-key=YXpLZXk=
repo1-azure-verify-tls=n
repo1-cipher-pass=x
repo1-cipher-type=aes-256-cbc
repo1-path=/
repo1-s3-bucket=pgbackrest-dev
repo1-s3-endpoint=s3.amazonaws.com
repo1-s3-key=accessKey1
repo1-s3-key-secret=verySecretKey1
repo1-s3-region=us-east-1
repo1-s3-verify-ssl=n
repo1-type=s3
repo1-type=azure
[global:backup]
archive-copy=y
@@ -612,7 +610,7 @@ start-fast=y
stanza-delete db - fail on missing stop file (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db stanza-delete
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 ERROR: [055]: stop file does not exist for stanza 'db'
HINT: has the pgbackrest stop command been run on this server for this stanza?
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
@@ -633,13 +631,13 @@ db
stop db stanza (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db stop
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stop command begin [BACKREST-VERSION]: --config=[TEST_PATH]/backup/pgbackrest.conf --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stop command begin [BACKREST-VERSION]: --config=[TEST_PATH]/backup/pgbackrest.conf --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 INFO: stop command end: completed successfully
stanza-delete db - successfully delete the stanza (backup host)
> [CONTAINER-EXEC] backup [BACKREST-BIN] --config=[TEST_PATH]/backup/pgbackrest.conf --stanza=db stanza-delete
------------------------------------------------------------------------------------------------------------------------------------
P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-s3-bucket=pgbackrest-dev --repo1-s3-endpoint=s3.amazonaws.com --repo1-s3-key=<redacted> --repo1-s3-key-secret=<redacted> --repo1-s3-region=us-east-1 --no-repo1-s3-verify-tls --repo1-type=s3 --stanza=db
P00 INFO: stanza-delete command begin [BACKREST-VERSION]: --buffer-size=[BUFFER-SIZE] --compress-level-network=1 --config=[TEST_PATH]/backup/pgbackrest.conf --db-timeout=45 --lock-path=[TEST_PATH]/backup/lock --log-level-console=detail --log-level-file=[LOG-LEVEL-FILE] --log-level-stderr=off --log-path=[TEST_PATH]/backup/log[] --no-log-timestamp --pg1-host=db-primary --pg1-host-cmd=[BACKREST-BIN] --pg1-host-config=[TEST_PATH]/db-primary/pgbackrest.conf --pg1-host-user=[USER-1] --pg1-path=[TEST_PATH]/db-primary/db/base --protocol-timeout=60 --repo1-azure-account=<redacted> --repo1-azure-container=azContainer --repo1-azure-host=azure --repo1-azure-key=<redacted> --no-repo1-azure-verify-tls --repo1-cipher-pass=<redacted> --repo1-cipher-type=aes-256-cbc --repo1-path=/ --repo1-type=azure --stanza=db
P00 DETAIL: socket statistics:[SOCKET-STATISTICS]
P00 DETAIL: tls statistics:[TLS-STATISTICS]
P00 INFO: http statistics:[HTTP-STATISTICS]

View File

@@ -0,0 +1,81 @@
####################################################################################################################################
# Azure Test Host
####################################################################################################################################
package pgBackRestTest::Env::Host::HostAzureTest;
use parent 'pgBackRestTest::Common::HostTest';
####################################################################################################################################
# Perl includes
####################################################################################################################################
use strict;
use warnings FATAL => qw(all);
use Carp qw(confess);
use Cwd qw(abs_path);
use Exporter qw(import);
our @EXPORT = qw();
use File::Basename qw(dirname);
use Storable qw(dclone);
use pgBackRestDoc::Common::Exception;
use pgBackRestDoc::Common::Ini;
use pgBackRestDoc::Common::Log;
use pgBackRestDoc::ProjectInfo;
use pgBackRestTest::Common::ContainerTest;
use pgBackRestTest::Common::ExecuteTest;
use pgBackRestTest::Common::HostGroupTest;
use pgBackRestTest::Common::RunTest;
use pgBackRestTest::Common::StorageRepo;
use pgBackRestTest::Common::Wait;
use pgBackRestTest::Env::Host::HostBaseTest;
use pgBackRestTest::Env::Manifest;
####################################################################################################################################
# Azure defaults
####################################################################################################################################
# Storage account name used for all Azure integration tests
use constant HOST_AZURE_ACCOUNT => 'azAccount';
push @EXPORT, qw(HOST_AZURE_ACCOUNT);
# Shared key for the test account -- this is the base64 encoding of 'azKey' (it is passed to the emulator as-is via
# AZURITE_ACCOUNTS below, so presumably Azurite expects the key already base64-encoded -- verify against Azurite docs)
use constant HOST_AZURE_KEY => 'YXpLZXk=';
push @EXPORT, qw(HOST_AZURE_KEY);
# Container used as the repository root in the tests
use constant HOST_AZURE_CONTAINER => 'azContainer';
push @EXPORT, qw(HOST_AZURE_CONTAINER);
####################################################################################################################################
# new
#
# Create a container host running the Azurite Azure storage emulator for integration testing. The project's fake S3 server
# certificate/key pair is mounted read-only into the container so the emulator can serve TLS on port 443.
####################################################################################################################################
sub new
{
    my $class = shift;          # Class name
    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->new', \@_,
        );
    # Create the host
    # Locate the fake TLS cert/key shipped in the project's doc resources (two dirname() calls walk up from the test script)
    my $strProjectPath = dirname(dirname(abs_path($0)));
    my $strFakeCertPath = "${strProjectPath}/doc/resource/fake-cert";
    # Start the Azurite image with the test account:key registered through the AZURITE_ACCOUNTS environment variable and the
    # blob service listening for TLS connections on 443
    my $self = $class->SUPER::new(
        HOST_AZURE, 'test-' . testRunGet()->vmId() . '-' . HOST_AZURE, 'mcr.microsoft.com/azure-storage/azurite', 'root', 'u18',
        ["${strFakeCertPath}/s3-server.crt:/root/public.crt:ro",
        "${strFakeCertPath}/s3-server.key:/root/private.key:ro"],
        '-e AZURITE_ACCOUNTS="' . HOST_AZURE_ACCOUNT . ':' . HOST_AZURE_KEY . '"',
        'azurite-blob --blobPort 443 --blobHost 0.0.0.0 --cert=/root/public.crt --key=/root/private.key -d debug.log', false);
    bless $self, $class;
    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'self', value => $self, trace => true}
    );
}
1;

View File

@@ -29,6 +29,7 @@ use pgBackRestTest::Common::StorageBase;
use pgBackRestTest::Common::StorageRepo;
use pgBackRestTest::Env::ArchiveInfo;
use pgBackRestTest::Env::BackupInfo;
use pgBackRestTest::Env::Host::HostAzureTest;
use pgBackRestTest::Env::Host::HostBaseTest;
use pgBackRestTest::Env::Host::HostS3Test;
use pgBackRestTest::Env::Manifest;
@@ -73,6 +74,8 @@ use constant CFGOPTVAL_BACKUP_TYPE_INCR => 'incr';
use constant CFGOPTVAL_REPO_CIPHER_TYPE_AES_256_CBC => 'aes-256-cbc';
push @EXPORT, qw(CFGOPTVAL_REPO_CIPHER_TYPE_AES_256_CBC);
use constant AZURE => 'azure';
push @EXPORT, qw(AZURE);
use constant CIFS => 'cifs';
push @EXPORT, qw(CIFS);
use constant POSIX => STORAGE_POSIX;
@@ -1193,6 +1196,15 @@ sub configCreate
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-s3-region'} = HOST_S3_REGION;
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-s3-verify-ssl'} = 'n';
}
elsif ($oParam->{strStorage} eq AZURE)
{
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-type'} = AZURE;
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-account'} = HOST_AZURE_ACCOUNT;
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-key'} = HOST_AZURE_KEY;
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-container'} = HOST_AZURE_CONTAINER;
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-host'} = HOST_AZURE;
$oParamHash{&CFGDEF_SECTION_GLOBAL}{'repo1-azure-verify-tls'} = 'n';
}
if (defined($$oParam{bHardlink}) && $$oParam{bHardlink})
{

View File

@@ -37,6 +37,8 @@ use constant HOST_DB_STANDBY => 'db-stand
push @EXPORT, qw(HOST_DB_STANDBY);
use constant HOST_BACKUP => 'backup';
push @EXPORT, qw(HOST_BACKUP);
use constant HOST_AZURE => 'azure';
push @EXPORT, qw(HOST_AZURE);
use constant HOST_S3 => 's3-server';
push @EXPORT, qw(HOST_S3);

View File

@@ -26,6 +26,7 @@ use pgBackRestTest::Common::RunTest;
use pgBackRestTest::Common::StorageBase;
use pgBackRestTest::Common::StorageRepo;
use pgBackRestTest::Env::ArchiveInfo;
use pgBackRestTest::Env::Host::HostAzureTest;
use pgBackRestTest::Env::Host::HostBackupTest;
use pgBackRestTest::Env::Host::HostBaseTest;
use pgBackRestTest::Env::Host::HostDbCommonTest;
@@ -61,6 +62,10 @@ sub setup
{
$oHostObject = new pgBackRestTest::Env::Host::HostS3Test();
}
elsif ($oConfigParam->{strStorage} eq AZURE)
{
$oHostObject = new pgBackRestTest::Env::Host::HostAzureTest();
}
# Get host group
my $oHostGroup = hostGroupGet();
@@ -123,6 +128,10 @@ sub setup
{
$oHostGroup->hostAdd($oHostObject, {rstryHostName => ['pgbackrest-dev.s3.amazonaws.com', 's3.amazonaws.com']});
}
elsif ($oConfigParam->{strStorage} eq AZURE)
{
$oHostGroup->hostAdd($oHostObject);
}
# Create db-primary config
$oHostDbPrimary->configCreate({

View File

@@ -207,11 +207,11 @@ sub run
(
{vm => VM1, remote => false, storage => S3, encrypt => false, delta => true, compress => LZ4},
{vm => VM1, remote => true, storage => POSIX, encrypt => true, delta => false, compress => BZ2},
{vm => VM2, remote => false, storage => POSIX, encrypt => true, delta => true, compress => BZ2},
{vm => VM2, remote => true, storage => S3, encrypt => false, delta => false, compress => GZ},
{vm => VM2, remote => false, storage => POSIX, encrypt => false, delta => true, compress => BZ2},
{vm => VM2, remote => true, storage => AZURE, encrypt => true, delta => false, compress => GZ},
{vm => VM3, remote => false, storage => POSIX, encrypt => false, delta => true, compress => ZST},
{vm => VM3, remote => true, storage => S3, encrypt => true, delta => false, compress => LZ4},
{vm => VM4, remote => false, storage => POSIX, encrypt => false, delta => false, compress => GZ},
{vm => VM4, remote => false, storage => AZURE, encrypt => false, delta => false, compress => GZ},
{vm => VM4, remote => true, storage => S3, encrypt => true, delta => true, compress => ZST},
)
{
@@ -493,7 +493,7 @@ sub run
# Pass bogus socket path to make sure it is passed through the protocol layer (it won't be used)
($bRemote ? ' --pg1-socket-path=/test_socket_path' : '') .
' --buffer-size=16384 --checksum-page --process-max=1',
strRepoType => $strStorage eq S3 ? undef : CIFS, strTest => $strTestPoint, fTestDelay => 0});
strRepoType => $strStorage eq POSIX ? CIFS : undef, strTest => $strTestPoint, fTestDelay => 0});
$oManifest{&MANIFEST_SECTION_BACKUP_OPTION}{&MANIFEST_KEY_PROCESS_MAX} = $strStorage eq S3 ? 2 : 1;
$oManifest{&MANIFEST_SECTION_BACKUP_OPTION}{&MANIFEST_KEY_BUFFER_SIZE} = 65536;
@@ -583,7 +583,7 @@ sub run
# Create a temp file in backup temp root to be sure it's deleted correctly
my $strTempFile = "${strResumePath}/file.tmp";
if ($strStorage eq S3)
if ($strStorage ne POSIX)
{
storageRepo()->put($strTempFile, "TEMP");
}

View File

@@ -44,11 +44,11 @@ sub run
foreach my $rhRun
(
{vm => VM1, remote => false, storage => POSIX, encrypt => false, compress => LZ4, error => 0},
{vm => VM1, remote => true, storage => S3, encrypt => true, compress => GZ, error => 1},
{vm => VM1, remote => true, storage => AZURE, encrypt => true, compress => GZ, error => 1},
{vm => VM2, remote => false, storage => S3, encrypt => false, compress => NONE, error => 0},
{vm => VM2, remote => true, storage => POSIX, encrypt => true, compress => BZ2, error => 0},
{vm => VM3, remote => false, storage => POSIX, encrypt => true, compress => NONE, error => 0},
{vm => VM3, remote => true, storage => S3, encrypt => false, compress => LZ4, error => 1},
{vm => VM3, remote => true, storage => AZURE, encrypt => false, compress => LZ4, error => 1},
{vm => VM4, remote => false, storage => S3, encrypt => true, compress => ZST, error => 0},
{vm => VM4, remote => true, storage => POSIX, encrypt => false, compress => NONE, error => 0},
)

View File

@@ -84,12 +84,12 @@ sub run
foreach my $rhRun
(
{vm => VM1, remote => false, storage => POSIX, encrypt => false, compress => GZ},
{vm => VM1, remote => true, storage => S3, encrypt => true, compress => LZ4},
{vm => VM2, remote => false, storage => S3, encrypt => false, compress => BZ2},
{vm => VM1, remote => true, storage => AZURE, encrypt => false, compress => LZ4},
{vm => VM2, remote => false, storage => S3, encrypt => true, compress => BZ2},
{vm => VM2, remote => true, storage => POSIX, encrypt => true, compress => BZ2},
{vm => VM3, remote => false, storage => POSIX, encrypt => true, compress => LZ4},
{vm => VM3, remote => true, storage => S3, encrypt => false, compress => ZST},
{vm => VM4, remote => false, storage => S3, encrypt => true, compress => ZST},
{vm => VM4, remote => false, storage => AZURE, encrypt => true, compress => ZST},
{vm => VM4, remote => true, storage => POSIX, encrypt => false, compress => GZ},
)
{

View File

@@ -48,7 +48,7 @@ sub run
{vm => VM1, storage => POSIX, encrypt => false},
{vm => VM2, storage => S3, encrypt => true},
{vm => VM3, storage => POSIX, encrypt => false},
{vm => VM4, storage => S3, encrypt => true},
{vm => VM4, storage => AZURE, encrypt => true},
)
{
# Only run tests for this vm

View File

@@ -43,10 +43,10 @@ sub run
(
{vm => VM1, remote => false, storage => POSIX, encrypt => true, compress => LZ4},
{vm => VM1, remote => true, storage => S3, encrypt => false, compress => BZ2},
{vm => VM2, remote => false, storage => S3, encrypt => true, compress => BZ2},
{vm => VM2, remote => false, storage => AZURE, encrypt => true, compress => BZ2},
{vm => VM2, remote => true, storage => POSIX, encrypt => false, compress => GZ},
{vm => VM3, remote => false, storage => POSIX, encrypt => false, compress => ZST},
{vm => VM3, remote => true, storage => S3, encrypt => true, compress => LZ4},
{vm => VM3, remote => true, storage => AZURE, encrypt => true, compress => LZ4},
{vm => VM4, remote => false, storage => S3, encrypt => false, compress => GZ},
{vm => VM4, remote => true, storage => POSIX, encrypt => true, compress => ZST},
)

View File

@@ -53,18 +53,18 @@ sub run
foreach my $rhRun
(
{pg => PG_VERSION_83, repoDest => HOST_DB_PRIMARY, storage => POSIX, encrypt => false, compress => NONE},
{pg => PG_VERSION_84, repoDest => HOST_BACKUP, storage => S3, encrypt => true, compress => GZ},
{pg => PG_VERSION_84, repoDest => HOST_BACKUP, storage => AZURE, encrypt => true, compress => GZ},
{pg => PG_VERSION_90, repoDest => HOST_DB_PRIMARY, storage => POSIX, encrypt => true, compress => BZ2},
{pg => PG_VERSION_91, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => false, compress => NONE},
{pg => PG_VERSION_92, repoDest => HOST_DB_STANDBY, storage => POSIX, encrypt => true, compress => NONE},
{pg => PG_VERSION_93, repoDest => HOST_BACKUP, storage => S3, encrypt => false, compress => GZ},
{pg => PG_VERSION_93, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => GZ},
{pg => PG_VERSION_94, repoDest => HOST_DB_STANDBY, storage => POSIX, encrypt => true, compress => LZ4},
{pg => PG_VERSION_95, repoDest => HOST_BACKUP, storage => S3, encrypt => false, compress => BZ2},
{pg => PG_VERSION_96, repoDest => HOST_BACKUP, storage => POSIX, encrypt => false, compress => NONE},
{pg => PG_VERSION_10, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => true, compress => GZ},
{pg => PG_VERSION_11, repoDest => HOST_BACKUP, storage => POSIX, encrypt => false, compress => ZST},
{pg => PG_VERSION_11, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => ZST},
{pg => PG_VERSION_12, repoDest => HOST_DB_STANDBY, storage => S3, encrypt => true, compress => LZ4},
{pg => PG_VERSION_13, repoDest => HOST_BACKUP, storage => POSIX, encrypt => false, compress => ZST},
{pg => PG_VERSION_13, repoDest => HOST_BACKUP, storage => AZURE, encrypt => false, compress => ZST},
)
{
# Only run tests for this pg version

View File

@@ -182,6 +182,15 @@ testRun(void)
"\n",
"Repository Options:\n"
"\n"
" --repo-azure-account azure repository account\n"
" --repo-azure-ca-file azure repository TLS CA file\n"
" --repo-azure-ca-path azure repository TLS CA path\n"
" --repo-azure-container azure repository container\n"
" --repo-azure-host azure repository host\n"
" --repo-azure-key azure repository shared key\n"
" --repo-azure-port azure repository server port [default=443]\n"
" --repo-azure-verify-tls azure repository server certificate verify\n"
" [default=y]\n"
" --repo-cipher-pass repository cipher passphrase\n"
" [current=<redacted>]\n"
" --repo-cipher-type cipher used to encrypt the repository\n"

View File

@@ -0,0 +1,783 @@
/***********************************************************************************************************************************
Test Azure Storage
***********************************************************************************************************************************/
#include "common/io/handleRead.h"
#include "common/io/handleWrite.h"
#include "common/harnessConfig.h"
#include "common/harnessFork.h"
#include "common/harnessStorage.h"
#include "common/harnessTls.h"
/***********************************************************************************************************************************
Constants
***********************************************************************************************************************************/
// Storage account name expected in request URIs and signatures
#define TEST_ACCOUNT "account"
STRING_STATIC(TEST_ACCOUNT_STR, TEST_ACCOUNT);
// Container name expected in request URIs
#define TEST_CONTAINER "container"
STRING_STATIC(TEST_CONTAINER_STR, TEST_CONTAINER);
// Shared key for the test account -- base64 encoding of 'azKey', matching the key used by the Perl Azurite test host
#define TEST_KEY "YXpLZXk="
STRING_STATIC(TEST_KEY_STR, TEST_KEY);
/***********************************************************************************************************************************
Helper to build test requests
***********************************************************************************************************************************/
typedef struct TestRequestParam
{
VAR_PARAM_HEADER;
const char *content;
const char *blobType;
} TestRequestParam;
#define testRequestP(verb, uri, ...) \
testRequest(verb, uri, (TestRequestParam){VAR_PARAM_INIT, __VA_ARGS__})
static void
testRequest(const char *verb, const char *uri, TestRequestParam param)
{
// Add authorization string
String *request = strNewFmt(
"%s /" TEST_ACCOUNT "/" TEST_CONTAINER "%s HTTP/1.1\r\n"
"authorization:SharedKey account:????????????????????????????????????????????\r\n",
verb, uri);
// Add content-length
strCatFmt(request, "content-length:%zu\r\n", param.content == NULL ? 0 : strlen(param.content));
// Add md5
if (param.content != NULL)
{
char md5Hash[HASH_TYPE_MD5_SIZE_HEX];
encodeToStr(encodeBase64, bufPtr(cryptoHashOne(HASH_TYPE_MD5_STR, BUFSTRZ(param.content))), HASH_TYPE_M5_SIZE, md5Hash);
strCatFmt(request, "content-md5:%s\r\n", md5Hash);
}
// Add date
strCatZ(request, "date:???, ?? ??? ???? ??:??:?? GMT\r\n");
// Add host
strCatFmt(request, "host:%s\r\n", strPtr(hrnTlsServerHost()));
// Add blob type
if (param.blobType != NULL)
strCatFmt(request, "x-ms-blob-type:%s\r\n", param.blobType);
// Add version
strCatZ(request, "x-ms-version:2019-02-02\r\n");
// Complete headers
strCatZ(request, "\r\n");
// Add content
if (param.content != NULL)
strCatZ(request, param.content);
hrnTlsServerExpect(request);
}
/***********************************************************************************************************************************
Helper to build test responses
***********************************************************************************************************************************/
typedef struct TestResponseParam
{
    VAR_PARAM_HEADER;
    unsigned int code;                                              // HTTP status code (0 means default 200)
    const char *header;                                             // Extra header line(s), without trailing CRLF
    const char *content;                                            // Response body (NULL means no body)
} TestResponseParam;

#define testResponseP(...)                                                                                                         \
    testResponse((TestResponseParam){VAR_PARAM_INIT, __VA_ARGS__})

// Build an HTTP response and register it with the TLS test server so it is returned for the next expected request.
static void
testResponse(TestResponseParam param)
{
    // Default the status code to 200 when the caller does not specify one
    if (param.code == 0)
        param.code = 200;

    // Status line -- only the codes these tests use get a reason phrase
    String *reply = strNewFmt("HTTP/1.1 %u ", param.code);

    if (param.code == 200)
        strCatZ(reply, "OK");
    else if (param.code == 403)
        strCatZ(reply, "Forbidden");

    strCatZ(reply, "\r\n");

    // Caller-supplied header line(s)
    if (param.header != NULL)
        strCatFmt(reply, "%s\r\n", param.header);

    // Body with its content-length header, or just the blank line terminating the header section
    if (param.content == NULL)
        strCatZ(reply, "\r\n");
    else
    {
        strCatFmt(
            reply,
            "content-length:%zu\r\n"
            "\r\n"
            "%s",
            strlen(param.content), param.content);
    }

    hrnTlsServerReply(reply);
}
/***********************************************************************************************************************************
Test Run
***********************************************************************************************************************************/
void
testRun(void)
{
FUNCTION_HARNESS_VOID();
// *****************************************************************************************************************************
if (testBegin("storageRepoGet()"))
{
// Test without the host option since that can't be run in a unit test without updating dns or /etc/hosts
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("storage with default options");
StringList *argList = strLstNew();
strLstAddZ(argList, "--" CFGOPT_STANZA "=test");
strLstAddZ(argList, "--" CFGOPT_REPO1_TYPE "=" STORAGE_AZURE_TYPE);
strLstAddZ(argList, "--" CFGOPT_REPO1_PATH "=/repo");
strLstAddZ(argList, "--" CFGOPT_REPO1_AZURE_CONTAINER "=" TEST_CONTAINER);
setenv("PGBACKREST_" CFGOPT_REPO1_AZURE_ACCOUNT, TEST_ACCOUNT, true);
setenv("PGBACKREST_" CFGOPT_REPO1_AZURE_KEY, TEST_KEY, true);
harnessCfgLoad(cfgCmdArchivePush, argList);
Storage *storage = NULL;
TEST_ASSIGN(storage, storageRepoGet(strNew(STORAGE_AZURE_TYPE), false), "get repo storage");
TEST_RESULT_STR_Z(storage->path, "/repo", " check path");
TEST_RESULT_STR(((StorageAzure *)storage->driver)->account, TEST_ACCOUNT_STR, " check account");
TEST_RESULT_STR(((StorageAzure *)storage->driver)->container, TEST_CONTAINER_STR, " check container");
TEST_RESULT_STR(((StorageAzure *)storage->driver)->key, TEST_KEY_STR, " check key");
TEST_RESULT_STR_Z(((StorageAzure *)storage->driver)->host, TEST_ACCOUNT ".blob.core.windows.net", " check host");
TEST_RESULT_STR_Z(((StorageAzure *)storage->driver)->uriPrefix, "/" TEST_CONTAINER, " check uri prefix");
TEST_RESULT_UINT(((StorageAzure *)storage->driver)->blockSize, STORAGE_AZURE_BLOCKSIZE_MIN, " check block size");
TEST_RESULT_BOOL(storageFeature(storage, storageFeaturePath), false, " check path feature");
TEST_RESULT_BOOL(storageFeature(storage, storageFeatureCompress), false, " check compress feature");
}
// *****************************************************************************************************************************
if (testBegin("storageAzureAuth()"))
{
StorageAzure *storage = NULL;
HttpHeader *header = NULL;
const String *dateTime = STRDEF("Sun, 21 Jun 2020 12:46:19 GMT");
TEST_ASSIGN(
storage,
(StorageAzure *)storageDriver(
storageAzureNew(
STRDEF("/repo"), false, NULL, TEST_CONTAINER_STR, TEST_ACCOUNT_STR, TEST_KEY_STR, 16, NULL, 443, 1000, true,
NULL, NULL)),
"new azure storage");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("minimal auth");
header = httpHeaderAdd(httpHeaderNew(NULL), HTTP_HEADER_CONTENT_LENGTH_STR, ZERO_STR);
TEST_RESULT_VOID(storageAzureAuth(storage, HTTP_VERB_GET_STR, STRDEF("/path"), NULL, dateTime, header), "auth");
TEST_RESULT_STR_Z(
httpHeaderToLog(header),
"{authorization: 'SharedKey account:edqgT7EhsiIN3q6Al2HCZlpXr2D5cJFavr2ZCkhG9R8=', content-length: '0'"
", date: 'Sun, 21 Jun 2020 12:46:19 GMT', host: 'account.blob.core.windows.net', x-ms-version: '2019-02-02'}",
"check headers");
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("auth with md5 and query");
header = httpHeaderAdd(httpHeaderNew(NULL), HTTP_HEADER_CONTENT_LENGTH_STR, STRDEF("44"));
httpHeaderAdd(header, HTTP_HEADER_CONTENT_MD5_STR, STRDEF("b64f49553d5c441652e95697a2c5949e"));
HttpQuery *query = httpQueryAdd(httpQueryNew(), STRDEF("a"), STRDEF("b"));
TEST_RESULT_VOID(storageAzureAuth(storage, HTTP_VERB_GET_STR, STRDEF("/path/file"), query, dateTime, header), "auth");
TEST_RESULT_STR_Z(
httpHeaderToLog(header),
"{authorization: 'SharedKey account:5qAnroLtbY8IWqObx8+UVwIUysXujsfWZZav7PrBON0=', content-length: '44'"
", content-md5: 'b64f49553d5c441652e95697a2c5949e', date: 'Sun, 21 Jun 2020 12:46:19 GMT'"
", host: 'account.blob.core.windows.net', x-ms-version: '2019-02-02'}",
"check headers");
}
// *****************************************************************************************************************************
if (testBegin("StorageAzure, StorageReadAzure, and StorageWriteAzure"))
{
HARNESS_FORK_BEGIN()
{
HARNESS_FORK_CHILD_BEGIN(0, true)
{
TEST_RESULT_VOID(
hrnTlsServerRun(ioHandleReadNew(strNew("azure server read"), HARNESS_FORK_CHILD_READ(), 5000)),
"azure server begin");
}
HARNESS_FORK_CHILD_END();
HARNESS_FORK_PARENT_BEGIN()
{
hrnTlsClientBegin(ioHandleWriteNew(strNew("azure client write"), HARNESS_FORK_PARENT_WRITE_PROCESS(0)));
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("test against local host");
StringList *argList = strLstNew();
strLstAddZ(argList, "--" CFGOPT_STANZA "=test");
strLstAddZ(argList, "--" CFGOPT_REPO1_TYPE "=" STORAGE_AZURE_TYPE);
strLstAddZ(argList, "--" CFGOPT_REPO1_PATH "=/");
strLstAddZ(argList, "--" CFGOPT_REPO1_AZURE_CONTAINER "=" TEST_CONTAINER);
strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_AZURE_HOST "=%s", strPtr(hrnTlsServerHost())));
strLstAdd(argList, strNewFmt("--" CFGOPT_REPO1_AZURE_PORT "=%u", hrnTlsServerPort()));
strLstAdd(argList, strNewFmt("--%s" CFGOPT_REPO1_AZURE_VERIFY_TLS, testContainer() ? "" : "no-"));
setenv("PGBACKREST_" CFGOPT_REPO1_AZURE_ACCOUNT, TEST_ACCOUNT, true);
setenv("PGBACKREST_" CFGOPT_REPO1_AZURE_KEY, TEST_KEY, true);
harnessCfgLoad(cfgCmdArchivePush, argList);
Storage *storage = NULL;
TEST_ASSIGN(storage, storageRepoGet(strNew(STORAGE_AZURE_TYPE), true), "get repo storage");
StorageAzure *driver = (StorageAzure *)storage->driver;
TEST_RESULT_STR(driver->host, hrnTlsServerHost(), " check host");
TEST_RESULT_STR_Z(driver->uriPrefix, "/" TEST_ACCOUNT "/" TEST_CONTAINER, " check uri prefix");
TEST_RESULT_BOOL(driver->fileId == 0, false, " check file id");
// Tests need the block size to be 16
driver->blockSize = 16;
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("ignore missing file");
hrnTlsServerAccept();
// Note the URL encoding: '&' in the file name becomes %26 in the request URI
testRequestP(HTTP_VERB_GET, "/fi%26le.txt");
testResponseP(.code = 404);
// 404 with .ignoreMissing returns NULL rather than erroring
TEST_RESULT_PTR(
storageGetP(storageNewReadP(storage, strNew("fi&le.txt"), .ignoreMissing = true)), NULL, "get file");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("error on missing file");
testRequestP(HTTP_VERB_GET, "/file.txt");
testResponseP(.code = 404);
// Without .ignoreMissing a 404 maps to FileMissingError
TEST_ERROR(
storageGetP(storageNewReadP(storage, strNew("file.txt"))), FileMissingError,
"unable to open '/file.txt': No such file or directory");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("get file");
testRequestP(HTTP_VERB_GET, "/file.txt");
testResponseP(.content = "this is a sample file");
TEST_RESULT_STR_Z(
strNewBuf(storageGetP(storageNewReadP(storage, strNew("file.txt")))), "this is a sample file", "get file");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("get zero-length file");
testRequestP(HTTP_VERB_GET, "/file0.txt");
testResponseP();
TEST_RESULT_STR_Z(
strNewBuf(storageGetP(storageNewReadP(storage, strNew("file0.txt")))), "", "get zero-length file");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("non-404 error");
testRequestP(HTTP_VERB_GET, "/file.txt");
testResponseP(.code = 303, .content = "CONTENT");
StorageRead *read = NULL;
TEST_ASSIGN(read, storageNewReadP(storage, strNew("file.txt"), .ignoreMissing = true), "new read file");
TEST_RESULT_BOOL(storageReadIgnoreMissing(read), true, " check ignore missing");
TEST_RESULT_STR_Z(storageReadName(read), "/file.txt", " check name");
// Any non-404 error raises ProtocolError with a full request/response dump. Sensitive request headers
// (authorization, date) are redacted in the output.
TEST_ERROR_FMT(
ioReadOpen(storageReadIo(read)), ProtocolError,
"HTTP request failed with 303:\n"
"*** URI/Query ***:\n"
"/account/container/file.txt\n"
"*** Request Headers ***:\n"
"authorization: <redacted>\n"
"content-length: 0\n"
"date: <redacted>\n"
"host: %s\n"
"x-ms-version: 2019-02-02\n"
"*** Response Headers ***:\n"
"content-length: 7\n"
"*** Response Content ***:\n"
"CONTENT",
strPtr(hrnTlsServerHost()));
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write error");
testRequestP(HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD");
testResponseP(.code = 403);
// A 403 is not retried -- the error dump includes the content-md5 header computed over the request body
TEST_ERROR_FMT(
storagePutP(storageNewWriteP(storage, strNew("file.txt")), BUFSTRDEF("ABCD")), ProtocolError,
"HTTP request failed with 403 (Forbidden):\n"
"*** URI/Query ***:\n"
"/account/container/file.txt\n"
"*** Request Headers ***:\n"
"authorization: <redacted>\n"
"content-length: 4\n"
"content-md5: ywjKSnu1+Wg8GRM6hIcspw==\n"
"date: <redacted>\n"
"host: %s\n"
"x-ms-blob-type: BlockBlob\n"
"x-ms-version: 2019-02-02",
strPtr(hrnTlsServerHost()));
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in one part (with retry)");
// First attempt gets a 503, which is retried -- so the same PUT is expected twice
testRequestP(HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD");
testResponseP(.code = 503);
testRequestP(HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "ABCD");
testResponseP();
StorageWrite *write = NULL;
TEST_ASSIGN(write, storageNewWriteP(storage, strNew("file.txt")), "new write");
TEST_RESULT_VOID(storagePutP(write, BUFSTRDEF("ABCD")), "write");
// Object stores have no modes/paths/sync so the driver reports fixed values for these properties
TEST_RESULT_BOOL(storageWriteAtomic(write), true, "write is atomic");
TEST_RESULT_BOOL(storageWriteCreatePath(write), true, "path will be created");
TEST_RESULT_UINT(storageWriteModeFile(write), 0, "file mode is 0");
TEST_RESULT_UINT(storageWriteModePath(write), 0, "path mode is 0");
TEST_RESULT_STR_Z(storageWriteName(write), "/file.txt", "check file name");
TEST_RESULT_BOOL(storageWriteSyncFile(write), true, "file is synced");
TEST_RESULT_BOOL(storageWriteSyncPath(write), true, "path is synced");
// A second close must be a harmless no-op
TEST_RESULT_VOID(storageWriteAzureClose(write->driver), "close file again");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write zero-length file");
testRequestP(HTTP_VERB_PUT, "/file.txt", .blobType = "BlockBlob", .content = "");
testResponseP();
TEST_ASSIGN(write, storageNewWriteP(storage, strNew("file.txt")), "new write");
TEST_RESULT_VOID(storagePutP(write, NULL), "write");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with nothing left over on close");
// With blockSize forced to 16 above, a 32-byte file is uploaded as two 16-byte Put Block requests
// (blockid = <fileId in hex> + 'x' + sequence) followed by a Put Block List committing the Uncommitted blocks
testRequestP(HTTP_VERB_PUT, "/file.txt?blockid=0AAAAAAACCCCCCCCx0000000&comp=block", .content = "1234567890123456");
testResponseP();
testRequestP(HTTP_VERB_PUT, "/file.txt?blockid=0AAAAAAACCCCCCCCx0000001&comp=block", .content = "7890123456789012");
testResponseP();
testRequestP(
HTTP_VERB_PUT, "/file.txt?comp=blocklist",
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<BlockList>"
"<Uncommitted>0AAAAAAACCCCCCCCx0000000</Uncommitted>"
"<Uncommitted>0AAAAAAACCCCCCCCx0000001</Uncommitted>"
"</BlockList>\n");
testResponseP();
// Test needs a predictable file id
driver->fileId = 0x0AAAAAAACCCCCCCC;
TEST_ASSIGN(write, storageNewWriteP(storage, strNew("file.txt")), "new write");
TEST_RESULT_VOID(storagePutP(write, BUFSTRDEF("12345678901234567890123456789012")), "write");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("write file in chunks with something left over on close");
// 20 bytes = one full 16-byte block plus a 4-byte remainder flushed on close. fileId was incremented by the
// previous write so the block ids now end in ...CCCD.
testRequestP(HTTP_VERB_PUT, "/file.txt?blockid=0AAAAAAACCCCCCCDx0000000&comp=block", .content = "1234567890123456");
testResponseP();
testRequestP(HTTP_VERB_PUT, "/file.txt?blockid=0AAAAAAACCCCCCCDx0000001&comp=block", .content = "7890");
testResponseP();
testRequestP(
HTTP_VERB_PUT, "/file.txt?comp=blocklist",
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<BlockList>"
"<Uncommitted>0AAAAAAACCCCCCCDx0000000</Uncommitted>"
"<Uncommitted>0AAAAAAACCCCCCCDx0000001</Uncommitted>"
"</BlockList>\n");
testResponseP();
TEST_ASSIGN(write, storageNewWriteP(storage, strNew("file.txt")), "new write");
TEST_RESULT_VOID(storagePutP(write, BUFSTRDEF("12345678901234567890")), "write");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("info for missing file");
// File info is implemented as an HTTP HEAD request -- a 404 with .ignoreMissing just reports exists = false
testRequestP(HTTP_VERB_HEAD, "/BOGUS");
testResponseP(.code = 404);
TEST_RESULT_BOOL(
storageInfoP(storage, strNew("BOGUS"), .ignoreMissing = true).exists, false, "file does not exist");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("info for file");
// Size/time come from the content-length and Last-Modified response headers
// (Wed, 21 Oct 2015 07:28:00 GMT == epoch 1445412480)
testRequestP(HTTP_VERB_HEAD, "/subdir/file1.txt");
testResponseP(.header = "content-length:9999\r\nLast-Modified: Wed, 21 Oct 2015 07:28:00 GMT");
StorageInfo info;
TEST_ASSIGN(info, storageInfoP(storage, strNew("subdir/file1.txt")), "file exists");
TEST_RESULT_BOOL(info.exists, true, " check exists");
TEST_RESULT_UINT(info.type, storageTypeFile, " check type");
TEST_RESULT_UINT(info.size, 9999, " check exists");
TEST_RESULT_INT(info.timeModified, 1445412480, " check time");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("info check existence only");
// At storageInfoLevelExists the size/time headers are present in the response but not copied into the result
testRequestP(HTTP_VERB_HEAD, "/subdir/file2.txt");
testResponseP(.header = "content-length:777\r\nLast-Modified: Wed, 22 Oct 2015 07:28:00 GMT");
TEST_ASSIGN(
info, storageInfoP(storage, strNew("subdir/file2.txt"), .level = storageInfoLevelExists), "file exists");
TEST_RESULT_BOOL(info.exists, true, " check exists");
TEST_RESULT_UINT(info.type, storageTypeFile, " check type");
TEST_RESULT_UINT(info.size, 0, " check exists");
TEST_RESULT_INT(info.timeModified, 0, " check time");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("list basic level");
// Listing uses the container List Blobs API with delimiter=/ so that <Blob> entries become files and
// <BlobPrefix> entries become paths
testRequestP(HTTP_VERB_GET, "?comp=list&delimiter=%2F&prefix=path%2Fto%2F&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>path/to/test_file</Name>"
"      <Properties>"
"        <Last-Modified>Mon, 12 Oct 2009 17:50:30 GMT</Last-Modified>"
"        <Content-Length>787</Content-Length>"
"      </Properties>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>path/to/test_path/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
HarnessStorageInfoListCallbackData callbackData =
{
.content = strNew(""),
};
// .errorOnMissing is not valid for object stores -- the storage does not advertise storageFeaturePath
TEST_ERROR(
storageInfoListP(storage, strNew("/"), hrnStorageInfoListCallback, NULL, .errorOnMissing = true),
AssertError, "assertion '!param.errorOnMissing || storageFeature(this, storageFeaturePath)' failed");
TEST_RESULT_VOID(
storageInfoListP(storage, strNew("/path/to"), hrnStorageInfoListCallback, &callbackData), "list");
TEST_RESULT_STR_Z(
callbackData.content,
"test_path {path}\n"
"test_file {file, s=787, t=1255369830}\n",
"check");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("list exists level");
// Root listing has no prefix parameter; at exists level no size/time is reported (empty <Properties/>)
testRequestP(HTTP_VERB_GET, "?comp=list&delimiter=%2F&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>test1.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>path1/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
callbackData.content = strNew("");
TEST_RESULT_VOID(
storageInfoListP(
storage, strNew("/"), hrnStorageInfoListCallback, &callbackData, .level = storageInfoLevelExists),
"list");
TEST_RESULT_STR_Z(
callbackData.content,
"path1 {}\n"
"test1.txt {}\n",
"check");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("list a file in root with expression");
// The fixed leading part of the expression ("test") is pushed down into the request prefix
testRequestP(HTTP_VERB_GET, "?comp=list&delimiter=%2F&prefix=test&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>test1.txt</Name>"
"      <Properties/>"
"    </Blob>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
callbackData.content = strNew("");
TEST_RESULT_VOID(
storageInfoListP(
storage, strNew("/"), hrnStorageInfoListCallback, &callbackData, .expression = strNew("^test.*$"),
.level = storageInfoLevelExists),
"list");
TEST_RESULT_STR_Z(
callbackData.content,
"test1.txt {}\n",
"check");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("list files with continuation");
testRequestP(HTTP_VERB_GET, "?comp=list&delimiter=%2F&prefix=path%2Fto%2F&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>path/to/test1.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <Blob>"
"      <Name>path/to/test2.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>path/to/path1/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker>ueGcxLPRx1Tr</NextMarker>"
"</EnumerationResults>");
// A non-empty <NextMarker> indicates a truncated result, so a second request is expected with the marker
// parameter set to continue the listing
testRequestP(HTTP_VERB_GET, "?comp=list&delimiter=%2F&marker=ueGcxLPRx1Tr&prefix=path%2Fto%2F&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>path/to/test3.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>path/to/path2/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
callbackData.content = strNew("");
TEST_RESULT_VOID(
storageInfoListP(
storage, strNew("/path/to"), hrnStorageInfoListCallback, &callbackData, .level = storageInfoLevelExists),
"list");
// Results are reported per page in the order received -- path2/test3 come after the first page's entries
TEST_RESULT_STR_Z(
callbackData.content,
"path1 {}\n"
"test1.txt {}\n"
"test2.txt {}\n"
"path2 {}\n"
"test3.txt {}\n",
"check");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("list files with expression");
// The literal "test" from the expression is appended to the request prefix; the server still returns entries
// that match the prefix but not the full expression (test2.*), which must be filtered out client-side
testRequestP(HTTP_VERB_GET, "?comp=list&delimiter=%2F&prefix=path%2Fto%2Ftest&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>path/to/test1.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <Blob>"
"      <Name>path/to/test2.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <Blob>"
"      <Name>path/to/test3.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>path/to/test1.path/</Name>"
"    </BlobPrefix>"
"    <BlobPrefix>"
"      <Name>path/to/test2.path/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
callbackData.content = strNew("");
TEST_RESULT_VOID(
storageInfoListP(
storage, strNew("/path/to"), hrnStorageInfoListCallback, &callbackData, .expression = strNew("^test(1|3)"),
.level = storageInfoLevelExists),
"list");
// The expression applies to both paths (test1.path) and files -- test2.* entries are excluded
TEST_RESULT_STR_Z(
callbackData.content,
"test1.path {}\n"
"test1.txt {}\n"
"test3.txt {}\n",
"check");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("remove file");
testRequestP(HTTP_VERB_DELETE, "/path/to/test.txt");
testResponseP();
TEST_RESULT_VOID(storageRemoveP(storage, strNew("/path/to/test.txt")), "remove");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("remove missing file");
// A 404 on delete is ignored -- removing a missing file is not an error
testRequestP(HTTP_VERB_DELETE, "/path/to/missing.txt");
testResponseP(.code = 404);
TEST_RESULT_VOID(storageRemoveP(storage, strNew("/path/to/missing.txt")), "remove");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("remove files from root");
// Recursive path remove lists without a delimiter (flat listing) and issues a DELETE per blob returned.
// <BlobPrefix> entries (not-deleted/) produce no deletes.
testRequestP(HTTP_VERB_GET, "?comp=list&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>test1.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <Blob>"
"      <Name>path1/xxx.zzz</Name>"
"      <Properties/>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>not-deleted/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
testRequestP(HTTP_VERB_DELETE, "/test1.txt");
testResponseP();
testRequestP(HTTP_VERB_DELETE, "/path1/xxx.zzz");
testResponseP();
TEST_RESULT_VOID(storagePathRemoveP(storage, strNew("/"), .recurse = true), "remove");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("remove files from path");
// Same as above but scoped by a prefix for a subpath
testRequestP(HTTP_VERB_GET, "?comp=list&prefix=path%2F&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"    <Blob>"
"      <Name>path/test1.txt</Name>"
"      <Properties/>"
"    </Blob>"
"    <Blob>"
"      <Name>path/path1/xxx.zzz</Name>"
"      <Properties/>"
"    </Blob>"
"    <BlobPrefix>"
"      <Name>path/not-deleted/</Name>"
"    </BlobPrefix>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
testRequestP(HTTP_VERB_DELETE, "/path/test1.txt");
testResponseP();
testRequestP(HTTP_VERB_DELETE, "/path/path1/xxx.zzz");
testResponseP();
TEST_RESULT_VOID(storagePathRemoveP(storage, strNew("/path"), .recurse = true), "remove");
// -----------------------------------------------------------------------------------------------------------------
TEST_TITLE("remove files in empty subpath (nothing to do)");
// An empty listing results in no DELETE requests
testRequestP(HTTP_VERB_GET, "?comp=list&prefix=path%2F&restype=container");
testResponseP(
.content =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<EnumerationResults>"
"  <Blobs>"
"  </Blobs>"
"  <NextMarker/>"
"</EnumerationResults>");
TEST_RESULT_VOID(storagePathRemoveP(storage, strNew("/path"), .recurse = true), "remove");
// -----------------------------------------------------------------------------------------------------------------
// End the client script, which also signals the mock server in the child to shut down
hrnTlsClientEnd();
}
HARNESS_FORK_PARENT_END();
}
HARNESS_FORK_END();
}
FUNCTION_HARNESS_RESULT_VOID();
}