Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-11-23 21:44:40 +02:00)

Fall back to default behavior when deployed to a worker node (#593)

* Add test demonstrating failure when running on worker node

* Consider manager status when checking for swarm-related features (see the illustrative check after the commit metadata below)

* Update documentation
Author: Frederik Ring
Date: 2025-06-09 14:19:18 +02:00
Committed by: GitHub
Parent commit: 5291c5cc1c
Commit: 0ce19a4ff2
9 changed files with 131 additions and 7 deletions
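The substance of the fix lives in the Go code, which is not part of the hunks excerpted below: swarm-specific features are now only used when the local node is a swarm manager, and the tool falls back to its default (non-swarm) behavior otherwise. As a rough illustration of the condition involved (this snippet is not taken from the commit), the same information is exposed by the Docker CLI:

# Illustrative only, not part of this commit: check from a shell whether the
# local node can actually use swarm features (services, configs, stack deploy).
swarm_state=$(docker info --format '{{.Swarm.LocalNodeState}}')
is_manager=$(docker info --format '{{.Swarm.ControlAvailable}}')
if [ "$swarm_state" = "active" ] && [ "$is_manager" = "true" ]; then
  echo "manager node: swarm features are available"
else
  echo "worker or non-swarm node: fall back to default behavior"
fi

A worker node reports `LocalNodeState` as `active` but `ControlAvailable` as `false`, which is exactly the situation the new `worker-node` test below exercises.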


@@ -67,3 +67,11 @@ A test case can signal it wants to run in swarm mode by placing an empty `.swarm
In case the swarm setup should be composed of multiple nodes, a `.multinode` file can be used.
A multinode setup will contain one manager (`manager`) and two worker nodes (`worker1` and `worker2`).
If a test is expected to run in the context of a node other than the `manager`, the hostname can be put in the `.multinode` file.
> [!IMPORTANT]
> When running against a multi-node setup and targeting a non-manager node, the test script will automatically deploy a stack named `test_stack` based on the compose file in the test directory.
> This is required because the non-manager node cannot deploy the stack itself from within the test script.
> This also means you cannot mount local directories created in your test script, as the containers are already created by the time the script runs.
> You can work around this limitation by creating named volumes and then `docker cp`ing the contents your test needs to inspect.
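To make the workaround concrete: the `test/worker-node/run.sh` script added further down in this commit does essentially the following, letting the backup write into a named volume and then copying the results out of the running container for inspection (condensed here for illustration):

# Condensed from the run.sh added below: the stack is already deployed, so the
# archive lives in a named volume and is copied out of the container with docker cp.
LOCAL_DIR=$(mktemp -d)
TMP_DIR=$(mktemp -d)
docker cp "$(docker ps -q -f name=backup):/archive" "$LOCAL_DIR"
tar -xf "$LOCAL_DIR/archive/test.tar.gz" -C "$TMP_DIR"
ls "$TMP_DIR/backup/data"  # contains the files the test asserts on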


@@ -1,5 +1,6 @@
 services:
   manager: &node
+    hostname: manager
     privileged: true
     image: offen/docker-volume-backup:test-sandbox
     healthcheck:
@@ -8,17 +9,19 @@ services:
       timeout: 5s
       retries: 50
     volumes:
-      - $SOURCE:/code
-      - $TARBALL:/cache/image.tar.gz
+      - ./:/code
+      - ${TARBALL:-.}:/cache/image.tar.gz
       - docker_volume_backup_test_sandbox_image:/var/lib/docker/image
       - docker_volume_backup_test_sandbox_overlay2:/var/lib/docker/overlay2
   worker1:
     <<: *node
+    hostname: worker1
     profiles:
       - multinode
   worker2:
     <<: *node
+    hostname: worker2
     profiles:
       - multinode


@@ -45,6 +45,12 @@ for dir in $(find $find_args | sort); do
 fi
 docker compose --profile $compose_profile up -d --wait
+test_context=manager
+if [ -f "${dir}/.multinode" ] && [ -s "${dir}/.multinode" ]; then
+  test_context=$(cat $dir/.multinode)
+  echo "Running tests on $test_context instead of manager"
+fi
+docker compose exec $test_context /bin/sh -c "docker load -i /cache/image.tar.gz"
 if [ -f "${dir}/.swarm" ]; then
 docker compose exec manager docker swarm init
@@ -54,10 +60,13 @@ for dir in $(find $find_args | sort); do
 token=$(docker compose exec manager docker swarm join-token -q worker)
 docker compose exec worker1 docker swarm join --token $token $manager_ip:2377
 docker compose exec worker2 docker swarm join --token $token $manager_ip:2377
+if [ "$test_context" != "manager" ]; then
+  docker compose exec -w "/code/$dir" manager docker stack deploy --compose-file="docker-compose.yml" test_stack
+fi
 fi
-docker compose exec manager /bin/sh -c "docker load -i /cache/image.tar.gz"
-docker compose exec -e TEST_VERSION=$IMAGE_TAG manager /bin/sh -c "/code/test/$test"
+docker compose exec -e TEST_VERSION=$IMAGE_TAG $test_context /bin/sh -c "/code/$test"
 docker compose --profile $compose_profile down
 echo ""


@@ -0,0 +1 @@
worker1


@@ -0,0 +1,56 @@
services:
  database:
    image: mariadb:10.7
    deploy:
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == worker1
    environment:
      MARIADB_ROOT_PASSWORD: test
      MARIADB_DATABASE: backup
    labels:
      - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
      - docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/tmp/volume
  other_database:
    image: mariadb:10.7
    deploy:
      placement:
        constraints:
          - node.hostname == manager
      restart_policy:
        condition: on-failure
    environment:
      MARIADB_ROOT_PASSWORD: test
      MARIADB_DATABASE: backup
    labels:
      - docker-volume-backup.archive-pre=touch /tmp/volume/not-relevant.txt
      - docker-volume-backup.exec-label=not-relevant
    volumes:
      - app_data:/tmp/volume
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    deploy:
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == worker1
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      EXEC_FORWARD_OUTPUT: "true"
    volumes:
      - backup_archive:/archive
      - app_data:/backup/data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
  app_data:
  backup_archive:
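Because this test's `.multinode` file names `worker1`, the test runner (see the `test.sh` hunk above) deploys this compose file as a stack from the manager node before the test script runs, roughly equivalent to running the following from within the test's directory on the manager:

# Shown for orientation only; this is what the runner's stack deploy step boils down to.
docker stack deploy --compose-file=docker-compose.yml test_stack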

test/worker-node/run.sh (new executable file)

@@ -0,0 +1,33 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
export TMP_DIR=$(mktemp -d)
export LOCAL_DIR=$(mktemp -d)
while [ -z $(docker ps -q -f name=backup) ]; do
  info "Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

mkdir -p /archive
docker cp $(docker ps -q -f name=backup):/archive $LOCAL_DIR
tar -xvf "$LOCAL_DIR/archive/test.tar.gz" -C $TMP_DIR

if [ ! -f "$TMP_DIR/backup/data/dump.sql" ]; then
  fail "Could not find file written by pre command."
fi
pass "Found expected file."

if [ -f "$TMP_DIR/backup/data/post.txt" ]; then
  fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."