fix: Migrate away from DockerHub where possible

Adrien Poupa 2025-03-09 13:53:29 -04:00
parent 2a7dbd0342
commit ddcc9210e6
5 changed files with 53 additions and 30 deletions
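The change is the same across all five compose files: each image reference gains an explicit registry host (ghcr.io or public.ecr.aws), so pulls no longer default to docker.io. A minimal sketch of the pattern, using the traefik service from this commit (which tag line to track is a deployment choice, not prescribed here):

services:
  traefik:
    # before: image: traefik:v3.3   (implicitly docker.io/library/traefik)
    # after:  same upstream project, pulled from an explicit registry host
    image: ghcr.io/traefik/traefik:3.3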

View File

@@ -1,6 +1,6 @@
 services:
   traefik:
-    image: traefik:v3.3
+    image: ghcr.io/traefik/traefik:3.3
     container_name: traefik
     restart: always
     environment:
@@ -36,7 +36,7 @@ services:
     extra_hosts:
       - host.docker.internal:172.17.0.1
     healthcheck:
-      test: [ "CMD", "traefik", "healthcheck", "--ping" ]
+      test: ["CMD", "traefik", "healthcheck", "--ping"]
       interval: 30s
       retries: 10
   sonarr:
@@ -51,7 +51,7 @@ services:
       - ${DATA_ROOT}:/data
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:8989/sonarr/ping" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:8989/sonarr/ping"]
       interval: 30s
       retries: 10
     labels:
@@ -81,7 +81,7 @@ services:
       - ${DATA_ROOT}:/data
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:7878/radarr/ping" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:7878/radarr/ping"]
       interval: 30s
       retries: 10
     labels:
@@ -111,7 +111,7 @@ services:
       - ${DATA_ROOT}:/data
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:8686/lidarr/ping" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:8686/lidarr/ping"]
       interval: 30s
       retries: 10
     labels:
@@ -143,7 +143,7 @@ services:
       - ${DATA_ROOT}:/data
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:6767/bazarr/ping" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:6767/bazarr/ping"]
       interval: 5s
       retries: 10
     labels:
@@ -162,7 +162,7 @@ services:
       - homepage.widget.url=http://bazarr:6767/bazarr
       - homepage.widget.key=${BAZARR_API_KEY}
   jellyseerr:
-    image: fallenbagel/jellyseerr:latest
+    image: ghcr.io/fallenbagel/jellyseerr:latest
     container_name: jellyseerr
     environment:
       - LOG_LEVEL=debug
@@ -171,7 +171,14 @@ services:
       - ${CONFIG_ROOT:-.}/jellyseerr:/app/config
     restart: always
     healthcheck:
-      test: ["CMD", "wget", "http://127.0.0.1:5055/api/v1/status", "-qO", "/dev/null"]
+      test:
+        [
+          "CMD",
+          "wget",
+          "http://127.0.0.1:5055/api/v1/status",
+          "-qO",
+          "/dev/null",
+        ]
       interval: 30s
       retries: 10
     labels:
@@ -246,7 +253,7 @@ services:
       - ${CONFIG_ROOT:-.}/prowlarr:/config
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:9696/prowlarr/ping" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:9696/prowlarr/ping"]
       interval: 30s
       retries: 10
     labels:
@@ -296,7 +303,8 @@ services:
     healthcheck:
       # Container may fail if the PIA's token expired, so mark as unhealthy when there is no internet connection
       # see: https://github.com/qdm12/gluetun/issues/641#issuecomment-933856220
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:8080", "https://google.com" ]
+      test:
+        ["CMD", "curl", "--fail", "http://127.0.0.1:8080", "https://google.com"]
       interval: 30s
       retries: 10
     network_mode: "service:vpn"
@@ -328,7 +336,7 @@ services:
       - homepage.widget.username=${QBITTORRENT_USERNAME}
       - homepage.widget.password=${QBITTORRENT_PASSWORD}
   vpn:
-    image: thrnz/docker-wireguard-pia
+    image: ghcr.io/thrnz/docker-wireguard-pia:latest
     container_name: vpn
     volumes:
       - ${CONFIG_ROOT:-.}/pia:/pia
@@ -358,7 +366,7 @@ services:
       retries: 3
     restart: always
   unpackerr:
-    image: golift/unpackerr:latest
+    image: ghcr.io/unpackerr/unpackerr:latest
     container_name: unpackerr
     volumes:
       - ${DOWNLOAD_ROOT}:/data/torrents
@@ -416,7 +424,7 @@ services:
       - "1900:1900/udp"
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:8096/jellyfin/health" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:8096/jellyfin/health"]
       interval: 30s
       retries: 10
     labels:
@@ -522,7 +530,8 @@ services:
       - /var/run/docker.sock:/var/run/docker.sock:ro
       - ${DATA_ROOT}:/data
     restart: always
-    command: [sh, -c, "cp -n /app/config/tpl/*.yaml /app/config && node server.js"]
+    command:
+      [sh, -c, "cp -n /app/config/tpl/*.yaml /app/config && node server.js"]
     labels:
       - traefik.enable=true
       - traefik.http.routers.homepage.rule=(Host(`${HOSTNAME}`) && PathPrefix(`/`))
@@ -530,7 +539,7 @@ services:
       - traefik.http.routers.homepage.tls.certresolver=myresolver
       - traefik.http.services.homepage.loadbalancer.server.port=3000
   watchtower:
-    image: containrrr/watchtower
+    image: ghcr.io/containrrr/watchtower:latest
     container_name: watchtower
     restart: always
     environment:

View File

@@ -13,7 +13,7 @@ services:
       - /run/dbus:/run/dbus:ro
     restart: always
     healthcheck:
-      test: [ "CMD", "curl", "--fail", "http://127.0.0.1:8123" ]
+      test: ["CMD", "curl", "--fail", "http://127.0.0.1:8123"]
       interval: 30s
       retries: 10
     privileged: true
@@ -36,7 +36,7 @@ services:
       - homeassistant
   mosquitto:
     container_name: mosquitto
-    image: eclipse-mosquitto
+    image: public.ecr.aws/docker/library/eclipse-mosquitto:latest
     restart: always
     user: ${USER_ID}:${GROUP_ID}
     environment:
@@ -49,14 +49,28 @@ services:
     ports:
       - "1883:1883"
     healthcheck:
-      test: [ "CMD", "mosquitto_sub", "-p", "1880", "-t", "$$SYS/#", "-C", "1", "-i", "healthcheck", "-W", "3" ]
+      test:
+        [
+          "CMD",
+          "mosquitto_sub",
+          "-p",
+          "1880",
+          "-t",
+          "$$SYS/#",
+          "-C",
+          "1",
+          "-i",
+          "healthcheck",
+          "-W",
+          "3",
+        ]
       interval: 1m
       timeout: 10s
       retries: 3
     profiles:
       - mqtt
   homeassistant-backup:
-    image: adrienpoupa/rclone-backup:latest
+    image: ghcr.io/adrienpoupa/rclone-backup:latest
     container_name: homeassistant-backup
     restart: always
     env_file:
@@ -68,4 +82,4 @@ services:
       - ${CONFIG_ROOT:-.}/homeassistant/backups:/backups
       - ${CONFIG_ROOT:-.}/homeassistant/backup:/config
     profiles:
-      - homeassistant
+      - homeassistant

View File

@@ -1,7 +1,7 @@
 services:
   immich-server:
     container_name: immich_server
-    image: ghcr.io/immich-app/immich-server:v1.125.2
+    image: ghcr.io/immich-app/immich-server:v1.129.0
     environment:
       DB_HOSTNAME: immich_postgres
       DB_PASSWORD: ${IMMICH_DB_PASSWORD}
@@ -37,7 +37,7 @@ services:
   immich-machine-learning:
     container_name: immich_machine_learning
-    image: ghcr.io/immich-app/immich-machine-learning:v1.125.2
+    image: ghcr.io/immich-app/immich-machine-learning:v1.129.0
     volumes:
       - immich-model-cache:/cache
     restart: always
@@ -46,7 +46,7 @@ services:
   immich-redis:
     container_name: immich_redis
-    image: registry.hub.docker.com/library/redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
+    image: public.ecr.aws/docker/library/redis:6.2
     restart: always
     healthcheck:
       test: redis-cli ping || exit 1
@@ -55,12 +55,12 @@ services:
   immich-database:
     container_name: immich_postgres
-    image: registry.hub.docker.com/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
+    image: ghcr.io/tensorchord/pgvecto-rs:pg14-v0.2.0
     environment:
       POSTGRES_PASSWORD: ${IMMICH_DB_PASSWORD}
       POSTGRES_USER: postgres
       POSTGRES_DB: immich
-      POSTGRES_INITDB_ARGS: '--data-checksums'
+      POSTGRES_INITDB_ARGS: "--data-checksums"
     volumes:
       - ${CONFIG_ROOT:-.}/immich/postgresql:/var/lib/postgresql/data
     restart: always
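Two of the replaced Docker Hub references above were pinned by digest, while the new GHCR and ECR Public references are tag-only. If digest pinning is still wanted after the registry move, the usual @sha256 suffix works against any registry; a sketch with a placeholder digest (not a real value):

services:
  immich-redis:
    # <digest> is a placeholder; substitute the manifest digest reported by
    # `docker buildx imagetools inspect public.ecr.aws/docker/library/redis:6.2`
    image: public.ecr.aws/docker/library/redis:6.2@sha256:<digest>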

View File

@@ -41,7 +41,7 @@ services:
       - joplin
   joplin-backup:
-    image: adrienpoupa/rclone-backup:latest
+    image: ghcr.io/adrienpoupa/rclone-backup:latest
     container_name: joplin-backup
     restart: always
     env_file:
@@ -56,4 +56,4 @@ services:
       - ${CONFIG_ROOT:-.}/joplin/storage:/storage
       - ${CONFIG_ROOT:-.}/joplin/backup:/config
     profiles:
-      - joplin
+      - joplin

View File

@@ -1,6 +1,6 @@
 services:
   tandoor:
-    image: vabene1111/recipes:latest
+    image: ghcr.io/tandoorrecipes/recipes:latest
     container_name: tandoor
     restart: always
     env_file:
@@ -17,7 +17,7 @@ services:
       - tandoor
   tandoor-nginx:
-    image: nginx:mainline-alpine
+    image: public.ecr.aws/nginx/nginx:mainline-alpine
     container_name: tandoor-nginx
     restart: always
     env_file:
@@ -48,7 +48,7 @@ services:
       - tandoor
   tandoor-backup:
-    image: adrienpoupa/rclone-backup:latest
+    image: ghcr.io/adrienpoupa/rclone-backup:latest
     container_name: tandoor-backup
     restart: always
     env_file: