mirror of https://github.com/immich-app/immich.git synced 2024-12-25 10:43:13 +02:00

Merge branch 'immich-app:main' into feat/qol-web-face-search

Jan108 2024-12-13 13:43:14 +01:00 committed by GitHub
commit 06c83fa6a8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1713 changed files with 93456 additions and 54915 deletions

.devcontainer/Dockerfile Normal file (2 changed lines)

@ -0,0 +1,2 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:dc2c3654370fe92a55daeefe9d2d95839d85bdc1f68f7fd4ab86621f49e5818a
FROM ${BASEIMAGE}

@ -0,0 +1,20 @@
{
"name": "Immich devcontainers",
"build": {
"dockerfile": "Dockerfile",
"args": {
"BASEIMAGE": "mcr.microsoft.com/devcontainers/typescript-node:22"
}
},
"customizations": {
"vscode": {
"extensions": [
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"postCreateCommand": "make install-all",
"remoteUser": "node"
}

@ -22,6 +22,7 @@ open-api/typescript-sdk/node_modules/
server/coverage/
server/node_modules/
server/upload/
server/src/queries
server/dist/
server/www/

.github/FUNDING.yml vendored Normal file (1 changed line)

@ -0,0 +1 @@
custom: ['https://buy.immich.app']

@ -83,7 +83,6 @@ body:
2.
3.
...
render: bash
validations:
required: true

@ -1,11 +1,14 @@
blank_issues_enabled: false
contact_links:
- name: I have a question or need support
- name: I have a question or need support
url: https://discord.immich.app
about: We use GitHub for tracking bugs, please check out our Discord channel for freaky fast support.
- name: Feature Request
- name: 📷 My photo or video has a date, time, or timezone problem
url: https://github.com/immich-app/immich/discussions/12650
about: Upload a sample file to this discussion and we will take a look
- name: 🌟 Feature request
url: https://github.com/immich-app/immich/discussions/new?category=feature-request
about: Please use our GitHub Discussion for making feature requests.
- name: I'm unsure where to go
- name: 🫣 I'm unsure where to go
url: https://discord.immich.app
about: If you are unsure where to go, then joining our Discord is recommended; Just ask!

@ -1,7 +0,0 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

.github/labeler.yml vendored (3 changed lines)

@ -33,3 +33,6 @@ documentation:
- changed-files:
- any-glob-to-any-file:
- machine-learning/app/**
changelog:translation:
- head-branch: ['^chore/translations$']

.github/release.yml vendored (40 changed lines)

@ -1,41 +1,33 @@
changelog:
categories:
- title: ⚠️ Breaking Changes
- title: 🚨 Breaking Changes
labels:
- breaking-change
- changelog:breaking-change
- title: 🗄️ Server
- title: 🫥 Deprecated Changes
labels:
- 🗄️server
- changelog:deprecated
- title: 📱 Mobile
- title: 🔒 Security
labels:
- 📱mobile
- changelog:security
- title: 🖥️ Web
- title: 🚀 Features
labels:
- 🖥️web
- changelog:feature
- title: 🧠 Machine Learning
- title: 🌟 Enhancements
labels:
- 🧠machine-learning
- changelog:enhancement
- title: ⚡ CLI
- title: 🐛 Bug fixes
labels:
- cli
- changelog:bugfix
- title: 📓 Documentation
- title: 📚 Documentation
labels:
- documentation
- changelog:documentation
- title: 🔨 Maintenance
- title: 🌐 Translations
labels:
- deployment
- dependencies
- renovate
- maintenance
- tech-debt
- title: Other changes
labels:
- "*"
- changelog:translation

@ -16,10 +16,28 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
build-sign-android:
name: Build and sign Android
needs: pre-job
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }}
if: ${{ !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && needs.pre-job.outputs.should_run == 'true' }}
runs-on: macos-14
steps:

@ -22,7 +22,7 @@ permissions:
jobs:
publish:
name: Publish
name: CLI Publish
runs-on: ubuntu-latest
defaults:
run:
@ -59,7 +59,7 @@ jobs:
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.6.1
uses: docker/setup-buildx-action@v3.7.1
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@ -88,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@v6.6.1
uses: docker/build-push-action@v6.10.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64

@ -22,7 +22,7 @@ concurrency:
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
@ -35,7 +35,7 @@ jobs:
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.8.0
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
@ -48,7 +48,7 @@ jobs:
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- cleanup-images
strategy:
@ -64,7 +64,7 @@ jobs:
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.8.0
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"

@ -17,47 +17,106 @@ permissions:
packages: write
jobs:
build_and_push:
name: Build and Push
pre-job:
runs-on: ubuntu-latest
outputs:
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
server:
- 'server/**'
- 'openapi/**'
- 'web/**'
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
retag_ml:
name: Re-Tag ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: ["", "-cuda", "-openvino", "-armnn"]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
retag_server:
name: Re-Tag Server
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'false' && !github.event.pull_request.head.repo.fork }}
runs-on: ubuntu-latest
strategy:
matrix:
suffix: [""]
steps:
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Re-tag image
run: |
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-server
TAG_OLD=main${{ matrix.suffix }}
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
build_and_push_ml:
name: Build and Push ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ubuntu-latest
env:
image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
strategy:
# Prevent a failure in one image from stopping the other builds
fail-fast: false
matrix:
include:
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64,linux/arm64
- platforms: linux/amd64,linux/arm64
device: cpu
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64
- platforms: linux/amd64
device: cuda
suffix: -cuda
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/amd64
- platforms: linux/amd64
device: openvino
suffix: -openvino
- image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
platforms: linux/arm64
- platforms: linux/arm64
device: armnn
suffix: -armnn
- image: immich-server
context: .
file: server/Dockerfile
platforms: linux/amd64,linux/arm64
device: cpu
steps:
- name: Checkout
uses: actions/checkout@v4
@ -66,7 +125,7 @@ jobs:
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.6.1
uses: docker/setup-buildx-action@v3.7.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
@ -93,8 +152,8 @@ jobs:
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{matrix.image}}
name=altran1502/${{matrix.image}},enable=${{ github.event_name == 'release' }}
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
@ -111,18 +170,18 @@ jobs:
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ matrix.image }}" >> $GITHUB_OUTPUT
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.6.1
uses: docker/build-push-action@v6.10.0
with:
context: ${{ matrix.context }}
file: ${{ matrix.file }}
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{matrix.image}}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
@ -132,3 +191,120 @@ jobs:
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
build_and_push_server:
name: Build and Push Server
runs-on: ubuntu-latest
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
env:
image: immich-server
context: .
file: server/Dockerfile
strategy:
fail-fast: false
matrix:
include:
- platforms: linux/amd64,linux/arm64
device: cpu
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.7.1
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Determine build cache output
id: cache-target
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.10.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
success-check-server:
name: Docker Build & Push Server Success
needs: [build_and_push_server, retag_server]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"
success-check-ml:
name: Docker Build & Push ML Success
needs: [build_and_push_ml, retag_ml]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"

@ -2,12 +2,8 @@ name: Docs build
on:
push:
branches: [main]
paths:
- "docs/**"
pull_request:
branches: [main]
paths:
- "docs/**"
release:
types: [published]
@ -16,7 +12,27 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
docs:
- 'docs/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
defaults:
run:

@ -7,13 +7,32 @@ on:
jobs:
checks:
name: Docs Deploy Checks
runs-on: ubuntu-latest
outputs:
parameters: ${{ steps.parameters.outputs.result }}
artifact: ${{ steps.get-artifact.outputs.result }}
steps:
- if: ${{ github.event.workflow_run.conclusion == 'failure' }}
run: echo 'The triggering workflow failed' && exit 1
- if: ${{ github.event.workflow_run.conclusion != 'success' }}
run: echo 'The triggering workflow did not succeed' && exit 1
- name: Get artifact
id: get-artifact
uses: actions/github-script@v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "docs-build-output"
})[0];
if (!matchArtifact) {
console.log("No artifact found with the name docs-build-output, build job was skipped")
return { found: false };
}
return { found: true, id: matchArtifact.id };
- name: Determine deploy parameters
id: parameters
uses: actions/github-script@v7
@ -73,9 +92,10 @@ jobs:
return parameters;
deploy:
name: Docs Deploy
runs-on: ubuntu-latest
needs: checks
if: ${{ fromJson(needs.checks.outputs.parameters).shouldDeploy }}
if: ${{ fromJson(needs.checks.outputs.artifact).found && fromJson(needs.checks.outputs.parameters).shouldDeploy }}
steps:
- name: Checkout code
uses: actions/checkout@v4
@ -98,18 +118,11 @@ jobs:
uses: actions/github-script@v7
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "docs-build-output"
})[0];
let artifact = ${{ needs.checks.outputs.artifact }};
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
artifact_id: artifact.id,
archive_format: 'zip',
});
let fs = require('fs');

@ -5,6 +5,7 @@ on:
jobs:
deploy:
name: Docs Destroy
runs-on: ubuntu-latest
steps:
- name: Checkout code
@ -22,7 +23,7 @@ jobs:
tg_version: "0.58.12"
tofu_version: "1.7.1"
tg_dir: "deployment/modules/cloudflare/docs"
tg_command: "destroy"
tg_command: "destroy -refresh=false"
- name: Comment
uses: actions-cool/maintain-one-comment@v3

.github/workflows/fix-format.yml vendored Normal file (52 changed lines)

@ -0,0 +1,52 @@
name: Fix formatting
on:
pull_request:
types: [labeled]
jobs:
fix-formatting:
runs-on: ubuntu-latest
if: ${{ github.event.label.name == 'fix:formatting' }}
permissions:
pull-requests: write
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: 'Checkout'
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ steps.generate-token.outputs.token }}
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
- name: Fix formatting
run: make install-all && make format-all
- name: Commit and push
uses: EndBug/add-and-commit@v9
with:
default_author: github_actions
message: 'chore: fix formatting'
- name: Remove label
uses: actions/github-script@v7
if: always()
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'fix:formatting'
})

@ -0,0 +1,21 @@
name: PR Label Validation
on:
pull_request_target:
types: [opened, labeled, unlabeled, synchronize]
jobs:
validate-release-label:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- name: Require PR to have a changelog label
uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 1
use_regex: true
labels: "changelog:.*"
add_comment: true

@ -9,7 +9,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/PRConventionalCommits@1.2.0
uses: ytanikin/PRConventionalCommits@1.3.0
with:
task_types: '["feat","fix","docs","test","ci","refactor","perf","chore","revert"]'
add_label: 'false'

@ -29,10 +29,17 @@ jobs:
ref: ${{ steps.push-tag.outputs.commit_long_sha }}
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ secrets.ORG_RELEASE_TOKEN }}
token: ${{ steps.generate-token.outputs.token }}
- name: Install Poetry
run: pipx install poetry
@ -44,10 +51,8 @@ jobs:
id: push-tag
uses: EndBug/add-and-commit@v9
with:
author_name: Alex The Bot
author_email: alex.tran1502@gmail.com
default_author: user_info
message: 'Version ${{ env.IMMICH_VERSION }}'
default_author: github_actions
message: 'chore: version ${{ env.IMMICH_VERSION }}'
tag: ${{ env.IMMICH_VERSION }}
push: true

@ -10,8 +10,27 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
mobile:
- 'mobile/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze:
name: Run Dart Code Analysis
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
runs-on: ubuntu-latest
@ -37,6 +56,10 @@ jobs:
run: dart format lib/ --set-exit-if-changed
working-directory: ./mobile
- name: Run dart custom_lint
run: dart run custom_lint
working-directory: ./mobile
# Enable after riverpod generator migration is completed
# - name: Run dart custom lint
# run: dart run custom_lint

@ -10,8 +10,48 @@ concurrency:
cancel-in-progress: true
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run_web: ${{ steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_server: ${{ steps.found_paths.outputs.server == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_cli: ${{ steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e: ${{ steps.found_paths.outputs.e2e == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_mobile: ${{ steps.found_paths.outputs.mobile == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_ml: ${{ steps.found_paths.outputs.machine-learning == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_web: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.web == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run_e2e_server_cli: ${{ steps.found_paths.outputs.e2e == 'true' || steps.found_paths.outputs.server == 'true' || steps.found_paths.outputs.cli == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
web:
- 'web/**'
- 'i18n/**'
- 'open-api/typescript-sdk/**'
server:
- 'server/**'
cli:
- 'cli/**'
- 'open-api/typescript-sdk/**'
e2e:
- 'e2e/**'
mobile:
- 'mobile/**'
machine-learning:
- 'machine-learning/**'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
server-unit-tests:
name: Server
name: Test & Lint Server
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
@ -41,12 +81,14 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
- name: Run unit tests & coverage
- name: Run small tests & coverage
run: npm run test:cov
if: ${{ !cancelled() }}
cli-unit-tests:
name: CLI
name: Unit Test CLI
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
@ -85,7 +127,9 @@ jobs:
if: ${{ !cancelled() }}
cli-unit-tests-win:
name: CLI (Windows)
name: Unit Test CLI (Windows)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_cli == 'true' }}
runs-on: windows-latest
defaults:
run:
@ -117,7 +161,9 @@ jobs:
if: ${{ !cancelled() }}
web-unit-tests:
name: Web
name: Test & Lint Web
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_web == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
@ -159,13 +205,74 @@ jobs:
run: npm run test:cov
if: ${{ !cancelled() }}
e2e-tests:
name: End-to-End Tests
e2e-tests-lint:
name: End-to-End Lint
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
if: ${{ !cancelled() }}
- name: Run linter
run: npm run lint
if: ${{ !cancelled() }}
- name: Run formatter
run: npm run format
if: ${{ !cancelled() }}
- name: Run tsc
run: npm run check
if: ${{ !cancelled() }}
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e_server_cli == 'true' }}
runs-on: mich
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
@ -191,16 +298,41 @@ jobs:
run: npm ci
if: ${{ !cancelled() }}
- name: Run linter
run: npm run lint
- name: Docker build
run: docker compose build
if: ${{ !cancelled() }}
- name: Run formatter
run: npm run format
- name: Run e2e tests (api & cli)
run: npm run test
if: ${{ !cancelled() }}
- name: Run tsc
run: npm run check
e2e-tests-web:
name: End-to-End Tests (Web)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_e2e_web == 'true' }}
runs-on: mich
defaults:
run:
working-directory: ./e2e
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './e2e/.nvmrc'
- name: Run setup typescript-sdk
run: npm ci && npm run build
working-directory: ./open-api/typescript-sdk
if: ${{ !cancelled() }}
- name: Install dependencies
run: npm ci
if: ${{ !cancelled() }}
- name: Install Playwright Browsers
@ -211,16 +343,14 @@ jobs:
run: docker compose build
if: ${{ !cancelled() }}
- name: Run e2e tests (api & cli)
run: npm run test
if: ${{ !cancelled() }}
- name: Run e2e tests (web)
run: npx playwright test
if: ${{ !cancelled() }}
mobile-unit-tests:
name: Mobile
name: Unit Test Mobile
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_mobile == 'true' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@ -234,7 +364,9 @@ jobs:
run: flutter test -j 1
ml-unit-tests:
name: Machine Learning
name: Unit Test ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ubuntu-latest
defaults:
run:

.gitignore vendored (2 changed lines)

@ -21,3 +21,5 @@ mobile/openapi/.openapi-generator/FILES
open-api/typescript-sdk/build
mobile/android/fastlane/report.xml
mobile/ios/fastlane/report.xml
vite.config.js.timestamp-*

.gitmodules vendored (2 changed lines)

@ -1,6 +1,6 @@
[submodule "mobile/.isar"]
path = mobile/.isar
url = https://github.com/isar/isar
[submodule "server/test/assets"]
[submodule "e2e/test-assets"]
path = e2e/test-assets
url = https://github.com/immich-app/test-assets

.vscode/launch.json vendored (8 changed lines)

@ -5,8 +5,8 @@
"type": "node",
"request": "attach",
"restart": true,
"port": 9230,
"name": "Immich Server",
"port": 9231,
"name": "Immich API Server",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
},
@ -14,8 +14,8 @@
"type": "node",
"request": "attach",
"restart": true,
"port": 9231,
"name": "Immich Microservices",
"port": 9230,
"name": "Immich Workers",
"remoteRoot": "/usr/src/app",
"localRoot": "${workspaceFolder}/server"
}

@ -41,4 +41,4 @@
"explorer.fileNesting.patterns": {
"*.ts": "${capture}.spec.ts,${capture}.mock.ts"
}
}
}

@ -39,7 +39,7 @@ attach-server:
renovate:
LOG_LEVEL=debug npx renovate --platform=local --repository-cache=reset
MODULES = e2e server web cli sdk
MODULES = e2e server web cli sdk docs
audit-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) audit fix
@ -48,11 +48,9 @@ install-%:
build-cli: build-sdk
build-web: build-sdk
build-%: install-%
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'build' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build || true
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run build
format-%:
npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run | grep 'format:fix' >/dev/null \
&& npm --prefix $(subst sdk,open-api/typescript-sdk,$*) run format:fix || true
npm --prefix $* run format:fix
lint-%:
npm --prefix $* run lint:fix
check-%:
@ -66,15 +64,27 @@ test-e2e:
docker compose -f ./e2e/docker-compose.yml build
npm --prefix e2e run test
npm --prefix e2e run test:web
test-medium:
docker run \
--rm \
-v ./server/src:/usr/src/app/src \
-v ./server/test:/usr/src/app/test \
-v ./server/vitest.config.medium.mjs:/usr/src/app/vitest.config.medium.mjs \
-v ./server/tsconfig.json:/usr/src/app/tsconfig.json \
-e NODE_ENV=development \
immich-server:latest \
-c "npm ci && npm run test:medium -- --run"
test-medium-dev:
docker exec -it immich_server /bin/sh -c "npm run test:medium"
build-all: $(foreach M,$(MODULES),build-$M) ;
build-all: $(foreach M,$(filter-out e2e,$(MODULES)),build-$M) ;
install-all: $(foreach M,$(MODULES),install-$M) ;
check-all: $(foreach M,$(MODULES),check-$M) ;
lint-all: $(foreach M,$(MODULES),lint-$M) ;
format-all: $(foreach M,$(MODULES),format-$M) ;
check-all: $(foreach M,$(filter-out sdk cli docs,$(MODULES)),check-$M) ;
lint-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),lint-$M) ;
format-all: $(foreach M,$(filter-out sdk,$(MODULES)),format-$M) ;
audit-all: $(foreach M,$(MODULES),audit-$M) ;
hygiene-all: lint-all format-all check-all sql audit-all;
test-all: $(foreach M,$(MODULES),test-$M) ;
test-all: $(foreach M,$(filter-out sdk docs,$(MODULES)),test-$M) ;
clean:
find . -name "node_modules" -type d -prune -exec rm -rf '{}' +

@ -17,23 +17,24 @@
<img src="design/immich-screenshots.png" title="Main Screenshot">
</a>
<br/>
<p align="center">
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_ca_ES.md">Català</a>
<a href="readme_i18n/README_es_ES.md">Español</a>
<a href="readme_i18n/README_fr_FR.md">Français</a>
<a href="readme_i18n/README_it_IT.md">Italiano</a>
<a href="readme_i18n/README_ja_JP.md">日本語</a>
<a href="readme_i18n/README_ko_KR.md">한국어</a>
<a href="readme_i18n/README_de_DE.md">Deutsch</a>
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>
<a href="readme_i18n/README_ar_JO.md">العربية</a>
<a href="readme_i18n/README_vi_VN.md">Tiếng Việt</a>
<a href="readme_i18n/README_th_TH.md">ภาษาไทย</a>
</p>
## Disclaimer
@ -92,7 +93,7 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| LivePhoto/MotionPhoto backup and playback | Yes | Yes |
| Support 360 degree image display | No | Yes |
| User-defined storage structure | Yes | Yes |
| Public Sharing | No | Yes |
| Public Sharing | Yes | Yes |
| Archive and Favorites | Yes | Yes |
| Global Map | Yes | Yes |
| Partner Sharing | Yes | Yes |
@ -101,6 +102,8 @@ For the mobile app, you can use `https://demo.immich.app/api` for the `Server En
| Offline support | Yes | No |
| Read-only gallery | Yes | Yes |
| Stacked Photos | Yes | Yes |
| Tags | No | Yes |
| Folder View | No | Yes |
## Translations

@ -1 +1 @@
20.16.0
22.12.0

@ -1,4 +1,4 @@
FROM node:20.16.0-alpine3.20@sha256:eb8101caae9ac02229bd64c024919fe3d4504ff7f329da79ca60a04db08cef52 AS core
FROM node:22.11.0-alpine3.20@sha256:b64ced2e7cd0a4816699fe308ce6e8a08ccba463c757c00c14cd372e3d2c763e AS core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

@ -4,8 +4,18 @@ Please see the [Immich CLI documentation](https://immich.app/docs/features/comma
# For developers
Before building the CLI, you must build the Immich server and the open-api client. To build the server, run the following in the server folder:
$ npm install
$ npm run build
Then, to build the open-api client run the following in the open-api folder:
$ ./bin/generate-open-api.sh
To run the Immich CLI from source, run the following in the cli folder:
$ npm install
$ npm run build
$ ts-node .
@ -17,3 +27,4 @@ You can also build and install the CLI using
$ npm run build
$ npm install -g .
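Once installed globally, a typical session might look like the following (the server URL and API key below are placeholder examples):

$ immich login https://your-server.example.com/api YOUR_API_KEY
$ immich upload --recursive ~/Pictures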

@ -55,6 +55,7 @@ export default [
'unicorn/import-style': 'off',
curly: 2,
'prettier/prettier': 0,
'object-shorthand': ['error', 'always'],
},
},
];

cli/package-lock.json generated (1194 changed lines)

File diff suppressed because it is too large.

@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.13",
"version": "2.2.36",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@ -20,14 +20,14 @@
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/mock-fs": "^4.13.1",
"@types/node": "^20.14.13",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@types/node": "^22.9.0",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.0.0",
"eslint": "^9.14.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^55.0.0",
@ -37,9 +37,9 @@
"prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^4.3.2",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.3.0",
"vitest-fetch-mock": "^0.4.0",
"yaml": "^2.3.1"
},
"scripts": {
@ -67,6 +67,6 @@
"lodash-es": "^4.17.21"
},
"volta": {
"node": "20.16.0"
"node": "22.12.0"
}
}

@ -1,5 +1,6 @@
import {
Action,
AssetBulkUploadCheckItem,
AssetBulkUploadCheckResult,
AssetMediaResponseDto,
AssetMediaStatus,
@ -11,7 +12,7 @@ import {
getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { Presets, SingleBar } from 'cli-progress';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
@ -90,23 +91,23 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
return { newFiles: files, duplicates: [] };
}
const progressBar = new SingleBar(
{ format: 'Checking files | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
const multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
progressBar.start(files.length, 0);
const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
const newFiles: string[] = [];
const duplicates: Asset[] = [];
const queue = new Queue<string[], AssetBulkUploadCheckResults>(
async (filepaths: string[]) => {
const dto = await Promise.all(
filepaths.map(async (filepath) => ({ id: filepath, checksum: await sha1(filepath) })),
);
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets: dto } });
const checkBulkUploadQueue = new Queue<AssetBulkUploadCheckItem[], void>(
async (assets: AssetBulkUploadCheckItem[]) => {
const response = await checkBulkUpload({ assetBulkUploadCheckDto: { assets } });
const results = response.results as AssetBulkUploadCheckResults;
for (const { id: filepath, assetId, action } of results) {
if (action === Action.Accept) {
newFiles.push(filepath);
@ -115,19 +116,46 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
duplicates.push({ id: assetId as string, filepath });
}
}
progressBar.increment(filepaths.length);
checkProgressBar.increment(assets.length);
},
{ concurrency, retry: 3 },
);
const results: { id: string; checksum: string }[] = [];
let checkBulkUploadRequests: AssetBulkUploadCheckItem[] = [];
const queue = new Queue<string, AssetBulkUploadCheckItem[]>(
async (filepath: string): Promise<AssetBulkUploadCheckItem[]> => {
const dto = { id: filepath, checksum: await sha1(filepath) };
results.push(dto);
checkBulkUploadRequests.push(dto);
if (checkBulkUploadRequests.length === 5000) {
const batch = checkBulkUploadRequests;
checkBulkUploadRequests = [];
void checkBulkUploadQueue.push(batch);
}
hashProgressBar.increment();
return results;
},
{ concurrency, retry: 3 },
);
for (const items of chunk(files, concurrency)) {
await queue.push(items);
for (const item of files) {
void queue.push(item);
}
await queue.drained();
progressBar.stop();
if (checkBulkUploadRequests.length > 0) {
void checkBulkUploadQueue.push(checkBulkUploadRequests);
}
await checkBulkUploadQueue.drained();
multiBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
@ -201,8 +229,8 @@ export const uploadFiles = async (files: string[], { dryRun, concurrency }: Uplo
{ concurrency, retry: 3 },
);
for (const filepath of files) {
await queue.push(filepath);
for (const item of files) {
void queue.push(item);
}
await queue.drained();

@ -72,8 +72,8 @@ export class Queue<T, R> {
* @returns Promise<void> - The returned Promise will be resolved when all tasks in the queue have been processed by a worker.
* This promise could be ignored as it will not lead to a `unhandledRejection`.
*/
async drained(): Promise<void> {
await this.queue.drain();
drained(): Promise<void> {
return this.queue.drained();
}
/**

@ -115,17 +115,7 @@ const tests: Test[] = [
'/albums/image3.jpg': true,
},
},
{
test: 'should support globbing paths',
options: {
pathsToCrawl: ['/photos*'],
},
files: {
'/photos1/image1.jpg': true,
'/photos2/image2.jpg': true,
'/images/image3.jpg': false,
},
},
{
test: 'should crawl a single path without trailing slash',
options: {

@ -141,25 +141,21 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
}
}
let searchPattern: string;
if (patterns.length === 1) {
searchPattern = patterns[0];
} else if (patterns.length === 0) {
if (patterns.length === 0) {
return crawledFiles;
} else {
searchPattern = '{' + patterns.join(',') + '}';
}
if (recursive) {
searchPattern = searchPattern + '/**/';
}
const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern;
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
return `${escapedPattern}/*.{${extensions.join(',')}}`;
});
searchPattern = `${searchPattern}/*.{${extensions.join(',')}}`;
const globbedFiles = await glob(searchPattern, {
const globbedFiles = await glob(searchPatterns, {
absolute: true,
caseSensitiveMatch: false,
onlyFiles: true,
dot: includeHidden,
ignore: [`**/${exclusionPattern}`],
});

@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.38.0"
constraints = "4.38.0"
version = "4.48.0"
constraints = "4.48.0"
hashes = [
"h1:+27KAHKHBDvv3dqyJv5vhtdKQZJzoZXoMqIyronlHNw=",
"h1:/uV9RgOUhkxElkHhWs8fs5ZbX9vj6RCBfP0oJO0JF30=",
"h1:1DNAdMugJJOAWD/XYiZenYYZLy7fw2ctjT4YZmkRCVQ=",
"h1:1wn4PmCLdT7mvd74JkCGmJDJxTQDkcxc+1jNbmwnMHA=",
"h1:BIHB4fBxHg2bA9KbL92njhyctxKC8b6hNDp60y5QBss=",
"h1:HCQpvKPsMsR4HO5eDqt+Kao7T7CYeEH7KZIO7xMcC6M=",
"h1:HTomuzocukpNLwtWzeSF3yteCVsyVKbwKmN66u9iPac=",
"h1:YDxsUBhBAwHSXLzVwrSlSBOwv1NvLyry7s5SfCV7VqQ=",
"h1:dchVhxo+Acd1l2RuZ88tW9lWj4422QMfgtxKvKCjYrw=",
"h1:eypa+P4ZpsEGMPFuCE+6VkRefu0TZRFmVBOpK+PDOPY=",
"h1:f3yjse2OsRZj7ZhR7BLintJMlI4fpyt8HyDP/zcEavw=",
"h1:mSJ7xj8K+xcnEmGg7lH0jjzyQb157wH94ULTAlIV+HQ=",
"h1:tt+2J2Ze8VIdDq2Hr6uHlTJzAMBRpErBwTYx0uD5ilE=",
"h1:uQW8SKxmulqrAisO+365mIf2FueINAp5PY28bqCPCug=",
"zh:171ab67cccceead4514fafb2d39e4e708a90cce79000aaf3c29aab7ed4457071",
"zh:18aa7228447baaaefc49a43e8eff970817a7491a63d8937e796357a3829dd979",
"zh:2cbaab6092e81ba6f41fa60a50f14e980c8ec327ee11d0b21f16a478be4b7567",
"zh:53b8e49c06f5b31a8c681f8c0669cf43e78abe71657b8182a221d096bb514965",
"zh:6037cfc60b4b647aabae155fcb46d649ed7c650e0287f05db52b2068f1e27c8a",
"zh:62460982ce1a869eebfca675603fbbd50416cf6b69459fb855bfbe5ae2b97607",
"zh:65f6f3a8470917b6398baa5eb4f74b3932b213eac7c0202798bfad6fd1ee17df",
"h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
"h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
"h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
"h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
"h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
"h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
"h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
"h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
"h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
"h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
"h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
"h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
"h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
"h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
"zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
"zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
"zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
"zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
"zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
"zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
"zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
"zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
"zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
"zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8b5cebe64bf04105a49178a165b6a8800a9a33bae6767143a47fe4977755f805",
"zh:a5596635db0993ee3c3060fbc2227d91b239466e96d2d82642625a5aa2486988",
"zh:b3a9c63038441f13c311fd4b2c7e69e571445e5a7365a20c7cc9046b7e6c8aba",
"zh:b585e7e4d7648a540b14b9182819214896ca9337729eeb1f2034833b17db754d",
"zh:d2c3c545318ac8542369e9fc8228e29ee585febdf203a450fad3e0eded71ce02",
"zh:e95dd2d6c3525073af47d47b763cb81b6a51b20cabf76f789c69328922da9ecf",
"zh:eee6e590b36d6c6168a7daae8afa74a8721fd7aa9f62a710f04a311975100722",
"zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
"zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
"zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
"zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
]
}

@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.38.0"
version = "4.48.0"
}
}
}

@ -9,6 +9,6 @@ resource "cloudflare_record" "immich_app_release_domain" {
proxied = true
ttl = 1
type = "CNAME"
value = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
content = data.terraform_remote_state.cloudflare_immich_app_docs.outputs.immich_app_branch_pages_hostname
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}

@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.38.0"
constraints = "4.38.0"
version = "4.48.0"
constraints = "4.48.0"
hashes = [
"h1:+27KAHKHBDvv3dqyJv5vhtdKQZJzoZXoMqIyronlHNw=",
"h1:/uV9RgOUhkxElkHhWs8fs5ZbX9vj6RCBfP0oJO0JF30=",
"h1:1DNAdMugJJOAWD/XYiZenYYZLy7fw2ctjT4YZmkRCVQ=",
"h1:1wn4PmCLdT7mvd74JkCGmJDJxTQDkcxc+1jNbmwnMHA=",
"h1:BIHB4fBxHg2bA9KbL92njhyctxKC8b6hNDp60y5QBss=",
"h1:HCQpvKPsMsR4HO5eDqt+Kao7T7CYeEH7KZIO7xMcC6M=",
"h1:HTomuzocukpNLwtWzeSF3yteCVsyVKbwKmN66u9iPac=",
"h1:YDxsUBhBAwHSXLzVwrSlSBOwv1NvLyry7s5SfCV7VqQ=",
"h1:dchVhxo+Acd1l2RuZ88tW9lWj4422QMfgtxKvKCjYrw=",
"h1:eypa+P4ZpsEGMPFuCE+6VkRefu0TZRFmVBOpK+PDOPY=",
"h1:f3yjse2OsRZj7ZhR7BLintJMlI4fpyt8HyDP/zcEavw=",
"h1:mSJ7xj8K+xcnEmGg7lH0jjzyQb157wH94ULTAlIV+HQ=",
"h1:tt+2J2Ze8VIdDq2Hr6uHlTJzAMBRpErBwTYx0uD5ilE=",
"h1:uQW8SKxmulqrAisO+365mIf2FueINAp5PY28bqCPCug=",
"zh:171ab67cccceead4514fafb2d39e4e708a90cce79000aaf3c29aab7ed4457071",
"zh:18aa7228447baaaefc49a43e8eff970817a7491a63d8937e796357a3829dd979",
"zh:2cbaab6092e81ba6f41fa60a50f14e980c8ec327ee11d0b21f16a478be4b7567",
"zh:53b8e49c06f5b31a8c681f8c0669cf43e78abe71657b8182a221d096bb514965",
"zh:6037cfc60b4b647aabae155fcb46d649ed7c650e0287f05db52b2068f1e27c8a",
"zh:62460982ce1a869eebfca675603fbbd50416cf6b69459fb855bfbe5ae2b97607",
"zh:65f6f3a8470917b6398baa5eb4f74b3932b213eac7c0202798bfad6fd1ee17df",
"h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
"h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
"h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
"h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
"h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
"h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
"h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
"h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
"h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
"h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
"h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
"h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
"h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
"h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
"zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
"zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
"zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
"zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
"zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
"zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
"zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
"zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
"zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
"zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8b5cebe64bf04105a49178a165b6a8800a9a33bae6767143a47fe4977755f805",
"zh:a5596635db0993ee3c3060fbc2227d91b239466e96d2d82642625a5aa2486988",
"zh:b3a9c63038441f13c311fd4b2c7e69e571445e5a7365a20c7cc9046b7e6c8aba",
"zh:b585e7e4d7648a540b14b9182819214896ca9337729eeb1f2034833b17db754d",
"zh:d2c3c545318ac8542369e9fc8228e29ee585febdf203a450fad3e0eded71ce02",
"zh:e95dd2d6c3525073af47d47b763cb81b6a51b20cabf76f789c69328922da9ecf",
"zh:eee6e590b36d6c6168a7daae8afa74a8721fd7aa9f62a710f04a311975100722",
"zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
"zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
"zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
"zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
]
}

@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.38.0"
version = "4.48.0"
}
}
}

@ -9,7 +9,7 @@ resource "cloudflare_record" "immich_app_branch_subdomain" {
proxied = true
ttl = 1
type = "CNAME"
value = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
content = "${replace(var.prefix_name, "/\\/|\\./", "-")}.${local.is_release ? data.terraform_remote_state.cloudflare_account.outputs.immich_app_archive_pages_project_subdomain : data.terraform_remote_state.cloudflare_account.outputs.immich_app_preview_pages_project_subdomain}"
zone_id = data.terraform_remote_state.cloudflare_account.outputs.immich_app_zone_id
}
@ -18,7 +18,7 @@ output "immich_app_branch_subdomain" {
}
output "immich_app_branch_pages_hostname" {
value = cloudflare_record.immich_app_branch_subdomain.value
value = cloudflare_record.immich_app_branch_subdomain.content
}
output "pages_project_name" {

@ -36,13 +36,18 @@ services:
IMMICH_BUILD_URL: https://github.com/immich-app/immich/actions/runs/9654404849
IMMICH_BUILD_IMAGE: development
IMMICH_BUILD_IMAGE_URL: https://github.com/immich-app/immich/pkgs/container/immich-server
IMMICH_THIRD_PARTY_SOURCE_URL: https://github.com/immich-app/immich/
IMMICH_THIRD_PARTY_BUG_FEATURE_URL: https://github.com/immich-app/immich/issues
IMMICH_THIRD_PARTY_DOCUMENTATION_URL: https://immich.app/docs
IMMICH_THIRD_PARTY_SUPPORT_URL: https://immich.app/docs/third-party
ulimits:
nofile:
soft: 1048576
hard: 1048576
ports:
- 3001:3001
- 9230:9230
- 9231:9231
- 2283:2283
depends_on:
- redis
- database
@ -52,16 +57,19 @@ services:
immich-web:
container_name: immich_web
image: immich-web-dev:latest
# Needed for rootless docker setup, see https://github.com/moby/moby/issues/45919
# user: 0:0
build:
context: ../web
command: ['/usr/src/app/bin/immich-web']
env_file:
- .env
ports:
- 2283:3000
- 3000:3000
- 24678:24678
volumes:
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
- /usr/src/app/node_modules
ulimits:
@ -98,7 +106,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
healthcheck:
test: redis-cli ping || exit 1
@ -117,28 +125,25 @@ services:
ports:
- 5432:5432
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
# set IMMICH_METRICS=true in .env to enable metrics
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
# immich-prometheus:
# container_name: immich_prometheus
# ports:

@ -16,7 +16,7 @@ services:
env_file:
- .env
ports:
- 2283:3001
- 2283:2283
depends_on:
- redis
- database
@ -47,7 +47,7 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
healthcheck:
test: redis-cli ping || exit 1
restart: always
@ -67,19 +67,31 @@ services:
ports:
- 5432:5432
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
restart: always
# set IMMICH_METRICS=true in .env to enable metrics
# set IMMICH_TELEMETRY_INCLUDE=all in .env to enable metrics
immich-prometheus:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:cafe963e591c872d38f3ea41ff8eb22cee97917b7c97b5c0ccd43a419f11f613
image: prom/prometheus@sha256:565ee86501224ebbb98fc10b332fa54440b100469924003359edf49cbce374bd
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@ -91,7 +103,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.1.3-ubuntu@sha256:e10453733015f31103cb530425f32c994816b50102886fa885dafea2c50a711c
image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c
volumes:
- grafana-data:/var/lib/grafana

@ -22,7 +22,7 @@ services:
env_file:
- .env
ports:
- 2283:3001
- '2283:2283'
depends_on:
- redis
- database
@ -48,7 +48,7 @@ services:
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e
image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
healthcheck:
test: redis-cli ping || exit 1
restart: always
@ -65,11 +65,23 @@ services:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
test: >-
pg_isready --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" || exit 1;
Chksum="$$(psql --dbname="$${POSTGRES_DB}" --username="$${POSTGRES_USER}" --tuples-only --no-align
--command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')";
echo "checksum failure count is $$Chksum";
[ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
command: >-
postgres
-c shared_preload_libraries=vectors.so
-c 'search_path="$$user", public, vectors'
-c logging_collector=on
-c max_wal_size=2GB
-c shared_buffers=512MB
-c wal_compression=on
restart: always
volumes:

View File

@ -51,5 +51,4 @@ services:
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:
- LD_LIBRARY_PATH=/usr/lib/wsl/lib
- LIBVA_DRIVER_NAME=d3d12

View File

@ -1 +1 @@
20.16.0
22.12.0

View File

@ -52,14 +52,25 @@ On iOS (iPhone and iPad), the operating system determines if a particular app ca
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
- Use the Immich app more often.
### Why are features not working with a self-signed cert or mTLS?
Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (foreground and/or background).
We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
---
## Assets
### Does Immich change the file?
No, Immich does not touch the original file under any circumstances,
all edited metadata are saved in the companion sidecar file and the database.
No, Immich does not modify the original files.
All edited metadata is saved in companion `.xmp` sidecar files and the database.
However, Immich will delete original files that have been trashed when the trash is emptied in the Immich UI.
### Why do my file names appear as a random string in the file manager?
When the Storage Template is off (the default), Immich stores files under random names (UUIDs) to prevent duplicate file names. To restore the original file names, you must enable the Storage Template and then run the STORAGE TEMPLATE MIGRATION job.
It is recommended to read about [Storage Template](https://immich.app/docs/administration/storage-template) before activation.
### Can I add my existing photo library?
@ -157,13 +168,26 @@ We haven't implemented an official mechanism for creating albums from external l
Duplicate checking only exists for upload libraries, using the file hash. Furthermore, duplicate checking is not global, but _per library_. Therefore, a situation where the same file appears twice in the timeline is possible, especially for external libraries.
### Why are my edits to files not being saved in read-only external libraries?
Images in read-write external libraries (the default) can be edited as normal.
In read-only libraries (`:ro` in the `docker-compose.yml`), Immich is unable to create the `.xmp` sidecar files to store edited file metadata.
For this reason, the metadata (timestamp, location, description, star rating, etc.) cannot be edited for files in read-only external libraries.
### How are deletions of files handled in external libraries?
Immich will attempt to delete original files that have been trashed when the trash is emptied.
In read-write external libraries (the default), Immich will delete the original file.
In read-only libraries (`:ro` in the `docker-compose.yml`), files can still be trashed in the UI.
However, when the trash is emptied, the files will re-appear in the main timeline since Immich is unable to delete the original file.
---
## Machine Learning
### How does smart search work?
Immich uses CLIP models. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
Immich uses CLIP models. An ML model converts each image to an "embedding", which is essentially a string of numbers that semantically encodes what is in the image. The same is done for the text that you enter when you do a search, and that text embedding is then compared with those of the images to find similar ones. As such, there are no "tags", "labels", or "descriptions" generated that you can look at. For more information about CLIP and its capabilities, read about it [here](https://openai.com/research/clip).
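For intuition, the comparison step is typically a cosine similarity between the text embedding and each image embedding; this is a general property of CLIP-style models rather than an Immich-specific detail:

$$
\mathrm{sim}(v, t) = \frac{v \cdot t}{\lVert v \rVert \, \lVert t \rVert}
$$

Images whose embeddings score highest against the text embedding are returned as the search results.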
### How does facial recognition work?
@ -309,7 +333,11 @@ You may need to add mount points or docker volumes for the following internal co
- `immich-machine-learning:/.cache`
- `redis:/data`
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION`.
The non-root user/group needs read/write access to the volume mounts, including `UPLOAD_LOCATION` and `/cache` for machine-learning.
:::note Docker Compose Volumes
The Docker Compose top-level volume element does not support non-root access; all of the above volumes must be local volume mounts.
:::
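As a minimal sketch of granting that access on the host (the uid/gid `1000:1000` and the paths are placeholders, not Immich defaults):

```bash
# Give the non-root uid/gid ownership of the bind-mounted host directories
sudo chown -R 1000:1000 /path/to/UPLOAD_LOCATION /path/to/model-cache
```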
For a further hardened system, you can add the following block to every container except for `immich_postgres`.

View File

@ -15,12 +15,23 @@ Immich saves [file paths in the database](https://github.com/immich-app/immich/d
Refer to the official [postgres documentation](https://www.postgresql.org/docs/current/backup.html) for details about backing up and restoring a postgres database.
:::
The recommended way to back up and restore the Immich database is to use the `pg_dumpall` command. When restoring, you need to delete the `DB_DATA_LOCATION` folder (if it exists) to reset the database.
:::caution
It is not recommended to directly backup the `DB_DATA_LOCATION` folder. Doing so while the database is running can lead to a corrupted backup that cannot be restored.
:::
### Automatic Database Backups
Immich will automatically create database backups by default. The backups are stored in `UPLOAD_LOCATION/backups`.
You can adjust the schedule and amount of kept backups in the [admin settings](http://my.immich.app/admin/system-settings?isOpen=backup).
By default, Immich will keep the last 14 backups and create a new backup every day at 2:00 AM.
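As a quick sanity check that the dumps are being written (a sketch; substitute your actual `UPLOAD_LOCATION`):

```bash
# List the automatic database dumps, newest first
ls -lt /path/to/UPLOAD_LOCATION/backups
```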
#### Restoring
We hope to make restoring simpler in future versions; for now, you can find the backups in the `UPLOAD_LOCATION/backups` folder on your host.
Then follow the steps in the next section to restore the database.
### Manual Backup and Restore
<Tabs>
<TabItem value="Linux system" label="Linux system" default>
@ -29,83 +40,52 @@ docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgre
```
```bash title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch.
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them.
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# rm -rf DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
<TabItem value="Windows system (PowerShell)" label="Windows system (PowerShell)">
```powershell title='Backup'
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres > "\path\to\backup\dump.sql"
[System.IO.File]::WriteAllLines("C:\absolute\path\to\backup\dump.sql", (docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres))
```
```powershell title='Restore'
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch.
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch.
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them.
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: - 'C:\path\to\backup\dump.sql':/dump.sql) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
gc "C:\path\to\backup\dump.sql" | docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default
cat "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --username=postgres # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
</Tabs>
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.).
Note that for the database restore to proceed properly, it requires a completely fresh install (i.e. the Immich server has never run since creating the Docker containers). If the Immich app has run, Postgres conflicts may be encountered upon database restoration (relation already exists, violated foreign key constraints, multiple primary keys, etc.), in which case you need to delete the `DB_DATA_LOCATION` folder to reset the database.
:::tip
Some deployment methods make it difficult to start the database without also starting the server or microservices. In these cases, you may set the environmental variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Note that both the server and microservices must have this variable set to prevent the migrations from running. Be sure to remove this variable and restart the services after the database is restored.
:::
The database dumps can also be automated (using [this image](https://github.com/prodrigestivill/docker-postgres-backup-local)) by editing the docker compose file to match the following:
```yaml
services:
...
backup:
container_name: immich_db_dumper
image: prodrigestivill/postgres-backup-local:14
restart: always
env_file:
- .env
environment:
POSTGRES_HOST: database
POSTGRES_CLUSTER: 'TRUE'
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_DB: ${DB_DATABASE_NAME}
SCHEDULE: "@daily"
POSTGRES_EXTRA_OPTS: '--clean --if-exists'
BACKUP_DIR: /db_dumps
volumes:
- ./db_dumps:/db_dumps
depends_on:
- database
```
Then you can restore with the same command but pointed at the latest dump.
```bash title='Automated Restore'
gunzip < db_dumps/last/immich-latest.sql.gz \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --username=postgres
```
:::note
If you see the error `ERROR: type "earth" does not exist`, or you have problems with Reverse Geocoding after a restore, add the following `sed` fragment to your restore command.
Example: `gunzip < "/path/to/backup/dump.sql.gz" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | docker exec -i immich_postgres psql --username=postgres`
Some deployment methods make it difficult to start the database without also starting the server. In these cases, you may set the environment variable `DB_SKIP_MIGRATIONS=true` before starting the services. This will prevent the server from running migrations that interfere with the restore process. Be sure to remove this variable and restart the services after the database is restored.
:::
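A minimal sketch of that workflow, assuming the compose stack from the examples above loads variables from `.env` via `env_file`:

```bash
echo 'DB_SKIP_MIGRATIONS=true' >> .env    # temporarily skip migrations
docker compose up -d                      # start services, then restore the database as shown above
sed -i '/DB_SKIP_MIGRATIONS/d' .env       # remove the variable again
docker compose up -d --force-recreate immich-server
```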
## Filesystem
@ -157,7 +137,7 @@ for more info read the [release notes](https://github.com/immich-app/immich/rele
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
- Stored in `UPLOAD_LOCATION/postgres`.
- Stored in `DB_DATA_LOCATION`.
:::danger
A backup of this folder does not constitute a backup of your database!
@ -191,7 +171,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- Stored in `UPLOAD_LOCATION/profile/<userID>`.
- **Thumbs Images:**
- Preview images (blurred, small, large) for each asset and thumbnails for recognized faces.
- Stored in `UPLOCAD_LOCATION/thumbs/<userID>`.
- Stored in `UPLOAD_LOCATION/thumbs/<userID>`.
- **Encoded Assets:**
- Videos that have been re-encoded from the original for wider compatibility. The original is not removed.
- Stored in `UPLOAD_LOCATION/encoded-video/<userID>`.
@ -203,7 +183,7 @@ When you turn off the storage template engine, it will leave the assets in `UPLO
- The Immich database containing all the information to allow the system to function properly.
**Note:** This folder will only appear to users who have made the changes mentioned in [v1.102.0](https://github.com/immich-app/immich/discussions/8930) (an optional, non-mandatory change) or who started with this version.
- Stored in `UPLOAD_LOCATION/postgres`.
- Stored in `DB_DATA_LOCATION`.
:::danger
A backup of this folder does not constitute a backup of your database!

View File

@ -8,16 +8,20 @@ Immich supports the option to send notifications via Email for the following eve
## SMTP settings
You can access the settings panel from the web at `Administration -> Settings -> Notification settings`
You can access the settings panel from the web at `Administration -> Settings -> Notification settings`.
Under Email, enter the following details to connect with SMTP servers.
Under Email, enter the required details to connect with an SMTP server.
You can use the following [guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
<img src={require('./img/email-settings.png').default} width="80%" title="SMTP settings" />
You can use [this guide](/docs/guides/smtp-gmail) to use Gmail's SMTP server.
## User's notifications settings
Users can manage their email notification settings from their account settings page on the web. They can choose to turn email notifications on or off for the following events:
<img src={require('./img/user-notifications-settings.png').default} width="80%" title="User notification settings" />
## Notification templates
You can override the default notification text with custom templates in HTML format. You can use template tags to insert dynamic values into your templates.
<img src={require('./img/user-notifications-templates.png').default} width="80%" title="User notification templates" />

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.1 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 79 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 218 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 195 KiB

View File

@ -22,7 +22,7 @@ Copy the entire `immich-server` block as a new service and make the following ch
- container_name: immich_server
...
- ports:
- - 2283:3001
- - 2283:2283
+ immich-microservices:
+ container_name: immich_microservices
```
@ -52,4 +52,4 @@ Additionally, some jobs run on a schedule, which is every night at midnight. Thi
The Storage Migration job can be run after changing the [Storage Template](/docs/administration/storage-template.mdx), in order to apply the change to the existing library.
:::
<img src={require('./img/admin-jobs.png').default} width="80%" title="Admin jobs" />
<img src={require('./img/admin-jobs.webp').default} width="60%" title="Admin jobs" />

View File

@ -3,7 +3,7 @@
This page contains details about using OAuth in Immich.
:::tip
Unable to set `app.immich:/` as a valid redirect URI? See [Mobile Redirect URI](#mobile-redirect-uri) for an alternative solution.
Unable to set `app.immich:///oauth-callback` as a valid redirect URI? See [Mobile Redirect URI](#mobile-redirect-uri) for an alternative solution.
:::
## Overview
@ -11,7 +11,7 @@ Unable to set `app.immich:/` as a valid redirect URI? See [Mobile Redirect URI](
Immich supports 3rd party authentication via [OpenID Connect][oidc] (OIDC), an identity layer built on top of OAuth2. OIDC is supported by most identity providers, including:
- [Authentik](https://goauthentik.io/integrations/sources/oauth/#openid-connect)
- [Authelia](https://www.authelia.com/configuration/identity-providers/openid-connect/clients/)
- [Authelia](https://www.authelia.com/integration/openid-connect/immich/)
- [Okta](https://www.okta.com/openid-connect/)
- [Google](https://developers.google.com/identity/openid-connect/openid-connect)
@ -30,7 +30,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured
The **Sign-in redirect URIs** should include:
- `app.immich:/` - for logging in with OAuth from the [Mobile App](/docs/features/mobile-app.mdx)
- `app.immich:///oauth-callback` - for logging in with OAuth from the [Mobile App](/docs/features/mobile-app.mdx)
- `http://DOMAIN:PORT/auth/login` - for logging in with OAuth from the Web Client
- `http://DOMAIN:PORT/user-settings` - for manually linking OAuth in the Web Client
@ -38,7 +38,7 @@ Before enabling OAuth in Immich, a new client application needs to be configured
Mobile
- `app.immich:/` (You **MUST** include this for iOS and Android mobile apps to work properly)
- `app.immich:///oauth-callback` (You **MUST** include this for iOS and Android mobile apps to work properly)
Localhost
@ -96,16 +96,16 @@ When Auto Launch is enabled, the login page will automatically redirect the user
## Mobile Redirect URI
The redirect URI for the mobile app is `app.immich:/`, which is a [Custom Scheme](https://developer.apple.com/documentation/xcode/defining-a-custom-url-scheme-for-your-app). If this custom scheme is an invalid redirect URI for your OAuth Provider, you can work around this by doing the following:
The redirect URI for the mobile app is `app.immich:///oauth-callback`, which is a [Custom Scheme](https://developer.apple.com/documentation/xcode/defining-a-custom-url-scheme-for-your-app). If this custom scheme is an invalid redirect URI for your OAuth Provider, you can work around this by doing the following:
1. Configure an http(s) endpoint to forwards requests to `app.immich:/`
1. Configure an http(s) endpoint that forwards requests to `app.immich:///oauth-callback`
2. Whitelist the new endpoint as a valid redirect URI with your provider.
3. Specify the new endpoint as the `Mobile Redirect URI Override`, in the OAuth settings.
With these steps in place, you should be able to use OAuth from the [Mobile App](/docs/features/mobile-app.mdx) without a custom scheme redirect URI.
:::info
Immich has a route (`/api/oauth/mobile-redirect`) that is already configured to forward requests to `app.immich:/`, and can be used for step 1.
Immich has a route (`/api/oauth/mobile-redirect`) that is already configured to forward requests to `app.immich:///oauth-callback`, and can be used for step 1.
:::
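As a rough way to check that the forwarding route responds in your deployment (a sketch; the domain is a placeholder and the exact redirect target is an assumption based on the scheme described above):

```bash
# Expect the response headers to include a Location pointing at the custom scheme
curl -s -D - -o /dev/null "https://your.immich.domain/api/oauth/mobile-redirect?state=test"
```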
## Example Configuration
@ -154,21 +154,21 @@ Configuration of Authorised redirect URIs (Google Console)
Configuration of OAuth in Immich System Settings
| Setting | Value |
| ---------------------------- | ------------------------------------------------------------------------------------------------------ |
| Issuer URL | [https://accounts.google.com](https://accounts.google.com) |
| Client ID | 7\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***vuls.apps.googleusercontent.com |
| Client Secret | G\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***OO |
| Scope | openid email profile |
| Signing Algorithm | RS256 |
| Storage Label Claim | preferred_username |
| Storage Quota Claim | immich_quota |
| Default Storage Quota (GiB) | 0 (0 for unlimited quota) |
| Button Text | Sign in with Google (optional) |
| Auto Register | Enabled (optional) |
| Auto Launch | Enabled |
| Mobile Redirect URI Override | Enabled (required) |
| Mobile Redirect URI | [https://demo.immich.app/api/oauth/mobile-redirect](https://demo.immich.app/api/oauth/mobile-redirect) |
| Setting | Value |
| ---------------------------- | ---------------------------------------------------------------------------- |
| Issuer URL | `https://accounts.google.com` |
| Client ID | 7\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***vuls.apps.googleusercontent.com |
| Client Secret | G\***\*\*\*\*\*\*\***\*\*\***\*\*\*\*\*\*\***OO |
| Scope | openid email profile |
| Signing Algorithm | RS256 |
| Storage Label Claim | preferred_username |
| Storage Quota Claim | immich_quota |
| Default Storage Quota (GiB) | 0 (0 for unlimited quota) |
| Button Text | Sign in with Google (optional) |
| Auto Register | Enabled (optional) |
| Auto Launch | Enabled |
| Mobile Redirect URI Override | Enabled (required) |
| Mobile Redirect URI | `https://example.immich.app/api/oauth/mobile-redirect` |
</details>

View File

@ -13,9 +13,9 @@ Running with a pre-existing Postgres server can unlock powerful administrative f
You must install pgvecto.rs into your instance of Postgres using their [instructions][vectors-install]. After installation, add `shared_preload_libraries = 'vectors.so'` to your `postgresql.conf`. If you already have some `shared_preload_libraries` set, you can separate each extension with a comma. For example, `shared_preload_libraries = 'pg_stat_statements, vectors.so'`.
:::note
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported.
Immich is known to work with Postgres versions 14, 15, and 16. Earlier versions are unsupported. Postgres 17 is nominally compatible, but pgvecto.rs does not have prebuilt images or packages for it as of writing.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. For example, if your Immich version uses the dedicated database image `tensorchord/pgvecto-rs:pg14-v0.2.1`, you must install pgvecto.rs `>= 0.2.1, < 0.3.0`.
Make sure the installed version of pgvecto.rs is compatible with your version of Immich. The current accepted range for pgvecto.rs is `>= 0.2.0, < 0.4.0`.
:::
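To verify the setup from the database side, a sketch using `psql` (host, user, and database name are placeholders):

```bash
# Confirm the extension library is preloaded and check the installed version
psql -h db.example.com -U immich -d immich -c 'SHOW shared_preload_libraries;'
psql -h db.example.com -U immich -d immich \
  -c "SELECT extversion FROM pg_extension WHERE extname = 'vectors';"
```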
## Specifying the connection URL

View File

@ -1,5 +1,9 @@
# Repair Page
:::warning
This feature is currently disabled and will be reworked in the near future.
:::
The repair page is designed to give the system administrator information about untracked files and offline paths.
## Natural State

View File

@ -40,6 +40,26 @@ server {
}
```
#### Compatibility with Let's Encrypt
If your nginx configuration includes a section for Let's Encrypt, you likely have a segment similar to the following:
```nginx
location ~ /.well-known {
...
}
```
This particular `location` directive can inadvertently prevent mobile clients from reaching the `/.well-known/immich` path, which is crucial for discovery. The usual error message in this case is: "Your app major version is not compatible with the server". To remedy this, introduce an additional location block specifically for this path, ensuring that requests are correctly proxied to the Immich server:
```nginx
location = /.well-known/immich {
proxy_pass http://<backend_url>:2283;
}
```
By doing so, you'll maintain the functionality of Let's Encrypt while allowing mobile clients to access the necessary Immich path without obstruction.
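To confirm the fix, a quick check from any machine (the domain is a placeholder; the exact payload returned by the endpoint is not shown here):

```bash
# A response served by Immich, rather than a 404 from the well-known block,
# indicates the location directive above is working
curl -i https://your.domain/.well-known/immich
```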
### Caddy example config
As an alternative to nginx, you can also use [Caddy](https://caddyserver.com/) as a reverse proxy (with automatic HTTPS configuration). Below is an example config.
@ -64,3 +84,43 @@ Below is an example config for Apache2 site configuration.
ProxyPreserveHost On
</VirtualHost>
```
### Traefik Proxy example config
The example below is for Traefik version 3.
The most important change is to increase the `respondingTimeouts` of the entrypoint used by Immich, in this example the entrypoint `websecure` on port `443`. By default it is set to 60s, which leads to video uploads failing after 1 minute (Error Code 499). With this config, uploads will only fail after 10 minutes, which is enough in most cases. Increase it further if needed.
`traefik.yaml`
```yaml
[...]
entryPoints:
websecure:
address: :443
# this section needs to be added
transport:
respondingTimeouts:
readTimeout: 600s
idleTimeout: 600s
writeTimeout: 600s
```
The second part goes in the `docker-compose.yml` file that contains Immich. Add the Traefik-specific labels as in the example.
`docker-compose.yml`
```yaml
services:
immich-server:
[...]
labels:
traefik.enable: true
# increase readingTimeouts for the entrypoint used here
traefik.http.routers.immich.entrypoints: websecure
traefik.http.routers.immich.rule: Host(`immich.your-domain.com`)
traefik.http.services.immich.loadbalancer.server.port: 2283
```
Keep in mind that Traefik needs to be able to communicate with the network Immich is in, which is usually done
by adding the Traefik network to the `immich-server` service.

View File

@ -7,7 +7,7 @@ If a storage quota has been defined for the user, the usage number will be displ
:::
:::info External library
External library is not included in the storage quota.
External libraries are not included in the storage quota.
:::
<img src={require('./img/server-stats.png').default} title="server statistic" />

View File

@ -0,0 +1,49 @@
# System Integrity
## Folder checks
:::info
The folders considered for these checks include: `upload/`, `library/`, `thumbs/`, `encoded-video/`, `profile/`, `backups/`
:::
When Immich starts, it performs a series of checks in order to validate that it can read and write files to the volume mounts used by the storage system. If it cannot perform all the required operations, it will fail to start. The checks include:
- Creating an initial hidden file (`.immich`) in each folder
- Reading a hidden file (`.immich`) in each folder
- Overwriting a hidden file (`.immich`) in each folder
The checks are designed to catch the following situations:
- Incorrect permissions (cannot read/write files)
- Missing volume mount (`.immich` files should exist, but are missing)
### Common issues
:::note
`.immich` files serve as markers and help keep track of volume mounts being used by Immich. Except for the situations listed below, they should never be manually created or deleted.
:::
#### Missing `.immich` files
```
Verifying system mount folder checks (enabled=true)
...
ENOENT: no such file or directory, open 'upload/encoded-video/.immich'
```
The above error messages show that the server has previously (successfully) written `.immich` files to each folder, but now does not detect them. This could be because of any of the following:
- Permission error - unable to read the file, but it exists
- File does not exist - volume mount has changed and should be corrected
- File does not exist - user manually deleted it and should be manually re-created (`touch .immich`)
- File does not exist - user restored from a backup, but did not restore each folder (user should restore all folders or manually create `.immich` in any missing folders; see the sketch below)
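If several markers are missing at once (for example after a partial restore), a minimal sketch to re-create them, assuming the default folder layout listed at the top of this page:

```bash
# Re-create the marker files; replace the path with your UPLOAD_LOCATION
for d in upload library thumbs encoded-video profile backups; do
  touch "/path/to/UPLOAD_LOCATION/$d/.immich"
done
```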
### Ignoring the checks
:::warning
The checks are designed to catch common problems that we have seen users run into in the past, and they often indicate that something is wrong and should be fixed. If you know what you're doing and want to disable them, you can set the following environment variable:
:::
```
IMMICH_IGNORE_MOUNT_CHECK_ERRORS=true
```

View File

@ -104,7 +104,7 @@ You can choose to disable a certain type of machine learning, for example smart
### Smart Search
The smart search settings are designed to allow the search tool to be used using [CLIP](https://openai.com/research/clip) models that [can be changed](/docs/FAQ#can-i-use-a-custom-clip-model), different models will necessarily give better results but may consume more processing power, when changing a model it is mandatory to re-run the
The [smart search](/docs/features/smart-search) settings allow the search tool to use [CLIP](https://openai.com/research/clip) models that [can be changed](/docs/FAQ#can-i-use-a-custom-clip-model). Larger models generally give better results but may consume more processing power. After changing a model, you must re-run the
Smart Search job on all images to fully apply the change.
:::info Internet connection
@ -113,15 +113,23 @@ After downloading, there is no need for Immich to connect to the network
Unless version checking has been enabled in the settings.
:::
### Duplicate Detection
Immich uses CLIP embeddings to find likely duplicates. The maximum detection distance can be configured to raise or lower the detection sensitivity.
- **Maximum detection distance -** Maximum distance between two images to consider them duplicates, ranging from 0.001-0.1. Higher values will detect more duplicates, but may result in false positives.
### Facial Recognition
Here you can adjust the facial recognition settings.
Editable settings:
- **Facial Recognition Model -** Models are listed in descending order of size. Larger models are slower and use more memory, but produce better results. Note that you must re-run the Face Detection job for all images upon changing a model.
- **Min Detection Score -** Minimum confidence score for a face to be detected from 0-1. Lower values will detect more faces but may result in false positives.
- **Max Recognition Distance -** Maximum distance between two faces to be considered the same person, ranging from 0-2. Lowering this can prevent labeling two people as the same person, while raising it can prevent labeling the same person as two different people. Note that it is easier to merge two people than to split one person in two, so err on the side of a lower threshold when possible.
- **Min Recognized Faces -** The minimum number of recognized faces for a person to be created (AKA: Core face). Increasing this makes Facial Recognition more precise at the cost of increasing the chance that a face is not assigned to a person.
- **Facial Recognition Model**
- **Min Detection Score**
- **Max Recognition Distance**
- **Min Recognized Faces**
You can learn more about these options on the [Facial Recognition page](/docs/features/facial-recognition#how-face-detection-works).
:::info
When changing the values in Min Detection Score, Max Recognition Distance, and Min Recognized Faces.
@ -149,11 +157,15 @@ Immich supports [Reverse Geocoding](/docs/features/reverse-geocoding) using data
SMTP server setup, for user creation notifications, new albums, etc. More information can be found [here](/docs/administration/email-notification)
## Notification Templates
Override the default notification text with notification templates. More information can be found [here](/docs/administration/email-notification).
## Server Settings
### External Domain
When set, will override the domain name used when viewing and copying a shared link.
Overrides the domain name in shared links and email notifications. The URL should not include a trailing slash.
### Welcome Message

View File

@ -3,6 +3,7 @@ sidebar_position: 1
---
import AppArchitecture from './img/app-architecture.png';
import MobileArchitecture from './img/immich_mobile_architecture.svg';
# Architecture
@ -28,7 +29,14 @@ All three clients use [OpenAPI](./open-api.md) to auto-generate rest clients for
### Mobile App
The mobile app is written in [Flutter](https://flutter.dev/). It uses [Isar Database](https://isar.dev/) for a local database and [Riverpod](https://riverpod.dev/) for state management.
The mobile app is written in [Dart](https://dart.dev/) using [Flutter](https://flutter.dev/). Below is an architecture overview:
<MobileArchitecture className="p-4 dark:bg-immich-dark-primary my-4" />
The diagram shows the target architecture; the current state of the code base does not always follow it yet. New code and contributions should follow this architecture.
Currently, it uses [Isar Database](https://isar.dev/) for a local database and [Riverpod](https://riverpod.dev/) for state management (providers).
Entities and Models are the two types of data classes used. While entities are stored in the on-device database, models are ephemeral and only kept in memory.
The Repositories should be the only place where other data classes are used internally (such as OpenAPI DTOs). However, their interfaces must not use foreign data classes!
### Web Client

View File

@ -15,7 +15,7 @@ Our [GitHub Repository](https://github.com/immich-app/immich) is a [monorepo](ht
| `design/` | Screenshots and logos for the README |
| `docs/` | Source code for the [https://immich.app](https://immich.app) website |
| `machine-learning/` | Source code for the `immich-machine-learning` docker image |
| `misc/release/` | Scripts for version pumps and draft releases |
| `misc/release/` | Scripts for version bumps and draft releases |
| `mobile/` | Source code for the mobile app, both Android and iOS |
| `server/` | Source code for the `immich-server` docker image |
| `web/` | Source code for the `web` |

View File

@ -0,0 +1,104 @@
<mxfile host="app.diagrams.net" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" version="24.7.16">
<diagram name="Page-1" id="Bp2gX--FtC4sSMWxsLrs">
<mxGraphModel dx="1728" dy="954" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="0" pageScale="1" pageWidth="850" pageHeight="1100" background="none" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="zHhczcy2-Jv_nqmJUiNH-1" value="" style="verticalLabelPosition=bottom;verticalAlign=top;html=1;shape=mxgraph.basic.polygon;polyCoords=[[0.25,0],[0.75,0],[1,0.25],[1,0.75],[0.75,1],[0.25,1],[0,0.75],[0,0.25]];polyline=0;strokeWidth=4;rounded=1;fillColor=#4251B0;" vertex="1" parent="1">
<mxGeometry x="280" y="217.5" width="465" height="465" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-2" value="&lt;b&gt;&lt;font style=&quot;font-size: 22px;&quot;&gt;Mobile App&lt;/font&gt;&lt;/b&gt;" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;rounded=1;fontColor=#ffffff;" vertex="1" parent="1">
<mxGeometry x="442.5" y="225" width="140" height="40" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-25" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-4" target="zHhczcy2-Jv_nqmJUiNH-5">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-4" value="Services" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FFB400;" vertex="1" parent="1">
<mxGeometry x="530" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-26" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-12">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-5" value="Repositories" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#1E83F7;" vertex="1" parent="1">
<mxGeometry x="650" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-24" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-6" target="zHhczcy2-Jv_nqmJUiNH-4">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-6" value="Providers" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#ED79B5;" vertex="1" parent="1">
<mxGeometry x="410" y="420" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-29" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-7" target="zHhczcy2-Jv_nqmJUiNH-8">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-30" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-7" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-7" value="Pages" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="290" y="480" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-31" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.25;entryDx=0;entryDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-8" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-8" value="Widgets" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="290" y="360" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-11" value="User" style="shape=umlActor;verticalLabelPosition=bottom;verticalAlign=top;html=1;outlineConnect=0;rounded=1;fillColor=#4251B0;" vertex="1" parent="1">
<mxGeometry x="180" y="368.5" width="81.5" height="163" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-12" value="platform&lt;div&gt;system&lt;/div&gt;" style="rhombus;whiteSpace=wrap;html=1;rounded=1;fillColor=#ED79B5;" vertex="1" parent="1">
<mxGeometry x="800" y="410" width="80" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-13" value="on-device&lt;div&gt;database&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;rounded=1;fillColor=#FA2921;" vertex="1" parent="1">
<mxGeometry x="810" y="310" width="60" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-14" value="server" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;rounded=1;fillColor=#FFB400;" vertex="1" parent="1">
<mxGeometry x="780" y="500" width="120" height="80" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-16" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=0.07;entryY=0.4;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-14">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-39" value="OpenAPI" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];rounded=1;labelBackgroundColor=#1E83F7;" vertex="1" connectable="0" parent="zHhczcy2-Jv_nqmJUiNH-16">
<mxGeometry x="0.0697" y="1" relative="1" as="geometry">
<mxPoint x="8" y="10" as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-23" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-6" target="zHhczcy2-Jv_nqmJUiNH-6">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-27" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;entryX=0;entryY=1;entryDx=0;entryDy=-15;entryPerimeter=0;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-5" target="zHhczcy2-Jv_nqmJUiNH-13">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-34" style="edgeStyle=none;rounded=1;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;dashed=1;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-3">
<mxGeometry relative="1" as="geometry">
<mxPoint x="810" y="360" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-36" value="" style="endArrow=none;dashed=1;html=1;rounded=1;" edge="1" parent="1" source="zHhczcy2-Jv_nqmJUiNH-9">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="512.08" y="665" as="sourcePoint" />
<mxPoint x="512.08" y="265" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-37" value="UI part" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=1;fontSize=14;fontColor=#FFFFFF;" vertex="1" parent="1">
<mxGeometry x="387.5" y="640" width="70" height="30" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-38" value="non-UI part" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=1;fontSize=14;fontColor=#FFFFFF;" vertex="1" parent="1">
<mxGeometry x="550" y="640" width="90" height="30" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-41" value="" style="endArrow=none;dashed=1;html=1;rounded=1;" edge="1" parent="1" target="zHhczcy2-Jv_nqmJUiNH-9">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="512.08" y="665" as="sourcePoint" />
<mxPoint x="512.08" y="265" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-9" value="Models" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#18C249;" vertex="1" parent="1">
<mxGeometry x="470" y="510" width="80" height="60" as="geometry" />
</mxCell>
<mxCell id="zHhczcy2-Jv_nqmJUiNH-3" value="Entities" style="rounded=1;whiteSpace=wrap;html=1;gradientColor=none;fillColor=#18C249;" vertex="1" parent="1">
<mxGeometry x="472.5" y="330" width="80" height="60" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 20 KiB

View File

@ -1,5 +1,9 @@
# PR Checklist
A minimal devcontainer is supplied with this repository. All commands can be executed directly inside this container to avoid tedious installation of the environment.
:::warning
The provided devcontainer isn't complete at the moment; in particular, the dockerized steps in the Makefile (`make dev`, etc.) do not work. Feel free to contribute!
:::
When contributing code through a pull request, please check the following:
## Web Checks
@ -7,6 +11,7 @@ When contributing code through a pull request, please check the following:
- [ ] `npm run lint` (linting via ESLint)
- [ ] `npm run format` (formatting via Prettier)
- [ ] `npm run check:svelte` (Type checking via SvelteKit)
- [ ] `npm run check:typescript` (check typescript)
- [ ] `npm test` (unit tests)
## Documentation

View File

@ -39,13 +39,16 @@ All the services are packaged to run with a single Docker Compose command.
make dev # required Makefile installed on the system.
```
5. Access the dev instance in your browser at http://localhost:2283, or connect via the mobile app.
5. Access the dev instance in your browser at http://localhost:3000, or connect via the mobile app.
All the services will be started with hot-reloading enabled for a quick feedback loop.
You can access the web from `http://your-machine-ip:2283` or `http://localhost:2283` and access the server from the mobile app at `http://your-machine-ip:2283/api`
You can access the web from `http://your-machine-ip:3000` or `http://localhost:3000` and access the server from the mobile app at `http://your-machine-ip:3000/api`
**Note:** the "web" development container runs with uid 1000. If that uid does not have read/write permissions on the mounted volumes, you may encounter errors
**Notes:**
- The "web" development container runs with uid 1000. If that uid does not have read/write permissions on the mounted volumes, you may encounter errors.
- In case of a rootless Docker setup, you need to use root within the container; otherwise you will encounter read/write permission errors. See the comments in `docker/docker-compose.dev.yml`.
#### Connect web to a remote backend
@ -76,7 +79,7 @@ Setting these in the IDE give a better developer experience, auto-formatting cod
### Dart Code Metrics
The mobile app uses DCM (Dart Code Metrics) for linting and metrics calculation. Please refer to the [Getting Started](https://dcm.dev/docs/getting-started/#installation) page for more information on setting up DCM
The mobile app uses DCM (Dart Code Metrics) for linting and metrics calculation. Please refer to the [Getting Started](https://dcm.dev/docs/) page for more information on setting up DCM
Note: Activating the license is not required.
@ -106,7 +109,7 @@ in User `settings.json` (`cmd + shift + p` and search for `Open User Settings JS
"editor.suggest.snippetsPreventQuickSuggestions": false,
"editor.suggestSelection": "first",
"editor.tabCompletion": "onlySnippets",
"editor.wordBasedSuggestions": false,
"editor.wordBasedSuggestions": "off",
"editor.defaultFormatter": "Dart-Code.dart-code"
}
}

View File

@ -1,7 +1,7 @@
# Hardware Transcoding [Experimental]
This feature allows you to use a GPU to accelerate transcoding and reduce CPU load.
Note that hardware transcoding is much less efficient for file sizes.
Note that hardware transcoding produces significantly larger videos than software transcoding with similar settings, typically with lower quality. Using slow presets and preferring more efficient codecs can narrow this gap.
As this is a new feature, it is still experimental and may not work on all systems.
:::info
@ -23,7 +23,7 @@ You do not need to redo any transcoding jobs after enabling hardware acceleratio
- Raspberry Pi is currently not supported.
- Two-pass mode is only supported for NVENC. Other APIs will ignore this setting.
- By default, only encoding is currently hardware accelerated. This means the CPU is still used for software decoding and tone-mapping.
- NVENC and RKMPP can be fully accelerated by enabling hardware decoding in the video transcoding settings.
- You can benefit from end-to-end acceleration by enabling hardware decoding in the video transcoding settings.
- Hardware dependent
- Codec support varies, but H.264 and HEVC are usually supported.
- Notably, NVIDIA and AMD GPUs do not support VP9 encoding.
@ -49,7 +49,7 @@ For RKMPP to work:
- You must have a supported Rockchip ARM SoC.
- Only RK3588 supports hardware tonemapping; other SoCs use slower software tonemapping while still using hardware encoding.
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install [`libmali-valhall-g610-g6p0-gbm`][libmali-rockchip] and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- Tonemapping requires `/usr/lib/aarch64-linux-gnu/libmali.so.1` to be present on your host system. Install the [`libmali`][libmali-rockchip] release that corresponds to your Mali GPU (`libmali-valhall-g610-g13p0-gbm` on RK3588) and modify the [`hwaccel.transcoding.yml`][hw-file] file:
- under `rkmpp` uncomment the 3 lines required for OpenCL tonemapping by removing the `#` symbol at the beginning of each line
- `- /dev/mali0:/dev/mali0`
- `- /etc/OpenCL:/etc/OpenCL:ro`
@ -62,11 +62,14 @@ For RKMPP to work:
1. If you do not already have it, download the latest [`hwaccel.transcoding.yml`][hw-file] file and ensure it's in the same folder as the `docker-compose.yml`.
2. In the `docker-compose.yml` under `immich-server`, uncomment the `extends` section and change `cpu` to the appropriate backend.
- For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
Note: For VAAPI on WSL2, be sure to use `vaapi-wsl` rather than `vaapi`
3. Redeploy the `immich-server` container with these updated settings (a quick device check follows this list).
4. In the Admin page under `Video transcoding settings`, change the hardware acceleration setting to the appropriate option and save.
5. (Optional) If using a compatible backend, you may enable hardware decoding for optimal performance.
Note: For Jasper Lake and Elkhart Lake CPUs, you will need to set `Hardware Acceleration` -> `Constant quality mode` to `CQP`.
5. (Optional) Enable hardware decoding for optimal performance.
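For backends that pass through `/dev/dri` (such as VAAPI and QSV), a quick sanity check after redeploying (a sketch; the container name follows the compose examples in these docs):

```bash
# Render nodes such as renderD128 should be visible inside the container
docker exec -it immich_server ls -l /dev/dri
```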
#### Single Compose File
@ -89,16 +92,7 @@ immich-server:
devices:
- /dev/dri:/dev/dri
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- 2283:3001
depends_on:
- redis
- database
restart: always
...
```
Once this is done, you can continue to step 3 of "Basic Setup".

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 379 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 MiB

View File

@ -1,18 +1,14 @@
# Libraries
# External Libraries
## Overview
External libraries track assets stored in the filesystem outside of Immich. When the external library is scanned, Immich will load videos and photos from disk and create the corresponding assets. These assets will then be shown in the main timeline, and they will look and behave like any other asset, including viewing on the map, adding to albums, etc. Later, if a file is modified outside of Immich, you need to scan the library for the changes to show up.
Immich supports the creation of libraries which is a top-level asset container. Currently, there are two types of libraries: traditional upload libraries that can sync with a mobile device, and external libraries, that keeps up to date with files on disk. Libraries are different from albums in that an asset can belong to multiple albums but only one library, and deleting a library deletes all assets contained within. As of August 2023, this is a new feature and libraries have a lot of potential for future development beyond what is documented here. This document attempts to describe the current state of libraries.
If an external asset is deleted from disk, Immich will move it to trash on rescan. To restore the asset, you need to restore the original file. After 30 days the file will be removed from trash, and any changes to metadata within Immich will be lost.
## External Libraries
:::caution
External libraries tracks assets stored outside of Immich, i.e. in the file system. When the external library is scanned, Immich will read the metadata from the file and create an asset in the library for each image or video file. These items will then be shown in the main timeline, and they will look and behave like any other asset, including viewing on the map, adding to albums, etc.
If you add metadata to an external asset in any way (i.e. add it to an album or edit the description), that metadata is only stored inside Immich and will not be persisted to the external asset file. If you move an asset to another location within the library all such metadata will be lost upon rescan. This is because the asset is considered a new asset after the move. This is a known issue and will be fixed in a future release.
If a file is modified outside of Immich, the changes will not be reflected in immich until the library is scanned again. There are different ways to scan a library depending on the use case:
- Scan Library Files: This is the default scan method and also the quickest. It will scan all files in the library and add new files to the library. It will notice if any files are missing (see below) but not check existing assets
- Scan All Library Files: Same as above, but will check each existing asset to see if the modification time has changed. If it has, the asset will be updated. Since it has to check each asset, this is slower than Scan Library Files.
- Force Scan All Library Files: Same as above, but will read each asset from disk no matter the modification time. This is useful in some cases where an asset has been modified externally but the modification time has not changed. This is the slowest way to scan because it reads each asset from disk.
:::
:::caution
@ -20,22 +16,6 @@ Due to aggressive caching it can take some time for a refreshed asset to appear
:::
In external libraries, the file path is used for duplicate detection. This means that if a file is moved to a different location, it will be added as a new asset. If the file is moved back to its original location, it will be added as a new asset. In contrast to upload libraries, two identical files can be uploaded if they are in different locations. This is a deliberate design choice to make Immich reflect the file system as closely as possible. Remember that duplication detection is only done within the same library, so if you have multiple external libraries, the same file can be added to multiple libraries.
:::caution
If you add assets from an external library to an album and then move the asset to another location within the library, the asset will be removed from the album upon rescan. This is because the asset is considered a new asset after the move. This is a known issue and will be fixed in a future release.
:::
### Deleted External Assets
Note: Either a manual or scheduled library scan must have been performed to identify offline assets before this process will work.
In all above scan methods, Immich will check if any files are missing. This can happen if files are deleted, or if they are on a storage location that is currently unavailable, like a network drive that is not mounted, or a USB drive that has been unplugged. In order to prevent accidental deletion of assets, Immich will not immediately delete an asset from the library if the file is missing. Instead, the asset will be internally marked as offline and will still be visible in the main timeline. If the file is moved back to its original location and the library is scanned again, the asset will be restored.
Finally, files can be deleted from Immich via the `Remove Offline Files` job. This job can be found by the three dots menu for the associated external storage that was configured under Administration > Libraries (the same location described at [create external libraries](#create-external-libraries)). When this job is run, any assets marked as offline will then be removed from Immich. Run this job whenever files have been deleted from the file system and you want to remove them from Immich.
### Import Paths
External libraries use import paths to determine which files to scan. Each library can have multiple import paths so that files from different locations can be added to the same library. Import paths are scanned recursively, and if a file is in multiple import paths, it will only be added once. Each import path must be a readable directory that exists on the filesystem; the import path dialog will alert you of any paths that are not accessible.
@ -66,9 +46,13 @@ Some basic examples:
- `**/Raw/**` will exclude all files in any directory named `Raw`
- `**/*.{tif,jpg}` will exclude all files with the extension `.tif` or `.jpg`
Special characters such as @ should be escaped, for instance:
- `**/\@eadir/**` will exclude all files in any directory named `@eadir`
### Automatic watching (EXPERIMENTAL)
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it will allow automatic watching of the filesystem which means new assets are automatically imported to Immich without needing to rescan. Deleted assets are, as always, marked as offline and can be removed with the "Remove offline files" button.
This feature - currently hidden in the config file - is considered experimental and for advanced users only. If enabled, it allows automatic watching of the filesystem, which means new assets are automatically imported to Immich without needing to rescan.
If your photos are on a network drive, automatic file watching likely won't work. In that case, you will have to rely on a periodic library refresh to pull in your changes.
@ -84,7 +68,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.
### Nightly job
There is an automatic job that's run once a day and refreshes all modified files in all libraries as well as cleans up any libraries stuck in deletion.
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion.
## Usage
@ -104,22 +88,23 @@ The `immich-server` container will need access to the gallery. Modify your docke
immich-server:
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
+ - /mnt/nas/christmas-trip:/mnt/nas/christmas-trip:ro
+ - /home/user/old-pics:/home/user/old-pics:ro
+ - /mnt/nas/christmas-trip:/mnt/media/christmas-trip:ro
+ - /home/user/old-pics:/mnt/media/old-pics:ro
+ - /mnt/media/videos:/mnt/media/videos:ro
+ - /mnt/media/videos2:/mnt/media/videos2 # the files in this folder can be deleted, as it does not end with :ro
+ - "C:/Users/user_name/Desktop/my media:/mnt/media/my-media:ro" # import path in Windows system.
```
:::tip
The `ro` flag at the end only gives read-only access to the volumes. This will disallow the images from being deleted in the web UI.
The `ro` flag at the end only gives read-only access to the volumes.
This will disallow the images from being deleted in the web UI, or adding metadata to the library ([XMP sidecars](/docs/features/xmp-sidecars)).
:::
:::info
_Remember to run `docker compose up -d` to register the changes. Make sure you can see the mounted path in the container._
:::
### Create External Libraries
### Create A New Library
These actions must be performed by the Immich administrator.
@ -143,7 +128,7 @@ Next, we'll add an exclusion pattern to filter out raw files.
- Enter `**/Raw/**` and click save.
- Click save
- Click the drop-down menu on the newly created library
- Click on Scan Library Files
- Click on Scan
The christmas trip library will now be scanned in the background. In the meantime, let's add the videos and old photos to another library.
@ -160,10 +145,26 @@ If you get an error here, please rename the other external library to something
- Click on Add Path
- Enter `/mnt/media/videos` then click Add
- Click Save
- Click on Scan Library Files
- Click on Scan
Within seconds, the assets from the old-pics and videos folders should show up in the main timeline.
### Folder view
:::info
This feature is also available for assets uploaded directly, not only those imported through external libraries.
:::

:::tip
You can use the storage template migration feature for the best experience with uploaded assets in this view.
:::
You can browse your photos and videos by folder like in a file explorer.
Enable this feature from the Users Settings > Features > Folders.
The UI is currently only available for the web; mobile will come in a subsequent release.
<img src={require('./img/folder-view.png').default} width="75%" title='Folder-view' />
### Set Custom Scan Interval
:::note

View File

@ -38,7 +38,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- The GPU must have compute capability 5.2 or greater.
- The server must have the official NVIDIA driver installed.
- The installed driver must be >= 545 (it must support CUDA 12.3.2).
- The installed driver must be >= 535 (it must support CUDA 12.2).
- On Linux (except for WSL2), you also need to have [NVIDIA Container Toolkit][nvct] installed.
#### OpenVINO
@ -53,6 +53,12 @@ You do not need to redo any machine learning jobs after enabling hardware accele
3. Still in `immich-machine-learning`, add one of `-armnn`, `-cuda`, or `-openvino` to the `image` section's tag at the end of the line.
4. Redeploy the `immich-machine-learning` container with these updated settings.
### Confirming Device Usage
You can confirm the device is being recognized and used by checking its utilization. There are many tools to display this, such as `nvtop` for NVIDIA or Intel and `intel_gpu_top` for Intel.
You can also check the logs of the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, or when you search with text in Immich, you should either see a log for `Available ORT providers` containing the relevant provider (e.g. `CUDAExecutionProvider` in the case of CUDA), or a `Loaded ANN model` log entry without errors in the case of ARM NN.
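As a concrete example, both checks can be run from the Docker host. This is a minimal sketch that assumes the default container name `immich_machine_learning` from the official Compose file; adjust it if your deployment names the container differently:

```bash
# Watch GPU utilization while a Smart Search or Face Detection job runs (NVIDIA)
nvtop

# Look for the provider log line after a job has started
docker logs immich_machine_learning 2>&1 | grep -i "available ort providers"
```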
#### Single Compose File
Some platforms, including Unraid and Portainer, do not support multiple Compose files as of writing. As an alternative, you can "inline" the relevant contents of the [`hwaccel.ml.yml`][hw-file] file into the `immich-machine-learning` service directly.
@ -95,9 +101,22 @@ immich-machine-learning:
Once this is done, you can redeploy the `immich-machine-learning` container.
:::info
You can confirm the device is being recognized and used by checking its utilization (via `nvtop` for CUDA, `intel_gpu_top` for OpenVINO, etc.). You can also enable debug logging by setting `IMMICH_LOG_LEVEL=debug` in the `.env` file and restarting the `immich-machine-learning` container. When a Smart Search or Face Detection job begins, you should see a log for `Available ORT providers` containing the relevant provider. In the case of ARM NN, the absence of a `Could not load ANN shared libraries` log entry means it loaded successfully.
:::
#### Multi-GPU
If you want to utilize multiple NVIDIA or Intel GPUs, you can set the `MACHINE_LEARNING_DEVICE_IDS` environmental variable to a comma-separated list of device IDs and set `MACHINE_LEARNING_WORKERS` to the number of listed devices. You can run a command such as `nvidia-smi -L` or `glxinfo -B` to see the currently available devices and their corresponding IDs.
For example, if you have devices 0 and 1, set the values as follows:
```
MACHINE_LEARNING_DEVICE_IDS=0,1
MACHINE_LEARNING_WORKERS=2
```
In this example, the machine learning service will spawn two workers, one of which will allocate models to device 0 and the other to device 1. Different requests will be processed by one worker or the other.
This approach can be used to simply specify a particular device as well. For example, setting `MACHINE_LEARNING_DEVICE_IDS=1` will ensure device 1 is always used instead of device 0.
Note that you should increase job concurrencies to increase overall utilization and more effectively distribute work across multiple GPUs. Additionally, each GPU must be able to load all models. It is not possible to distribute a single model to multiple GPUs that individually have insufficient VRAM, or to delegate a specific model to one GPU.
[hw-file]: https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml
[nvct]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

View File

@ -1,6 +1,9 @@
import Icon from '@mdi/react';
import { mdiCloudOffOutline, mdiCloudCheckOutline } from '@mdi/js';
import MobileAppDownload from '/docs/partials/_mobile-app-download.md';
import MobileAppLogin from '/docs/partials/_mobile-app-login.md';
import MobileAppBackup from '/docs/partials/_mobile-app-backup.md';
import { cloudDonePath, cloudOffPath } from '@site/src/components/svg-paths';
# Mobile App
@ -27,3 +30,63 @@ The beta release channel allows users to test upcoming changes before they are o
:::info
You can enable automatic backup on supported devices. For more information see [Automatic Backup](/docs/features/automatic-backup.md).
:::
## Sync only selected photos
If you have a large number of photos on the device and you would prefer not to back up all of them, it might be prudent to back up only selected photos from the device to the Immich server.
First, you need to enable the Storage Indicator in your app's settings. Navigate to **<ins>Settings -> Photo Grid</ins>** and enable **"Show Storage indicator on asset tiles"**; this makes it easy to distinguish local-only assets and synced assets.
:::note
This will enable a small cloud icon on the bottom right corner of the asset tile, indicating whether the asset is synced to the server:

1. <Icon path={mdiCloudOffOutline} size={1} /> - Local-only asset; not synced to the server
2. <Icon path={mdiCloudCheckOutline} size={1} /> - Asset is synced to the server
:::
Now make sure that the local album is selected in the backup screen (steps 1-2 above). You can find these albums listed in **<ins>Library -> On this device</ins>**. To selectively upload photos from these albums, simply select the local-only photos and tap the "Upload" button in the dynamic bottom menu.
<img
src={require('./img/mobile-upload-open-photo.png').default}
width="50%"
title="Upload button on local asset preview"
/>
<img
src={require('./img/mobile-upload-selected-photos.png').default}
width="40%"
title="Upload button after photos selection"
/>
## Album Sync
You can sync or mirror an album from your phone to the Immich server on your account. For example, if you select the Recents, Camera, and Videos albums for backup, corresponding albums with the same names will be created on the server. Once the assets from those albums are uploaded, they will be put into the target albums automatically.
### Album Synchronization Highlights
- **One-Way Sync:** Synchronization is one-way, from the device to the server.
- **Name Matching:** If an album on the server has the same name as the album on the device, images from the device will be merged with the existing images in the server album.
- **Shared Albums:** If the matching album on the server is shared, the new photos merged into the album will also be shared.
- **Album Structure:** When an album is created for the first time, its structure is based on the initial state. Future updates made on the phone (such as deleting or repositioning photos) will not be reflected in Immich.
- **User-Specific Sync:** Album synchronization is unique to each server user and does not sync between different users or partners.
- **Mobile-Only Feature:** Album synchronization is currently only available on mobile. For similar options on a computer, refer to [Libraries](/docs/features/libraries) for further details.
### Synchronizing albums from the past
Albums can be synchronized to the server even if they did not exist on the server before. To apply this setting:

Tap the cloud icon at the top right -> tap the cog wheel at the top right -> select the sync option under Sync albums.
:::info Sync albums delete/move photos
If you delete or move photos in the local album on your device, this will not be reflected in the album on the server **even if** you click Sync albums.
Synchronization only reflects files you add.
:::
If the same asset is in more than one album, it will only sync to the first album it is in; after that, it won't sync again even if the user taps Sync albums manually.
To overcome this limitation, the files must be removed from the blacklist via:
App settings -> Advanced -> Duplicate Assets -> Clear
:::info
Clearing duplicate assets from the list will cause all previously uploaded duplicate files to be re-sent. The files will not actually be uploaded; they will be rejected on the server side (due to duplication), but they will be synchronized to the album and added back to the blacklist once the synchronization finishes.
:::

View File

@ -25,10 +25,10 @@ The metrics in immich are grouped into API (endpoint calls and response times),
### Configuration
Immich will not expose an endpoint for metrics by default. To enable this endpoint, you can add the `IMMICH_METRICS=true` environmental variable to your `.env` file. Note that only the server and microservices containers currently use this variable.
Immich will not expose an endpoint for metrics by default. To enable this endpoint, you can add the `IMMICH_TELEMETRY_INCLUDE=all` environmental variable to your `.env` file. Note that only the server container currently uses this variable.
:::tip
`IMMICH_METRICS` enables all metrics, but there are also [environmental variables](/docs/install/environment-variables.md#prometheus) to toggle specific metric groups. If you'd like to only expose certain kinds of metrics, you can set only those environmental variables to `true`. Explicitly setting the environmental variable for a metric group overrides `IMMICH_METRICS` for that group. For example, setting `IMMICH_METRICS=true` and `IMMICH_API_METRICS=false` will enable all metrics except API metrics.
`IMMICH_TELEMETRY_INCLUDE=all` enables all metrics. For a more granular configuration you can enumerate the telemetry metrics that should be included as a comma separated list (e.g. `IMMICH_TELEMETRY_INCLUDE=repo,api`). Alternatively, you can also exclude specific metrics with `IMMICH_TELEMETRY_EXCLUDE`. For more information refer to the [environment section](/docs/install/environment-variables.md#prometheus).
:::
The next step is to configure a new or existing Prometheus instance to scrape this endpoint. The following steps assume that you do not have an existing Prometheus instance, but the steps will be similar either way.
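For orientation, a minimal scrape job might look like the sketch below. The ports are the documented defaults for `IMMICH_API_METRICS_PORT` (8081) and `IMMICH_MICROSERVICES_METRICS_PORT` (8082), and the hostname assumes Prometheus shares a Docker network with the Immich server; adjust both to match your deployment:

```yaml
scrape_configs:
  - job_name: immich
    scrape_interval: 30s
    static_configs:
      - targets:
          - immich-server:8081 # API metrics
          - immich-server:8082 # microservices metrics
```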

View File

@ -16,7 +16,7 @@ When sharing shared albums, what's shared is:
- Download all assets as zip file (Web only).
:::info Archive size limited.
If the size of the album exceeds 4GB, the archive files will be divided into 4GB each.
If the size of the album exceeds 4GB, the archive will by default be split into parts of 4GB each. This can be changed on the user settings page.
:::
- Add a description to the album (Web only).
- Slideshow view (Web only).
@ -73,14 +73,14 @@ You can edit the link properties, options include;
- **Allow public user to download -** whether to allow whoever has the link to download all the images or a certain image (optional).
- **Allow public user to upload -** whether to allow whoever has the link to upload assets to the album (optional).
:::info
whoever has the link and have uploaded files cannot delete them.
Whoever has the link and has uploaded files cannot delete them.
:::
- **Expire after -** adding an expiration date to the link (optional).
## Share Specific Assets
A user can share specific assets without linking them to a specific album.
in order to do so;
In order to do this:
1. Go to the timeline
2. Select the assets (Shift can be used for multiple selection)
@ -152,7 +152,7 @@ Some of the features are not available on mobile, to understand what the full fe
## Sharing Between Users
#### Add or remove users from the album.
#### Add or remove users from the album
:::info remove user(s)
When a user is removed from the album, the photos they uploaded will still appear in the album.

View File

@ -1,15 +1,15 @@
# Files Custom Locations
This guide explains storing generated and raw files with docker's volume mount in different locations.
This guide explains how to store generated and raw files with docker's volume mount in different locations.
:::caution Backup
If you use automatic backup tools, remember to update the backup settings after following this guide so that the new paths, especially `profile/`, are backed up.
:::
In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server in the future
In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server.
```diff title=".env"
# You can find documentation for all the supported env variables [here](/docs/install/environment-variables)
# You can find documentation for all the supported environment variables [here](/docs/install/environment-variables)
# Custom location where your uploaded, thumbnails, and transcoded video files are stored
- UPLOAD_LOCATION=./library
@ -17,10 +17,11 @@ In our `.env` file, we will define variables that will help us in the future whe
+ THUMB_LOCATION=/custom/path/immich/thumbs
+ ENCODED_VIDEO_LOCATION=/custom/path/immich/encoded-video
+ PROFILE_LOCATION=/custom/path/immich/profile
+ BACKUP_LOCATION=/custom/path/immich/backups
...
```
After defining the locations for these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
```diff title="docker-compose.yml"
services:
@ -30,6 +31,7 @@ services:
+ - ${THUMB_LOCATION}:/usr/src/app/upload/thumbs
+ - ${ENCODED_VIDEO_LOCATION}:/usr/src/app/upload/encoded-video
+ - ${PROFILE_LOCATION}:/usr/src/app/upload/profile
+ - ${BACKUP_LOCATION}:/usr/src/app/upload/backups
- /etc/localtime:/etc/localtime:ro
```
@ -41,12 +43,11 @@ docker compose up -d
:::note
Because of the underlying properties of docker bind mounts, it is not recommended to mount the `upload/` and `library/` folders as separate bind mounts if they are on the same device.
For this reason, we mount the HDD or network storage to `/usr/src/app/upload` and then mount the folders we want quick access to below this folder.
For this reason, we mount the HDD or the network storage (NAS) to `/usr/src/app/upload` and then mount the folders we want to access under that folder.
The `thumbs/` folder contains both the small thumbnails shown in the timeline, and the larger previews shown when clicking into an image. These cannot be split up.
The `thumbs/` folder contains both the small thumbnails displayed in the timeline and the larger previews shown when clicking into an image. These cannot be separated.
The storage metrics of the Immich server will track the storage available at `UPLOAD_LOCATION`,
so the administrator should setup some kind of monitoring to make sure the SSD does not run out of space. The `profile/` folder is much smaller, typically less than 1 MB.
The storage metrics of the Immich server will track available storage at `UPLOAD_LOCATION`, so the administrator must set up some sort of monitoring to ensure the storage does not run out of space. The `profile/` folder is much smaller, usually less than 1 MB.
:::
Thanks to [Jrasm91](https://github.com/immich-app/immich/discussions/2110#discussioncomment-5477767) for writing the guide.

View File

@ -1,8 +1,22 @@
# Create Custom Map Styles for Immich Using Maptiler
# Custom Map Styles
You may decide that you'd like to modify the style document which is used to draw the maps in Immich. This can be done easily using Maptiler, if you do not want to write an entire JSON document by hand.
You may decide that you'd like to modify the style document which is used to
draw the maps in Immich. In addition to visual customization, this also allows
you to pick your own map tile provider instead of the default one. The default
`style.json` for [light theme](https://github.com/immich-app/immich/tree/main/server/resources/style-light.json)
and [dark theme](https://github.com/immich-app/immich/blob/main/server/resources/style-dark.json)
can be used as a basis for creating your own style.
## Steps
There are several sources for already-made `style.json` map themes, as well as
online generators you can use.
1. In **Immich**, navigate to **Administration --> Settings --> Map & GPS Settings** and expand the **Map Settings** subsection.
2. Paste the link to your JSON style in either the **Light Style** or **Dark Style**. (You can add different styles which will help make the map style more appropriate depending on whether you set **Immich** to Light or Dark mode.)
3. Save your selections. Reload the map, and enjoy your custom map style!
## Use Maptiler to build a custom style
Customizing the map style can be done easily using Maptiler if you do not want to write an entire JSON document by hand.
1. Create a free account at https://cloud.maptiler.com
2. Once logged in, you can either create a brand new map by clicking on **New Map**, selecting a starter map, and then clicking **Customize**, OR by selecting a **Standard Map** and customizing it from there.
@ -11,6 +25,3 @@ You may decide that you'd like to modify the style document which is used to dra
5. Next, **Publish** your style using the **Publish** button at the top right. This will deploy it to production, which means it is able to be exposed over the Internet. Maptiler will present an interactive side-by-side map with the original and your changes prior to publication.<br/>![Maptiler Publication Settings](img/immich_map_styles_publish.png)
6. Maptiler will warn you that changing the map will change it across all apps using the map. Since no apps are using the map yet, this is okay.
7. Clicking on the name of your new map at the top left will bring you to the item's **details** page. From here, copy the link to the JSON style under **Use vector style**. This link will automatically contain your personal API key to Maptiler.
8. In **Immich**, navigate to **Administration --> Settings --> Map & GPS Settings** and expand the **Map Settings** subsection.
9. Paste the link to your JSON style in either the **Light Style** or **Dark Style**. (You can add different styles which will help make the map style more appropriate depending on whether you set **Immich** to Light or Dark mode.
10. Save your selections. Reload the map, and enjoy your custom map style!

View File

@ -23,7 +23,7 @@ SELECT * FROM "assets" WHERE "originalFileName" LIKE '%_2023_%'; -- all files wi
```
```sql title="Find by path"
SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09-03/PXL_20230903_232542848.jpg';
SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09-03/PXL_2023.jpg';
SELECT * FROM "assets" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
```
@ -37,6 +37,12 @@ SELECT * FROM "assets" WHERE "checksum" = decode('69de19c87658c4c15d9cacb9967b8e
SELECT * FROM "assets" WHERE "checksum" = '\x69de19c87658c4c15d9cacb9967b8e033bf74dd1'; -- alternate notation
```
```sql title="Find duplicate assets with identical checksum (SHA-1) (excluding trashed files)"
SELECT T1."checksum", array_agg(T2."id") ids FROM "assets" T1
INNER JOIN "assets" T2 ON T1."checksum" = T2."checksum" AND T1."id" != T2."id" AND T2."deletedAt" IS NULL
WHERE T1."deletedAt" IS NULL GROUP BY T1."checksum";
```
```sql title="Live photos"
SELECT * FROM "assets" WHERE "livePhotoVideoId" IS NOT NULL;
```
@ -79,8 +85,7 @@ SELECT "assets"."type", COUNT(*) FROM "assets" GROUP BY "assets"."type";
```sql title="Count by type (per user)"
SELECT "users"."email", "assets"."type", COUNT(*) FROM "assets"
JOIN "users" ON "assets"."ownerId" = "users"."id"
GROUP BY "assets"."type", "users"."email"
ORDER BY "users"."email";
GROUP BY "assets"."type", "users"."email" ORDER BY "users"."email";
```
```sql title="Failed file movements"
@ -93,6 +98,10 @@ SELECT * FROM "move_history";
SELECT * FROM "users";
```
```sql title="Get owner info from asset ID"
SELECT "users".* FROM "users" JOIN "assets" ON "users"."id" = "assets"."ownerId" WHERE "assets"."id" = 'fa310b01-2f26-4b7a-9042-d578226e021f';
```
## System Config
```sql title="Custom settings"

View File

@ -7,7 +7,7 @@ in a directory on the same machine.
# Mount the directory into the containers.
Edit `docker-compose.yml` to add one or more new mount points in the section `immich-server:` under `volumes:`.
If you want Immich to be able to delete the images in the external library, remove `:ro` from the end of the mount point.
If you want Immich to be able to delete the images in the external library or add metadata ([XMP sidecars](/docs/features/xmp-sidecars)), remove `:ro` from the end of the mount point.
```diff
immich-server:

View File

@ -11,13 +11,13 @@ Never forward port 2283 directly to the internet without additional configuratio
You may use a VPN service to open an encrypted connection to your Immich instance. OpenVPN and Wireguard are two popular VPN solutions. Here is a guide on setting up VPN access to your server - [Pihole documentation](https://docs.pi-hole.net/guides/vpn/wireguard/overview/)
### Pros:
### Pros
- Simple to set up and very secure.
- Only a single point of potential exposure, i.e., the VPN software itself. Even if there is a zero-day vulnerability in Immich, you will not be at risk.
- Both Wireguard and OpenVPN are independently security-audited, so the risk of serious zero-day exploits is minimal.
### Cons:
### Cons
- If you don't have a static IP address, you would need to set up a [Dynamic DNS](https://www.cloudflare.com/learning/dns/glossary/dynamic-dns/). [DuckDNS](https://www.duckdns.org/) is a free DDNS provider.
- VPN software needs to be installed and active on both server-side and client-side.
@ -27,6 +27,10 @@ You may use a VPN service to open an encrypted connection to your Immich instanc
If you are unable to open a port on your router for Wireguard or OpenVPN to your server, [Tailscale](https://tailscale.com/) is a good option. Tailscale mediates a peer-to-peer wireguard tunnel between your server and remote device, even if one or both of them are behind a [NAT firewall](https://en.wikipedia.org/wiki/Network_address_translation).
:::tip Video tutorial
You can learn how to set up Tailscale together with Immich with the [tutorial video](https://www.youtube.com/watch?v=Vt4PDUXB_fg) they created.
:::
### Pros
- Minimal configuration needed on server and client sides.
@ -44,7 +48,7 @@ A reverse proxy is a service that sits between web servers and clients. A revers
If you're hosting your own reverse proxy, [Nginx](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/) is a great option. An example configuration for Nginx is provided [here](/docs/administration/reverse-proxy.md).
You'll also need your own certificate to authenticate https connections. If you're making Immich publicly accesible, [Let's Encrypt](https://letsencrypt.org/) can provide a free certificate for your domain and is the recommended option. Alternatively, a [self-signed certificate](https://en.wikipedia.org/wiki/Self-signed_certificate) allows you to encrypt your connection to Immich, but it raises a security warning on the client's browser.
You'll also need your own certificate to authenticate https connections. If you're making Immich publicly accessible, [Let's Encrypt](https://letsencrypt.org/) can provide a free certificate for your domain and is the recommended option. Alternatively, a [self-signed certificate](https://en.wikipedia.org/wiki/Self-signed_certificate) allows you to encrypt your connection to Immich, but it raises a security warning on the client's browser.
A remote reverse proxy like [Cloudflare](https://www.cloudflare.com/learning/cdn/glossary/reverse-proxy/) increases security by hiding the server IP address, which makes targeted attacks like [DDoS](https://www.cloudflare.com/learning/ddos/what-is-a-ddos-attack/) harder.

View File

@ -1,14 +1,20 @@
# Remote Machine Learning
To alleviate [performance issues on low-memory systems](/docs/FAQ.mdx#why-is-immich-slow-on-low-memory-systems-like-the-raspberry-pi) like the Raspberry Pi, you may also host Immich's machine-learning container on a more powerful system (e.g. your laptop or desktop computer):
- Set the URL in Machine Learning Settings on the Admin Settings page to point to the designated ML system, e.g. `http://workstation:3003`.
- Copy the following `docker-compose.yml` to your ML system.
- If using [hardware acceleration](/docs/features/ml-hardware-acceleration), the [hwaccel.ml.yml](https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml) file also needs to be added
- Start the container by running `docker compose up -d`.
To alleviate [performance issues on low-memory systems](/docs/FAQ.mdx#why-is-immich-slow-on-low-memory-systems-like-the-raspberry-pi) like the Raspberry Pi, you may also host Immich's machine learning container on a more powerful system, such as your laptop or desktop computer. The server container will send requests containing the image preview to the remote machine learning container for processing. The machine learning container does not persist this data or associate it with a particular user.
:::info
Smart Search and Face Detection will use this feature, but Facial Recognition is handled in the server.
Smart Search and Face Detection will use this feature, but Facial Recognition will not. This is because Facial Recognition uses the _outputs_ of these models that have already been saved to the database. As such, its processing is between the server container and the database.
:::
:::danger
Image previews are sent to the remote machine learning container. Use this option carefully when running this on a public computer or a paid processing cloud. Additionally, as an internal service, the machine learning container has no security measures whatsoever. Please be mindful of where it's deployed and who can access it.
:::
1. Ensure the remote server has Docker installed
2. Copy the following `docker-compose.yml` to the remote server
:::info
If using hardware acceleration, the [hwaccel.ml.yml](https://github.com/immich-app/immich/releases/latest/download/hwaccel.ml.yml) file also needs to be added and the `docker-compose.yml` needs to be configured as described in the [hardware acceleration documentation](/docs/features/ml-hardware-acceleration)
:::
```yaml
@ -33,8 +39,26 @@ volumes:
model-cache:
```
Please note that version mismatches between both hosts may cause instabilities and bugs, so make sure to always perform updates together.
3. Start the remote machine learning container by running `docker compose up -d`
:::caution
As an internal service, the machine learning container has no security measures whatsoever. Please be mindful of where it's deployed and who can access it.
:::info
Version mismatches between both hosts may cause bugs and instability, so remember to update this container as well when updating the local Immich instance.
:::
4. Navigate to the [Machine Learning Settings](https://my.immich.app/admin/system-settings?isOpen=machine-learning)
5. Click _Add URL_
6. Fill the new field with the URL to the remote machine learning container, e.g. `http://ip:port`
## Forcing remote processing
Adding a new URL to the settings is recommended over replacing the existing URL. This is because it will allow machine learning tasks to be processed successfully when the remote server is down by falling back to the local machine learning container. If you do not want machine learning tasks to be processed locally when the remote server is not available, you can instead replace the existing URL and only provide the remote container's URL. If doing this, you can remove the `immich-machine-learning` section of the local `docker-compose.yml` file to save resources, as this service will never be used.
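If you manage settings through a config file rather than the admin UI, the corresponding section might look like this sketch, where `remote-host` is a placeholder for your remote machine's address and the local container is kept as a fallback (URLs are tried in order):

```json
"machineLearning": {
  "enabled": true,
  "url": ["http://remote-host:3003", "http://immich-machine-learning:3003"]
}
```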
Do note that this will mean that Smart Search and Face Detection jobs will fail to be processed when the remote instance is not available. This in turn means that tasks dependent on these features—Duplicate Detection and Facial Recognition—will not run for affected assets. If this occurs, you must manually click the _Missing_ button next to Smart Search and Face Detection in the [Job Status](http://my.immich.app/admin/jobs-status) page for the jobs to be retried.
## Load balancing
While several URLs can be provided in the settings, they are tried sequentially; there is no attempt to distribute load across multiple containers. It is recommended to use a dedicated load balancer for such use-cases and specify it as the only URL. Among other things, it may enable the use of different APIs on the same server by running multiple containers with different configurations. For example, one might run an OpenVINO container in addition to a CUDA container, or run a standard release container to maximize both CPU and GPU utilization.
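As one possible setup, a small Nginx instance could round-robin requests across two machine learning containers. This is only a sketch, not a supported configuration; the container names `ml-1` and `ml-2` and the listening port are placeholders, and the fragment belongs inside an `http` block:

```nginx
upstream immich_ml {
    # Requests are distributed round-robin across these containers
    server ml-1:3003;
    server ml-2:3003;
}

server {
    listen 3003;
    location / {
        proxy_pass http://immich_ml;
    }
}
```

The Immich server would then be pointed at the load balancer, e.g. `http://nginx-host:3003`, as the only machine learning URL.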
:::tip
The machine learning container can be shared among several Immich instances regardless of the models a particular instance uses. However, using different models will lead to higher peak memory usage.
:::

View File

@ -0,0 +1,19 @@
# Scaling Immich
Immich is built with modern deployment practices in mind, and the backend is designed to be able to run multiple instances in parallel. When doing this, the only requirement you need to be aware of is that every instance needs to be connected to the shared infrastructure. That means they should all have access to the same Postgres and Redis instances, and have the same files mounted into the containers.
Scaling can be useful for many reasons. Maybe you have a gaming PC that you want to use for transcoding and thumbnail generation, or perhaps you run a Kubernetes cluster across a handful of powerful servers that you want to make use of.
:::info
If you only have a single machine to run Immich on, scaling to multiple containers is unlikely to provide any benefit. An Immich container will run multiple background tasks at once, and you can increase their number from the admin panel.
:::
The details of how to scale across multiple machines will vary widely between different environments and require some knowledge to set up, and as such this guide gives no specific instructions. In some cases scaling up can be as easy as incrementing the amount of replicas on a Kubernetes deployment, in others it might need you to configure network tunnels or NFS mounts. The details are left as an exercise for the reader ;)
## Workers
By default, each running `immich-server` container comes with multiple internal workers. If you're scaling up only to handle more background tasks, you can choose to disable the worker responsible for the API. See [workers](../administration/jobs-workers.md) for more detail.
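As a sketch of what that could look like in Compose (the service name `immich-jobs` is arbitrary, and `IMMICH_WORKERS_EXCLUDE` is described in the workers documentation linked above), such a replica reuses the same `.env`, storage, Postgres, and Redis as the main container:

```yaml
services:
  immich-jobs:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    # Run only the background job workers; another replica serves the API.
    environment:
      - IMMICH_WORKERS_EXCLUDE=api
    env_file:
      - .env
    volumes:
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
```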
## Scaling down
In the same way you can scale up to multiple containers, you can also choose to scale down. All state is stored in Postgres, Redis, and the filesystem, so there is no risk in stopping a running immich-server container, for example if you want to use your GPU to play some games. As long as there is an API worker running, you will still be able to browse Immich, and jobs will wait to be processed until there is a worker available for them.

View File

@ -6,10 +6,19 @@ This script assumes you have a second hard drive connected to your server for on
The database is saved to your Immich upload folder in the `database-backup` subdirectory. The database is then backed up and versioned with your assets by Borg. This ensures that the database backup is in sync with your assets in every snapshot.
:::info
This script makes backups of your database along with your photo/video library. This is redundant with the [automatic database backup tool](https://immich.app/docs/administration/backup-and-restore#automatic-database-backups) built into Immich. Using this script to backup your database has two advantages over the built-in backup tool:
- This script uses storage more efficiently by versioning your backups instead of making multiple copies.
- The database backups are performed at the same time as the library backup, ensuring that the backups of your database and the library are always in sync.
If you are using this script, it is therefore safe to turn off the built-in automatic database backups from your admin panel to save storage space.
:::
### Prerequisites
- Borg needs to be installed on your server as well as the remote machine. You can find instructions to install Borg [here](https://borgbackup.readthedocs.io/en/latest/installation.html).
- (Optional) To run this sript as a non-root user, you should [add your username to the docker group](https://docs.docker.com/engine/install/linux-postinstall/).
- (Optional) To run this script as a non-root user, you should [add your username to the docker group](https://docs.docker.com/engine/install/linux-postinstall/).
- To run this script non-interactively, set up [passwordless ssh](https://www.redhat.com/sysadmin/passwordless-ssh) to your remote machine from your server. If you skipped the previous step, make sure this step is done from your root account.
To initialize the borg repository, run the following commands once.
@ -78,4 +87,4 @@ borg mount "$REMOTE_HOST:$REMOTE_BACKUP_PATH"/immich-borg /tmp/immich-mountpoint
cd /tmp/immich-mountpoint
```
You can find available snapshots in seperate sub-directories at `/tmp/immich-mountpoint`. Restore the files you need, and unmount the Borg repository using `borg umount /tmp/immich-mountpoint`
You can find available snapshots in separate sub-directories at `/tmp/immich-mountpoint`. Restore the files you need, and unmount the Borg repository using `borg umount /tmp/immich-mountpoint`

View File

@ -19,20 +19,28 @@ The default configuration looks like this:
"targetVideoCodec": "h264",
"acceptedVideoCodecs": ["h264"],
"targetAudioCodec": "aac",
"acceptedAudioCodecs": ["aac", "mp3", "libopus"],
"acceptedAudioCodecs": ["aac", "mp3", "libopus", "pcm_s16le"],
"acceptedContainers": ["mov", "ogg", "webm"],
"targetResolution": "720",
"maxBitrate": "0",
"bframes": -1,
"refs": 0,
"gopSize": 0,
"npl": 0,
"temporalAQ": false,
"cqMode": "auto",
"twoPass": false,
"preferredHwDevice": "auto",
"transcode": "required",
"tonemap": "hable",
"accel": "disabled"
"accel": "disabled",
"accelDecode": false
},
"backup": {
"database": {
"enabled": true,
"cronExpression": "0 02 * * *",
"keepLastAmount": 14
}
},
"job": {
"backgroundTask": {
@ -60,10 +68,13 @@ The default configuration looks like this:
"concurrency": 5
},
"thumbnailGeneration": {
"concurrency": 5
"concurrency": 3
},
"videoConversion": {
"concurrency": 1
},
"notifications": {
"concurrency": 5
}
},
"logging": {
@ -72,46 +83,52 @@ The default configuration looks like this:
},
"machineLearning": {
"enabled": true,
"url": "http://immich-machine-learning:3003",
"url": ["http://immich-machine-learning:3003"],
"clip": {
"enabled": true,
"modelName": "ViT-B-32__openai"
},
"duplicateDetection": {
"enabled": false,
"maxDistance": 0.03
"enabled": true,
"maxDistance": 0.01
},
"facialRecognition": {
"enabled": true,
"modelName": "buffalo_l",
"minScore": 0.7,
"maxDistance": 0.6,
"maxDistance": 0.5,
"minFaces": 3
}
},
"map": {
"enabled": true,
"lightStyle": "",
"darkStyle": ""
"lightStyle": "https://tiles.immich.cloud/v1/style/light.json",
"darkStyle": "https://tiles.immich.cloud/v1/style/dark.json"
},
"reverseGeocoding": {
"enabled": true
},
"metadata": {
"faces": {
"import": false
}
},
"oauth": {
"enabled": false,
"issuerUrl": "",
"autoLaunch": false,
"autoRegister": true,
"buttonText": "Login with OAuth",
"clientId": "",
"clientSecret": "",
"defaultStorageQuota": 0,
"enabled": false,
"issuerUrl": "",
"mobileOverrideEnabled": false,
"mobileRedirectUri": "",
"scope": "openid email profile",
"signingAlgorithm": "RS256",
"profileSigningAlgorithm": "none",
"storageLabelClaim": "preferred_username",
"storageQuotaClaim": "immich_quota",
"defaultStorageQuota": 0,
"buttonText": "Login with OAuth",
"autoRegister": true,
"autoLaunch": false,
"mobileOverrideEnabled": false,
"mobileRedirectUri": ""
"storageQuotaClaim": "immich_quota"
},
"passwordLogin": {
"enabled": true
@ -122,11 +139,16 @@ The default configuration looks like this:
"template": "{{y}}/{{y}}-{{MM}}-{{dd}}/{{filename}}"
},
"image": {
"thumbnailFormat": "webp",
"thumbnailSize": 250,
"previewFormat": "jpeg",
"previewSize": 1440,
"quality": 80,
"thumbnail": {
"format": "webp",
"size": 250,
"quality": 80
},
"preview": {
"format": "jpeg",
"size": 1440,
"quality": 80
},
"colorspace": "p3",
"extractEmbedded": false
},
@ -140,23 +162,35 @@ The default configuration looks like this:
"theme": {
"customCss": ""
},
"user": {
"deleteDelay": 7
},
"library": {
"scan": {
"enabled": true,
"cronExpression": "0 0 * * *"
},
"watch": {
"enabled": false,
"usePolling": false,
"interval": 10000
"enabled": false
}
},
"server": {
"externalDomain": "",
"loginPageMessage": ""
},
"notifications": {
"smtp": {
"enabled": false,
"from": "",
"replyTo": "",
"transport": {
"ignoreCert": false,
"host": "",
"port": 587,
"username": "",
"password": ""
}
}
},
"user": {
"deleteDelay": 7
}
}
```

View File

@ -58,6 +58,7 @@ Optionally, you can enable hardware acceleration for machine learning and transc
- Populate `UPLOAD_LOCATION` with your preferred location for storing backup assets.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publicly exposed, so this password is only used for local authentication.
To avoid issues with Docker parsing this value, it is best to use only the characters `A-Za-z0-9`.
- Set your timezone by uncommenting the `TZ=` line.
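Putting those steps together, the relevant lines of the `.env` file might look like this sketch (all values are placeholders; choose your own path, password, and timezone):

```
UPLOAD_LOCATION=/mnt/storage/immich
DB_PASSWORD=A1b2C3d4E5f6
TZ=Etc/UTC
```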
### Step 3 - Start the containers
@ -80,6 +81,10 @@ The Compose file './docker-compose.yml' is invalid because:
See the previous paragraph about installing from the official docker repository.
:::
:::info Health check start interval
If you get an error `can't set healthcheck.start_interval as feature require Docker Engine v25 or later`, it helps to comment out the line for `start_interval` in the `database` section of the `docker-compose.yml` file.
:::
:::tip
For more information on how to use the application, please refer to the [Post Installation](/docs/install/post-install.mdx) guide.
:::
@ -109,7 +114,7 @@ Immich is currently under heavy development, which means you can expect [breakin
[compose-file]: https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
[env-file]: https://github.com/immich-app/immich/releases/latest/download/example.env
[watchtower]: https://containrrr.dev/watchtower/
[breaking]: https://github.com/immich-app/immich/discussions?discussions_q=label%3Abreaking-change+sort%3Adate_created
[breaking]: https://github.com/immich-app/immich/discussions?discussions_q=label%3Achangelog%3Abreaking-change+sort%3Adate_created
[container-auth]: https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry
[releases]: https://github.com/immich-app/immich/releases
[docker-repo]: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository

View File

@ -27,23 +27,14 @@ If this should not work, try running `docker compose up -d --force-recreate`.
These environment variables are used by the `docker-compose.yml` file and do **NOT** affect the containers directly.
:::
### Supported filesystems
The Immich Postgres database (`DB_DATA_LOCATION`) must be located on a filesystem that supports user/group
ownership and permissions (EXT2/3/4, ZFS, APFS, BTRFS, XFS, etc.). It will not work on any filesystem formatted as NTFS, exFAT, or FAT32.
It will not work in WSL (Windows Subsystem for Linux) when using a mounted host directory (commonly under `/mnt`).
If this is an issue, you can change the bind mount to a Docker volume instead.
Regardless of filesystem, it is not recommended to use a network share for your database location due to performance and possible data loss issues.
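If you need to avoid an unsupported filesystem, a named Docker volume can replace the bind mount for the database. A minimal sketch (the volume name `pgdata` is arbitrary):

```yaml
services:
  database:
    volumes:
      # Named volume instead of a host bind mount for DB_DATA_LOCATION
      - pgdata:/var/lib/postgresql/data

volumes:
  pgdata:
```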
## General
| Variable | Description | Default | Containers | Workers |
| :---------------------------------- | :---------------------------------------------------------------------------------------- | :--------------------------: | :----------------------- | :----------------- |
| `TZ` | Timezone | | server | microservices |
| `TZ` | Timezone | <sup>\*1</sup> | server | microservices |
| `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*1</sup>⚠️ | `./upload`<sup>\*2</sup> | server | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
| `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
@ -51,17 +42,15 @@ Regardless of filesystem, it is not recommended to use a network share for your
| `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
| `IMMICH_PROCESS_INVALID_IMAGES` | When `true`, generate thumbnails for invalid images | | server | microservices |
| `IMMICH_TRUSTED_PROXIES` | List of comma separated IPs set as trusted proxies | | server | api |
| `IMMICH_IGNORE_MOUNT_CHECK_ERRORS` | See [System Integrity](/docs/administration/system-integrity) | | server | api, microservices |
\*1: This path is where the Immich code looks for the files, which is internal to the docker container. Setting it to a path on your host will certainly break things, you should use the `UPLOAD_LOCATION` variable instead.
\*2: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
It only needs to be set if the Immich deployment method changes.
:::tip
`TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
\*1: `TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
`TZ` is used by `exiftool` as a fallback in case the timezone cannot be determined from the image metadata. It is also used for logfile timestamps and cron job execution.
:::
\*2: This path is where the Immich code looks for the files, which is internal to the docker container. Setting it to a path on your host will certainly break things, you should use the `UPLOAD_LOCATION` variable instead.
\*3: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
It only needs to be set if the Immich deployment method changes.
## Workers
@ -79,7 +68,7 @@ Information on the current workers can be found [here](/docs/administration/jobs
| Variable | Description | Default |
| :------------ | :------------- | :----------------------------------------: |
| `IMMICH_HOST` | Listening host | `0.0.0.0` |
| `IMMICH_PORT` | Listening port | `3001` (server), `3003` (machine learning) |
| `IMMICH_PORT` | Listening port | `2283` (server), `3003` (machine learning) |
## Database
@ -125,7 +114,7 @@ When `DB_URL` is defined, the `DB_HOSTNAME`, `DB_PORT`, `DB_USERNAME`, `DB_PASSW
All `REDIS_` variables must be provided to all Immich workers, including `api` and `microservices`.
`REDIS_URL` must start with `ioredis://` and then include a `base64` encoded JSON string for the configuration.
More info can be found in the upstream [ioredis][redis-api] documentation.
More info can be found in the upstream [ioredis] documentation.
When `REDIS_URL` or `REDIS_SOCKET` are defined, the `REDIS_HOSTNAME`, `REDIS_PORT`, `REDIS_USERNAME`, `REDIS_PASSWORD`, and `REDIS_DBINDEX` variables are ignored.
:::
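As a sketch of how such a value can be produced, the JSON configuration can be encoded on the command line (the host, port, and master name below are placeholders):

```bash
echo -n '{"sentinels":[{"host":"redis-sentinel-1","port":26379}],"name":"mymaster"}' | base64
```

The resulting string is appended after the `ioredis://` prefix to form `REDIS_URL`.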
@ -159,26 +148,33 @@ Redis (Sentinel) URL example JSON before encoding:
## Machine Learning
| Variable | Description | Default | Containers |
| :----------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO image) | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| Variable | Description | Default | Containers |
| :-------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
\*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
\*2: Since each process duplicates models in memory, changing this is not recommended unless you have abundant memory to go around.
\*3: For scenarios like HPA in K8S. https://github.com/immich-app/immich/discussions/12064
\*4: Using multiple GPUs requires `MACHINE_LEARNING_WORKERS` to be set greater than 1. A single device is assigned to each worker in round-robin order.
:::info
Other machine learning parameters can be tuned from the admin UI.
@ -187,15 +183,10 @@ Other machine learning parameters can be tuned from the admin UI.
## Prometheus
| Variable | Description | Default | Containers | Workers |
| :----------------------------- | :-------------------------------------------------------------------------------------------- | :-----: | :--------- | :----------------- |
| `IMMICH_METRICS`<sup>\*1</sup> | Toggle all metrics (one of [`true`, `false`]) | | server | api, microservices |
| `IMMICH_API_METRICS` | Toggle metrics for endpoints and response times (one of [`true`, `false`]) | | server | api, microservices |
| `IMMICH_HOST_METRICS` | Toggle metrics for CPU and memory utilization for host and process (one of [`true`, `false`]) | | server | api, microservices |
| `IMMICH_IO_METRICS` | Toggle metrics for database queries, image processing, etc. (one of [`true`, `false`]) | | server | api, microservices |
| `IMMICH_JOB_METRICS` | Toggle metrics for jobs and queues (one of [`true`, `false`]) | | server | api, microservices |
\*1: Overridden for a metric group when its corresponding environmental variable is set.
| Variable | Description | Default | Containers | Workers |
| :------------------------- | :-------------------------------------------------------------------------------------------------------------------- | :-----: | :--------- | :----------------- |
| `IMMICH_TELEMETRY_INCLUDE` | Collect these telemetries. List of `host`, `api`, `io`, `repo`, `job`. Note: You can also specify `all` to enable all | | server | api, microservices |
| `IMMICH_TELEMETRY_EXCLUDE` | Do not collect these telemetries. List of `host`, `api`, `io`, `repo`, `job` | | server | api, microservices |
## Docker Secrets
to use a Docker secret for the password in the Redis container.
[docker-secrets-example]: https://github.com/docker-library/redis/issues/46#issuecomment-335326234
[docker-secrets-docs]: https://github.com/docker-library/docs/tree/master/postgres#docker-secrets
[docker-secrets]: https://docs.docker.com/engine/swarm/secrets/
[redis-api]: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository
[ioredis]: https://ioredis.readthedocs.io/en/latest/README/#connect-to-redis
