Signed-off-by: Menno van Leeuwen <menno@vleeuwen.me>
2025-02-07 13:09:16 +01:00
parent c4685066f8
commit e9e3bcd53f
18 changed files with 158 additions and 90 deletions

View File

@@ -2,9 +2,9 @@ name: immich
 services:
   server:
     image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
-    #extends:
-    # file: hwaccel.transcoding.yml
-    # service: nvenc # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
+    extends:
+      file: hwaccel.transcoding.yml
+      service: nvenc # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
     volumes:
       - ${UPLOAD_LOCATION}:/usr/src/app/upload
       - /etc/localtime:/etc/localtime:ro
@@ -18,22 +18,21 @@ services:
     environment:
       - PUID=1000
       - PGID=1000
-    restart: always
+    restart: unless-stopped
     healthcheck:
       disable: false
   machine-learning:
     # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
     # Example tag: ${IMMICH_VERSION:-release}-cuda
     image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
-    #extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
-    # file: hwaccel.ml.yml
-    # service: cuda # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
+    extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
+      file: hwaccel.ml.yml
+      service: cuda # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
     volumes:
       - model-cache:/cache
     env_file:
       - .env
-    restart: always
+    restart: unless-stopped
     healthcheck:
       disable: false
@@ -42,7 +41,7 @@ services:
     image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
     healthcheck:
       test: redis-cli ping || exit 1
-    restart: always
+    restart: unless-stopped
   database:
     container_name: immich_postgres
@@ -78,7 +77,7 @@ services:
         '-c',
         'wal_compression=on',
       ]
-    restart: always
+    restart: unless-stopped
 volumes:
   model-cache:
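The uncommented extends blocks above pull the `nvenc` and `cuda` service definitions from `hwaccel.transcoding.yml` and `hwaccel.ml.yml`, so both files have to sit next to this compose file. A quick sanity check before redeploying is to render the merged configuration; a minimal sketch, assuming the stack is run from the directory containing these files:

# Render the fully merged configuration; this fails fast if the
# hwaccel files or the referenced nvenc/cuda services are missing.
docker compose config

# Spot-check that the GPU device reservations were merged into the
# server and machine-learning services.
docker compose config | grep -A 3 'devices:'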

View File

@@ -14,8 +14,9 @@ services:
       resources:
         reservations:
           devices:
-            - driver: nvidia
-              count: 1
+            - driver: cdi
+              device_ids:
+                - nvidia.com/gpu=all
             capabilities:
               - gpu
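Replacing `driver: nvidia` / `count: 1` with `driver: cdi` and an explicit `nvidia.com/gpu=all` device ID makes Docker resolve the GPU through a CDI specification instead of the legacy NVIDIA runtime hook, so a spec has to exist on the host. A sketch of that prerequisite, assuming the NVIDIA Container Toolkit (`nvidia-ctk`) is installed and the default `/etc/cdi` spec directory is used:

# Generate a CDI specification describing the GPUs installed on the host.
sudo nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml

# List the device names the spec exposes; nvidia.com/gpu=all must be
# present for the compose reservation above to resolve.
nvidia-ctk cdi list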

View File

@@ -14,8 +14,9 @@ services:
       resources:
         reservations:
           devices:
-            - driver: nvidia
-              count: 1
+            - driver: cdi
+              device_ids:
+                - nvidia.com/gpu=all
             capabilities:
               - gpu
               - compute
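On Docker Engine versions where CDI support is still opt-in, the daemon also needs `"features": { "cdi": true }` in `/etc/docker/daemon.json` (and a daemon restart) before a `driver: cdi` reservation will work. With that in place, a rough end-to-end check against the compose project from the first file; the service name `machine-learning` is taken from that file, and the assumption is that the generated CDI spec injects `nvidia-smi` into GPU containers as it normally does:

# Recreate the stack with the new CDI device reservations.
docker compose up -d

# If GPU passthrough works, this should list the GPU from inside the
# CUDA-based machine-learning container.
docker compose exec machine-learning nvidia-smi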