remove deprecated Docker configurations and scripts for various services
parent e6dfc2aba4
commit ff99e5e46f
@@ -28,7 +28,7 @@
25565 # Minecraft
3456 # Minecraft (Bluemap)

# Internal services
5334 # Duplicati Notifications
81 # Nginx Proxy Manager
7788 # Sabnzbd
8085 # Qbittorrent
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/arr-stack/docker-compose.yml".source = ./arr-stack/docker-compose.yml;

  systemd.services.arr-stack = {
    description = "arr-stack Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/arr-stack/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/arr-stack/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/arr-stack";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,109 +0,0 @@
name: arr-stack
services:
  radarr:
    container_name: radarr
    image: lscr.io/linuxserver/radarr:latest
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    ports:
      - 7878:7878
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - ./data/radarr-config:/config
      - /mnt:/storage
    restart: "unless-stopped"

  sonarr:
    image: linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - ./data/sonarr-config:/config
      - /mnt:/storage
    ports:
      - 8989:8989
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

  lidarr:
    image: linuxserver/lidarr:latest
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    ports:
      - 8686:8686
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - ./data/lidarr-config:/config
      - /mnt:/storage
    restart: unless-stopped

  whisparr:
    image: ghcr.io/hotio/whisparr:latest
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    ports:
      - 8386:8686
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - ./data/whisparr-config:/config
      - /mnt:/storage
    restart: unless-stopped

  prowlarr:
    container_name: prowlarr
    image: linuxserver/prowlarr:latest
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - ./data/prowlarr-config:/config
    extra_hosts:
      - host.docker.internal:host-gateway
    ports:
      - 9696:9696
    restart: unless-stopped

  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    environment:
      - PUID=1000
      - PGID=1000
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - LOG_HTML=${LOG_HTML:-false}
      - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
      - TZ=Europe/Amsterdam
    ports:
      - "8191:8191"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

  overseerr:
    image: lscr.io/linuxserver/overseerr:latest
    container_name: overseerr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - ./data/overseerr-config:/config
    ports:
      - 5555:5055
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
@@ -1,17 +0,0 @@
{ ... }:
{
  imports = [ ];
}
# TODO: Import all the package modules, disabled for testing one by one.
# { config, pkgs, ... }:

# let
#   files = builtins.removeAttrs (builtins.readDir ./.) [ "default.nix" ];

#   # Import all other .nix files as modules
#   moduleFiles = builtins.map (fname: ./. + "/${fname}") (builtins.attrNames files);
# in
# {
#   # Import all the package modules
#   imports = moduleFiles;
# }
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/duplicati/docker-compose.yml".source = ./duplicati/docker-compose.yml;

  systemd.services.duplicati = {
    description = "Duplicati Backup Server Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/duplicati/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/duplicati/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/duplicati";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,29 +0,0 @@
name: duplicati
services:
  app:
    image: lscr.io/linuxserver/duplicati:latest
    environment:
      - PUID=1
      - PGID=1
      - TZ=Europe/Amsterdam
      - CLI_ARGS=
    volumes:
      - ./config:/config
      - /mnt:/mnt
    ports:
      - 8200:8200
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

  notifications:
    image: ghcr.io/vleeuwenmenno/duplicati-discord-notification:main
    ports:
      - 5334:5000
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/factorio/docker-compose.yml".source = ./factorio/docker-compose.yml;

  systemd.services.factorio = {
    description = "Factorio Server Manager Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/factorio/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/factorio/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/factorio";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,31 +0,0 @@
name: factorio
services:
  upnp:
    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
    restart: unless-stopped
    network_mode: host
    environment:
      UPNP_DURATION: 86400 # 24 hours in seconds
      PORTS: |
        [
          {"port": 34197, "protocol": "udp"}
        ]

  server-manager:
    image: "ofsm/ofsm:latest"
    container_name: "factorio-server-manager"
    restart: "unless-stopped"
    environment:
      - PUID=1000
      - PGID=1000
      - "FACTORIO_VERSION=stable"
      - "RCON_PASS=458fc84534"
    ports:
      - "5080:80"
      - "34197:34197/udp"
    volumes:
      - "./data/fsm:/opt/fsm-data"
      - "./data/saves:/opt/factorio/saves"
      - "./data/mods:/opt/factorio/mods"
      - "./data/config:/opt/factorio/config"
      - "./data/mod_packs:/opt/fsm/mod_packs"
@@ -1,20 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/gitea/docker-compose.yml".source = ./gitea/docker-compose.yml;
  environment.etc."docker/gitea/act-runner-config.yaml".source = ./gitea/act-runner-config.yaml;

  systemd.services.gitea = {
    description = "Gitea Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/gitea/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/gitea/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/gitea";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,98 +0,0 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.

# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.

log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info

runner:
  # Where to store the registration result.
  file: .runner
  # Execute how many tasks concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
  timeout: 3h
  # Whether skip verifying the TLS certificate of the Gitea instance.
  insecure: false
  # The timeout for fetching the job from the Gitea instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Gitea instance.
  fetch_interval: 2s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
  # Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when execute `daemon`, will use labels in `.runner` file.
  labels:
    - "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
    - "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
    - "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not for the address to listen, but the address to connect from job containers.
  # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when enable is true.
  # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
  # The URL should generally end with "/".
  external_server: ""

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, act_runner will create a network automatically.
  network: ""
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
  # If the path starts with '/', the '/' will be trimmed.
  # For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""
  # Pull docker image(s) even if already present
  force_pull: true
  # Rebuild docker image(s) even if already present
  force_rebuild: false

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:
@@ -1,62 +0,0 @@
name: gittea
services:
  upnp:
    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
    restart: unless-stopped
    network_mode: host
    environment:
      UPNP_DURATION: 86400 # 24 hours in seconds
      PORTS: |
        [
          {"port": 22, "protocol": "tcp"},
          {"port": 22, "protocol": "udp"}
        ]

  server:
    image: gitea/gitea:latest
    restart: always
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /mnt/services/gitea/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3030:3000"
      - "22:22"
    networks:
      - net

  postgres:
    image: postgres:15-alpine
    restart: always
    environment:
      - PUID=1000
      - PGID=1000
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    volumes:
      - /mnt/services/gitea/postgres:/var/lib/postgresql/data
    networks:
      - net

  act_runner:
    image: gitea/act_runner:latest
    volumes:
      - ./act-runner-config.yaml:/config.yaml
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - PUID=1000
      - PGID=1000
      - GITEA_INSTANCE_URL=https://git.mvl.sh
      - GITEA_RUNNER_REGISTRATION_TOKEN=lIlte9POlu7aBanhCh3Xm1SPfohrexyfxqs9Yiqz
      - GITEA_RUNNER_NAME=act-worker
      - CONFIG_FILE=/config.yaml
    restart: always
    networks:
      - net

networks:
  net:
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:
{
  environment.etc."docker/golink/docker-compose.yml".source = ./golink/docker-compose.yml;
  environment.etc."docker/golink/.env".source = ./golink/.env;

  systemd.services.golink = {
    description = "GoLink Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/golink/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/golink/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/golink";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1 +0,0 @@
TS_AUTHKEY=
@@ -1,11 +0,0 @@
name: golink
services:
  server:
    image: ghcr.io/tailscale/golink:main
    environment:
      - PUID=1000
      - PGID=1000
      - TS_AUTHKEY=${TS_AUTHKEY}
    volumes:
      - /mnt/services/golink:/home/nonroot
    restart: "unless-stopped"
@@ -1,22 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/immich/docker-compose.yml".source = ./immich/docker-compose.yml;
  environment.etc."docker/immich/.env".source = ./immich/.env;
  environment.etc."docker/immich/hwaccel.ml.yml".source = ./immich/hwaccel.ml.yml;
  environment.etc."docker/immich/hwaccel.transcoding.yml".source = ./immich/hwaccel.transcoding.yml;

  systemd.services.immich = {
    description = "Immich Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/immich/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/immich/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/immich";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,21 +0,0 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables

# The location where your uploaded files are stored
UPLOAD_LOCATION=/mnt/8tb/Photos/immich-library
# The location where your database files are stored
DB_DATA_LOCATION=./data/postgres

# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Europe/Amsterdam

# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release

# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=postgres

# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
@@ -1,84 +0,0 @@
name: immich
services:
  server:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    extends:
      file: hwaccel.transcoding.yml
      service: nvenc # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    volumes:
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    environment:
      - PUID=1000
      - PGID=1000
    restart: always
    healthcheck:
      disable: false

  machine-learning:
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
    extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
      file: hwaccel.ml.yml
      service: cuda # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: always
    healthcheck:
      disable: false

  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    environment:
      PUID: 1000
      PGID: 1000
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
    healthcheck:
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    restart: always

volumes:
  model-cache:
@@ -1,27 +0,0 @@
# Configurations for hardware-accelerated machine learning

# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-machine-learning service in the docker-compose.yml file.

# See https://immich.app/docs/features/ml-hardware-acceleration for info on usage.

services:
  armnn:
    devices:
      - /dev/mali0:/dev/mali0
    volumes:
      - /lib/firmware/mali_csffw.bin:/lib/firmware/mali_csffw.bin:ro # Mali firmware for your chipset (not always required depending on the driver)
      - /usr/lib/libmali.so:/usr/lib/libmali.so:ro # Mali driver for your chipset (always required)

  cpu: {}

  cuda:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
@@ -1,43 +0,0 @@
# Configurations for hardware-accelerated transcoding

# If using Unraid or another platform that doesn't allow multiple Compose files,
# you can inline the config for a backend by copying its contents
# into the immich-microservices service in the docker-compose.yml file.

# See https://immich.app/docs/features/hardware-transcoding for more info on using hardware transcoding.

services:
  cpu: {}

  nvenc:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
                - compute
                - video

  rkmpp:
    security_opt: # enables full access to /sys and /proc, still far better than privileged: true
      - systempaths=unconfined
      - apparmor=unconfined
    group_add:
      - video
    devices:
      - /dev/rga:/dev/rga
      - /dev/dri:/dev/dri
      - /dev/dma_heap:/dev/dma_heap
      - /dev/mpp_service:/dev/mpp_service
      #- /dev/mali0:/dev/mali0 # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
    volumes:
      #- /etc/OpenCL:/etc/OpenCL:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping
      #- /usr/lib/aarch64-linux-gnu/libmali.so.1:/usr/lib/aarch64-linux-gnu/libmali.so.1:ro # only required to enable OpenCL-accelerated HDR -> SDR tonemapping

  vaapi:
    devices:
      - /dev/dri:/dev/dri
@@ -1,20 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/minecraft/docker-compose.yml".source = ./minecraft/docker-compose.yml;
  environment.etc."docker/minecraft/shell.sh".source = ./minecraft/shell.sh;

  systemd.services.minecraft = {
    description = "minecraft Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/minecraft/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/minecraft/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/minecraft";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,79 +0,0 @@
name: minecraft
services:
  upnp:
    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
    restart: unless-stopped
    network_mode: host
    environment:
      UPNP_DURATION: 86400 # 24 hours in seconds
      PORTS: |
        [
          {"port": 25565, "protocol": "tcp"},
          {"port": 25565, "protocol": "udp"},
          {"port": 24454, "protocol": "udp"},
          {"port": 3456, "protocol": "tcp"},
          {"port": 19132, "protocol": "udp"}
        ]

  paper:
    image: itzg/minecraft-server
    tty: true
    stdin_open: true
    ports:
      - "25565:25565/tcp"
      - "24454:24454/udp"
      - "19132:19132/udp"
      - "3456:8100/tcp"
    environment:
      PUID: 1000
      PGID: 1000
      EULA: "TRUE"
      TYPE: "paper"
      VERSION: 1.21.1
      DIFFICULTY: "hard"
      SERVER_NAME: "Paper Mostly Vanilla Server"
      MOTD: "Paper Server (Supports 1.20.x and newer!)"
      MEMORY: "32G"
      MAX_PLAYERS: 32
      VIEW_DISTANCE: 32
      SPAWN_MONSTERS: true
      SPAWN_ANIMALS: true
      ENFORCE_SECURE_PROFILE: false

      PLUGINS: |
        https://cdn.modrinth.com/data/Jrmoreqs/versions/Ch2Vh0XL/AdvancedBackups-spigot-1.21-3.6.3.jar
        https://cdn.modrinth.com/data/9eGKb6K1/versions/tA5pALYl/voicechat-bukkit-2.5.25.jar
        https://cdn.modrinth.com/data/eBqOQXoA/versions/ndMZChDv/RecoveryTotem-1.0.1.jar
        https://cdn.modrinth.com/data/fALzjamp/versions/ytBhnGfO/Chunky-Bukkit-1.4.28.jar
        https://cdn.modrinth.com/data/P1OZGk5p/versions/ffAFJrjN/ViaVersion-5.1.1.jar
        https://cdn.modrinth.com/data/NpvuJQoq/versions/kwAAl5BS/ViaBackwards-5.1.1.jar
        https://cdn.modrinth.com/data/wKkoqHrH/versions/ohEXB7mE/Geyser-Spigot.jar
        https://cdn.modrinth.com/data/Vebnzrzj/versions/cfNN7sys/LuckPerms-Bukkit-5.4.145.jar
        https://cdn.modrinth.com/data/swbUV1cr/versions/DB0OeC5p/bluemap-5.4-spigot.jar

      OPS: |
        StarDebris

      WHITELIST: |
        StarDebris
        Audi358
        TechnikTake
        MsPremium
        Barny_8874
        Ricky_2405
        KinderKiller3000
        ScherzkeksMiner
        PauBau
        QuickWitPhil
        Draxonix
        Zakomi
        skintsoldier122
        Krank4ever
        Benjilami
        Barny_8847
        Destination456
        xKizu

    restart: unless-stopped
    volumes:
      - /mnt/services/minecraft:/data
@@ -1,2 +0,0 @@
#!/usr/bin/env bash
docker compose exec paper rcon-cli "$@"
@@ -1,18 +0,0 @@
{ config, pkgs, ... }:
{
  environment.etc."docker/nginx-proxy-manager/docker-compose.yml".source = ./nginx-proxy-manager/docker-compose.yml;

  systemd.services.nginx-proxy-manager = {
    description = "nginx-proxy-manager Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/nginx-proxy-manager/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/nginx-proxy-manager/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/nginx-proxy-manager";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,70 +0,0 @@
name: nginx-proxy-manager
services:
  upnp:
    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
    restart: unless-stopped
    network_mode: host
    environment:
      UPNP_DURATION: 86400 # 24 hours in seconds
      PORTS: |
        [
          {"port": 80, "protocol": "tcp"},
          {"port": 443, "protocol": "tcp"}
        ]

  server:
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    ports:
      - '80:80'
      - '81:81'
      - '443:443'
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /mnt/services/proxy/nginx-proxy-manager/data:/data
      - /mnt/services/proxy/nginx-proxy-manager/data/letsencrypt:/etc/letsencrypt
      - /mnt/services/proxy/nginx/snippets:/snippets:ro
    extra_hosts:
      - host.docker.internal:host-gateway

  authelia:
    container_name: authelia
    image: authelia/authelia
    restart: unless-stopped
    ports:
      - 9091:9091
    volumes:
      - /mnt/services/proxy/authelia/config:/config:ro
    extra_hosts:
      - host.docker.internal:host-gateway
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam

  redis:
    image: redis:alpine
    container_name: redis
    volumes:
      - /mnt/services/proxy/redis:/data
    expose:
      - 6379
    restart: unless-stopped
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam

  postgres:
    environment:
      - PUID=1000
      - PGID=1000
      - POSTGRES_DB=authelia
      - POSTGRES_USER=authelia
      - POSTGRES_PASSWORD=authelia
    image: postgres:15.4-alpine
    restart: unless-stopped
    volumes:
      - /mnt/services/proxy/postgres:/var/lib/postgresql/data
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/plex/docker-compose.yml".source = ./plex/docker-compose.yml;

  systemd.services.plex = {
    description = "plex Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/plex/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/plex/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/plex";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,39 +0,0 @@
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    network_mode: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

    environment:
      - PUID=1000
      - PGID=1000
      - VERSION=docker
      - PLEX_CLAIM=claim-sfTz4AWc_Uxhzfzz9fKS
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
    volumes:
      - /mnt/services/plex/plex:/config
      - /mnt/movies:/movies
      - /mnt/tvshows:/tvshows
      - /mnt/music:/music
    restart: unless-stopped

  tautulli:
    image: lscr.io/linuxserver/tautulli:latest
    container_name: tautulli
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - /mnt/services/plex/tautulli:/config
    ports:
      - 8181:8181
    restart: unless-stopped
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/sabnzbd/docker-compose.yml".source = ./sabnzbd/docker-compose.yml;

  systemd.services.sabnzbd = {
    description = "sabnzbd Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/sabnzbd/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/sabnzbd/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/sabnzbd";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,14 +0,0 @@
services:
  sabnzbd:
    image: lscr.io/linuxserver/sabnzbd:latest
    container_name: sabnzbd
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - /mnt/services/sabnzbd:/config
      - /mnt:/storage
    ports:
      - 7788:8080
    restart: unless-stopped
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/satisfactory/docker-compose.yml".source = ./satisfactory/docker-compose.yml;

  systemd.services.satisfactory = {
    description = "Satisfactory Game Server Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/satisfactory/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/satisfactory/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/satisfactory";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,66 +0,0 @@
name: satisfactory
services:
  upnp:
    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
    restart: unless-stopped
    network_mode: host
    environment:
      UPNP_DURATION: 86400 # 24 hours in seconds
      PORTS: |
        [
          {"port": 7777, "protocol": "udp"},
          {"port": 15000, "protocol": "udp"},
          {"port": 15777, "protocol": "udp"},
          {"port": 27015, "protocol": "tcp"},
          {"port": 27015, "protocol": "udp"},
          {"port": 27031, "protocol": "udp"},
          {"port": 27032, "protocol": "udp"},
          {"port": 27033, "protocol": "udp"},
          {"port": 27034, "protocol": "udp"},
          {"port": 27035, "protocol": "udp"},
          {"port": 27036, "protocol": "tcp"},
          {"port": 27036, "protocol": "udp"}
        ]

  server:
    hostname: 'satisfactory-server'
    image: 'wolveix/satisfactory-server:latest'
    restart: unless-stopped
    volumes:
      - './data/config:/config'
      - './data/certs/live/satisfactory.mvl.sh/fullchain.pem:/config/gamefiles/FactoryGame/Certificates/cert_chain.pem'
      - './data/certs/live/satisfactory.mvl.sh/privkey.pem:/config/gamefiles/FactoryGame/Certificates/private_key.pem'
    environment:
      - PUID=1000
      - PGID=1000
      - MAXPLAYERS=4
      - ROOTLESS=false
      - STEAMBETA=false
    healthcheck:
      test: [ "CMD", "bash", "/healthcheck.sh" ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 120s
    depends_on:
      certbot:
        condition: service_completed_successfully
    deploy:
      resources:
        reservations:
          memory: 4G
        limits:
          memory: 8G
    network_mode: 'host'

  certbot:
    image: certbot/certbot
    command: certonly --standalone --non-interactive --agree-tos -m menno@vleeuwen.me -d satisfactory.mvl.sh
    # Uncomment this when requesting a new certificate, make sure to disable nginx-proxy-manager first since this conflicts with port 80
    # ports:
    #   - '80:80/tcp'
    volumes:
      - ./data/certs:/etc/letsencrypt
    environment:
      - CERTBOT_MAIL=menno@vleeuwen.me
      - DOMAIN=satisfactory.mvl.sh
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/stash/docker-compose.yml".source = ./stash/docker-compose.yml;

  systemd.services.stash = {
    description = "stash Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/stash/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/stash/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/stash";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,30 +0,0 @@
services:
  stash:
    image: stashapp/stash:latest
    container_name: stash
    restart: unless-stopped
    ports:
      - "9999:9999"
    environment:
      - PUID=1000
      - PGID=1000
      - STASH_STASH=/data/
      - STASH_GENERATED=/generated/
      - STASH_METADATA=/metadata/
      - STASH_CACHE=/cache/
      - STASH_PORT=9999
    volumes:
      - /etc/localtime:/etc/localtime:ro

      ## Keep configs, scrapers, and plugins here.
      - /mnt/services/stash/config:/root/.stash
      ## Point this at your collection.
      - /mnt/stash:/data
      ## This is where your stash's metadata lives
      - /mnt/services/stash/metadata:/metadata
      ## Any other cache content.
      - /mnt/services/stash/cache:/cache
      ## Where to store binary blob data (scene covers, images)
      - /mnt/services/stash/blobs:/blobs
      ## Where to store generated content (screenshots,previews,transcodes,sprites)
      - /mnt/services/stash/generated:/generated
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/torrent/docker-compose.yml".source = ./torrent/docker-compose.yml;

  systemd.services.torrent = {
    description = "Torrent Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/torrent/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/torrent/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/torrent";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,37 +0,0 @@
services:
  gluetun:
    image: qmcgaw/gluetun:latest
    container_name: gluetun
    cap_add:
      - NET_ADMIN
    network_mode: bridge
    ports:
      - 6881:6881
      - 6881:6881/udp
      - 8085:8085
    volumes:
      - /mnt/services/torrent/gluetun:/gluetun
    environment:
      - PUID=1000
      - PGID=1000
      - VPN_SERVICE_PROVIDER=${VPN_SERVICE_PROVIDER}
      - OPENVPN_USER=${OPENVPN_USER}
      - OPENVPN_PASSWORD=${OPENVPN_PASSWORD}
      - SERVER_COUNTRIES=${SERVER_COUNTRIES}
    restart: always

  qbittorrent:
    image: lscr.io/linuxserver/qbittorrent
    container_name: qbittorrent
    network_mode: "service:gluetun"
    environment:
      - PUID=1000
      - PGID=1000
      - WEBUI_PORT=8085
    volumes:
      - "/etc/localtime:/etc/localtime:ro"
      - /mnt/services/torrent/qbit-config:/config
      - /mnt:/storage
    depends_on:
      - gluetun
    restart: always
@@ -1,19 +0,0 @@
{ config, pkgs, ... }:

{
  environment.etc."docker/wireguard/docker-compose.yml".source = ./wireguard/docker-compose.yml;

  systemd.services.wireguard = {
    description = "Wireguard Docker Compose Service";
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/wireguard/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/wireguard/docker-compose.yml down";
      WorkingDirectory = "/etc/docker/wireguard";
      Restart = "always";
      RestartSec = 10;
    };
    wantedBy = [ "multi-user.target" ];
  };
}
@@ -1,29 +0,0 @@
name: wireguard
services:
  upnp:
    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
    restart: unless-stopped
    network_mode: host
    environment:
      UPNP_DURATION: 86400 # 24 hours in seconds
      PORTS: |
        [
          {"port": 51820, "protocol": "udp"}
        ]

  server:
    image: lscr.io/linuxserver/wireguard:latest
    container_name: wireguard
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=1000
      - PGID=1000
      - PEERS=s24,pc,laptop
    volumes:
      - /mnt/services/wireguard/data:/config
    ports:
      - 51820:51820/udp
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    restart: unless-stopped