Compare commits
44 commits: 5f83cf2e6d ... 09ad59d3c5
09ad59d3c5
b8adf7b200
f1e00e4038
9235298118
e3f0493630
0270ac41bd
1e73386dca
db89a244b0
945196c8e0
bbe516f998
04c7850e6e
9fbd69a25a
9bd4259cf2
39a62a239d
6eeace47ec
4c283bf58d
2b07a58d7b
3433cce92b
1937bd9acf
9339d653c2
0483ac9a8d
1c56aea8fb
e546ecea9a
8fde290f40
9e38b4f794
7fb331c404
5d1fe879dd
e75263b16e
28422d460a
25a070124d
150847cbd8
0af32e52ed
4d6e69e9d0
df06f221b8
e719b0e693
77b84107f6
0a961ec53f
d68fba4ba3
93517fbf79
0e619da207
cc7686668c
0c92e38370
60f0ab11bd
8638652839
config/autostart/ulauncher.desktop (symbolic link, +1)
@@ -0,0 +1 @@
+/nix/store/4jqpwdf5q8rh58a9vk80r4naaz9aic6z-home-manager-files/.config/autostart/ulauncher.desktop
@@ -61,6 +61,16 @@ let
       "run_without_argument" = false;
       "added" = 0;
     };
+    "43d1ed32-8fd3-fbf8-94f5-cffa7cd607a1" = {
+      "id" = "40d1ed32-8fd3-4bf8-92f5-cbaa7cd607a1";
+      "name" = "GitHub";
+      "keyword" = "gh";
+      "cmd" = "https://github.com/search?q=%s";
+      "icon" = null;
+      "is_default_search" = false;
+      "run_without_argument" = false;
+      "added" = 0;
+    };
   };

   # Create a Python environment with all required packages
@@ -91,15 +101,13 @@ let
   '';

 in

 # Extensions
 # https://github.com/friday/ulauncher-gnome-settings
 # https://ext.ulauncher.io/-/github-ulauncher-ulauncher-emoji
 # https://ext.ulauncher.io/-/github-tchar-ulauncher-albert-calculate-anything
 # https://ext.ulauncher.io/-/github-isacikgoz-ukill
 # https://ext.ulauncher.io/-/github-iboyperson-ulauncher-system
-#
-#
-#
 {
   nixpkgs.overlays = [
     (final: prev: { ulauncher = prev.ulauncher.override { python3 = pythonWithPackages; }; })
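The overlay hands Ulauncher a `pythonWithPackages` environment defined earlier in the same `let` block (the "# Create a Python environment with all required packages" section, elided by this hunk). A minimal sketch of what such a binding typically looks like — the package list here is an assumption, not taken from the diff:

let
  # Hypothetical package set; extensions like calculate-anything
  # typically pull in requests, pint, and similar libraries.
  pythonWithPackages = pkgs.python3.withPackages (ps: with ps; [
    requests
    pint
  ]);
in
{
  nixpkgs.overlays = [
    (final: prev: { ulauncher = prev.ulauncher.override { python3 = pythonWithPackages; }; })
  ];
}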
@@ -20,9 +20,17 @@
   networking.firewall = {
     enable = true;
     allowedTCPPorts = [
-      # SSH
-      400
+      400 # SSH
+      80 # HTTP
+      443 # HTTPS
+      22 # Git over SSH
+      32400 # Plex
+
+      7788 # Sabnzbd
+      8085 # Qbittorrent
+    ];
+    allowedUDPPorts = [
+      51820 # WireGuard
     ];
-    allowedUDPPorts = [ ];
   };
 }
@@ -16,9 +16,7 @@
   ]
   # Include packages based on whether this is a server or workstation.
   ++ lib.optional isServer ./packages/server/default.nix
-  ++ lib.optional isWorkstation ./packages/workstation/default.nix
-  # Include docker if this is a server, otherwise include nothing because we don't intend on running docker services on workstations.
-  ++ lib.optional isServer ./docker/default.nix;
+  ++ lib.optional isWorkstation ./packages/workstation/default.nix;

   # Enable networking
   networking.networkmanager.enable = true;
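`lib.optional cond x` evaluates to the singleton list `[ x ]` when `cond` is true and to `[ ]` otherwise, which is why it can be chained onto `imports` with `++`. A small sketch under assumed flag values:

{ lib, ... }:
let
  isServer = true;       # assumed values, mirroring the flags used above
  isWorkstation = false;
in
{
  imports =
    [ ./hardware-configuration.nix ]  # hypothetical base import
    ++ lib.optional isServer ./packages/server/default.nix            # => [ ./packages/server/default.nix ]
    ++ lib.optional isWorkstation ./packages/workstation/default.nix; # => [ ]
}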
@@ -4,6 +4,8 @@ services:
     container_name: radarr
     image: lscr.io/linuxserver/radarr:latest
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     ports:
       - 7878:7878
@@ -11,17 +13,19 @@ services:
       - host.docker.internal:host-gateway
     volumes:
       - ./data/radarr-config:/config
-      - /mnt/20tb:/storage
+      - /mnt:/storage
     restart: "unless-stopped"

   sonarr:
     image: linuxserver/sonarr:latest
     container_name: sonarr
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     volumes:
       - ./data/sonarr-config:/config
-      - /mnt/20tb:/storage
+      - /mnt:/storage
     ports:
       - 8989:8989
     extra_hosts:
@@ -31,6 +35,8 @@ services:
   lidarr:
     image: linuxserver/lidarr:latest
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     ports:
       - 8686:8686
@@ -38,12 +44,14 @@ services:
       - host.docker.internal:host-gateway
     volumes:
       - ./data/lidarr-config:/config
-      - /mnt/20tb:/storage
+      - /mnt:/storage
     restart: unless-stopped

   whisparr:
     image: ghcr.io/hotio/whisparr:latest
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     ports:
       - 8386:8686
@@ -51,13 +59,15 @@ services:
       - host.docker.internal:host-gateway
     volumes:
       - ./data/whisparr-config:/config
-      - /mnt/20tb:/storage
+      - /mnt:/storage
     restart: unless-stopped

   prowlarr:
     container_name: prowlarr
     image: linuxserver/prowlarr:latest
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     volumes:
       - ./data/prowlarr-config:/config
@@ -71,6 +81,8 @@ services:
     image: ghcr.io/flaresolverr/flaresolverr:latest
     container_name: flaresolverr
     environment:
+      - PUID=1000
+      - PGID=1000
       - LOG_LEVEL=${LOG_LEVEL:-info}
       - LOG_HTML=${LOG_HTML:-false}
       - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
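The recurring change across these compose files is the addition of `PUID=1000` / `PGID=1000`. For linuxserver.io-style images that honor the convention, these variables set the uid/gid the service runs as inside the container, so files written to the bind mounts (e.g. under `/mnt`) come out owned by the ordinary host user rather than root.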
@@ -1,18 +1,17 @@
 { ... }:
 {
-  imports = [
-    ./arr-stack.nix
-    ./duplicati.nix
-    ./factorio.nix
-    ./gitea.nix
-    ./golink.nix
-    ./immich.nix
-    ./minecraft.nix
-    ./plex.nix
-    ./sabnzbd.nix
-    ./satisfactory.nix
-    ./stash.nix
-    ./torrent.nix
-    ./wireguard.nix
-  ];
+  imports = [ ];
 }
+
+# TODO: Import all the package modules, disabled for testing one by one.
+# { config, pkgs, ... }:
+
+# let
+#   files = builtins.removeAttrs (builtins.readDir ./.) [ "default.nix" ];
+
+#   # Import all other .nix files as modules
+#   moduleFiles = builtins.map (fname: ./. + "/${fname}") (builtins.attrNames files);
+# in
+# {
+#   # Import all the package modules
+#   imports = moduleFiles;
+# }
@@ -3,8 +3,8 @@ services:
   app:
     image: lscr.io/linuxserver/duplicati:latest
     environment:
-      - PUID=1000
-      - PGID=1000
+      - PUID=1
+      - PGID=1
       - TZ=Europe/Amsterdam
       - CLI_ARGS=
     volumes:
@@ -16,6 +16,8 @@ services:
     container_name: "factorio-server-manager"
     restart: "unless-stopped"
     environment:
+      - PUID=1000
+      - PGID=1000
       - "FACTORIO_VERSION=stable"
       - "RCON_PASS=458fc84534"
     ports:
@@ -15,8 +15,11 @@ services:
   server:
     image: gitea/gitea:latest
     restart: always
+    environment:
+      - PUID=1000
+      - PGID=1000
     volumes:
-      - ./data/gittea:/data
+      - /mnt/services/gitea/gitea:/data
       - /etc/timezone:/etc/timezone:ro
       - /etc/localtime:/etc/localtime:ro
     ports:
@@ -29,11 +32,13 @@ services:
     image: postgres:15-alpine
     restart: always
     environment:
+      - PUID=1000
+      - PGID=1000
       - POSTGRES_USER=gitea
       - POSTGRES_PASSWORD=gitea
       - POSTGRES_DB=gitea
     volumes:
-      - ./data/postgres:/var/lib/postgresql/data
+      - /mnt/services/gitea/postgres:/var/lib/postgresql/data
     networks:
       - net

@@ -43,6 +48,8 @@ services:
       - ./act-runner-config.yaml:/config.yaml
       - /var/run/docker.sock:/var/run/docker.sock
     environment:
+      - PUID=1000
+      - PGID=1000
       - GITEA_INSTANCE_URL=https://git.mvl.sh
       - GITEA_RUNNER_REGISTRATION_TOKEN=lIlte9POlu7aBanhCh3Xm1SPfohrexyfxqs9Yiqz
       - GITEA_RUNNER_NAME=act-worker
@@ -3,7 +3,9 @@ services:
   server:
     image: ghcr.io/tailscale/golink:main
    environment:
+      - PUID=1000
+      - PGID=1000
       - TS_AUTHKEY=${TS_AUTHKEY}
     volumes:
-      - ./data:/home/nonroot
+      - /mnt/services/golink:/home/nonroot
     restart: "unless-stopped"
@@ -15,6 +15,9 @@ services:
     depends_on:
       - redis
       - database
+    environment:
+      - PUID=1000
+      - PGID=1000
     restart: always
     healthcheck:
       disable: false
@@ -45,6 +48,8 @@ services:
     container_name: immich_postgres
     image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
     environment:
+      PUID: 1000
+      PGID: 1000
       POSTGRES_PASSWORD: ${DB_PASSWORD}
       POSTGRES_USER: ${DB_USERNAME}
       POSTGRES_DB: ${DB_DATABASE_NAME}
@@ -25,6 +25,8 @@ services:
       - "19132:19132/udp"
       - "3456:8100/tcp"
     environment:
+      PUID: 1000
+      PGID: 1000
       EULA: "TRUE"
       TYPE: "paper"
       VERSION: 1.21.1
@@ -74,4 +76,4 @@ services:

     restart: unless-stopped
     volumes:
-      - ./data:/data
+      - /mnt/services/minecraft:/data
config/nixos/docker/nginx-proxy-manager.nix (new file, +18)
@@ -0,0 +1,18 @@
+{ config, pkgs, ... }:
+{
+  environment.etc."docker/nginx-proxy-manager/docker-compose.yml".source = ./nginx-proxy-manager/docker-compose.yml;
+
+  systemd.services.nginx-proxy-manager = {
+    description = "nginx-proxy-manager Docker Compose Service";
+    after = [ "network-online.target" ];
+    wants = [ "network-online.target" ];
+    serviceConfig = {
+      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/nginx-proxy-manager/docker-compose.yml up";
+      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/nginx-proxy-manager/docker-compose.yml down";
+      WorkingDirectory = "/etc/docker/nginx-proxy-manager";
+      Restart = "always";
+      RestartSec = 10;
+    };
+    wantedBy = [ "multi-user.target" ];
+  };
+}
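This unit pattern — `docker-compose up` run in the foreground so systemd supervises the process, `down` on stop — recurs for every compose stack in the repo, so it could be factored into a helper. A sketch of such a function; the name `mkComposeService` and its argument shape are assumptions, not part of the diff:

{ pkgs, ... }:
let
  # Hypothetical helper: wrap a compose file in a supervised systemd unit.
  mkComposeService = name: composeFile: {
    environment.etc."docker/${name}/docker-compose.yml".source = composeFile;
    systemd.services.${name} = {
      description = "${name} Docker Compose Service";
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      serviceConfig = {
        ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/${name}/docker-compose.yml up";
        ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker/${name}/docker-compose.yml down";
        WorkingDirectory = "/etc/docker/${name}";
        Restart = "always";
        RestartSec = 10;
      };
      wantedBy = [ "multi-user.target" ];
    };
  };
in
mkComposeService "nginx-proxy-manager" ./nginx-proxy-manager/docker-compose.yml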
config/nixos/docker/nginx-proxy-manager/docker-compose.yml (new file, +70)
@@ -0,0 +1,70 @@
+name: nginx-proxy-manager
+services:
+  upnp:
+    image: ghcr.io/vleeuwenmenno/auto-upnp:latest
+    restart: unless-stopped
+    network_mode: host
+    environment:
+      UPNP_DURATION: 86400 # 24 hours in seconds
+      PORTS: |
+        [
+          {"port": 80, "protocol": "tcp"},
+          {"port": 443, "protocol": "tcp"}
+        ]
+
+  server:
+    image: 'jc21/nginx-proxy-manager:latest'
+    restart: unless-stopped
+    ports:
+      - '80:80'
+      - '81:81'
+      - '443:443'
+    environment:
+      - PUID=1000
+      - PGID=1000
+    volumes:
+      - /mnt/services/proxy/nginx-proxy-manager/data:/data
+      - /mnt/services/proxy/nginx-proxy-manager/data/letsencrypt:/etc/letsencrypt
+      - /mnt/services/proxy/nginx/snippets:/snippets:ro
+    extra_hosts:
+      - host.docker.internal:host-gateway
+
+  authelia:
+    container_name: authelia
+    image: authelia/authelia
+    restart: unless-stopped
+    ports:
+      - 9091:9091
+    volumes:
+      - /mnt/services/proxy/authelia/config:/config:ro
+    extra_hosts:
+      - host.docker.internal:host-gateway
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/Amsterdam
+
+  redis:
+    image: redis:alpine
+    container_name: redis
+    volumes:
+      - /mnt/services/proxy/redis:/data
+    expose:
+      - 6379
+    restart: unless-stopped
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/Amsterdam
+
+  postgres:
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - POSTGRES_DB=authelia
+      - POSTGRES_USER=authelia
+      - POSTGRES_PASSWORD=authelia
+    image: postgres:15.4-alpine
+    restart: unless-stopped
+    volumes:
+      - /mnt/services/proxy/postgres:/var/lib/postgresql/data
@@ -12,24 +12,28 @@ services:
           capabilities: [gpu]

     environment:
+      - PUID=1000
+      - PGID=1000
       - VERSION=docker
       - PLEX_CLAIM=claim-sfTz4AWc_Uxhzfzz9fKS
       - NVIDIA_VISIBLE_DEVICES=all
       - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
     volumes:
-      - ./data/plex:/config
-      - /mnt/20tb/Movies:/movies
-      - /mnt/20tb/TV_Shows:/tvshows
-      - /mnt/20tb/Music:/music
+      - /mnt/services/plex/plex:/config
+      - /mnt/movies:/movies
+      - /mnt/tvshows:/tvshows
+      - /mnt/music:/music
     restart: unless-stopped

   tautulli:
     image: lscr.io/linuxserver/tautulli:latest
     container_name: tautulli
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     volumes:
-      - ./data/tautulli:/config
+      - /mnt/services/plex/tautulli:/config
     ports:
       - 8181:8181
     restart: unless-stopped
@@ -3,10 +3,12 @@ services:
     image: lscr.io/linuxserver/sabnzbd:latest
     container_name: sabnzbd
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Europe/Amsterdam
     volumes:
-      - ./data:/config
-      - /mnt/20tb:/storage
+      - /mnt/services/sabnzbd:/config
+      - /mnt:/storage
     ports:
       - 7788:8080
     restart: unless-stopped
@@ -31,6 +31,8 @@ services:
       - './data/certs/live/satisfactory.mvl.sh/fullchain.pem:/config/gamefiles/FactoryGame/Certificates/cert_chain.pem'
       - './data/certs/live/satisfactory.mvl.sh/privkey.pem:/config/gamefiles/FactoryGame/Certificates/private_key.pem'
     environment:
+      - PUID=1000
+      - PGID=1000
       - MAXPLAYERS=4
       - ROOTLESS=false
       - STEAMBETA=false
@@ -6,6 +6,8 @@ services:
     ports:
       - "9999:9999"
     environment:
+      - PUID=1000
+      - PGID=1000
       - STASH_STASH=/data/
       - STASH_GENERATED=/generated/
       - STASH_METADATA=/metadata/
@@ -15,14 +17,14 @@ services:
       - /etc/localtime:/etc/localtime:ro

       ## Keep configs, scrapers, and plugins here.
-      - ./data/config:/root/.stash
+      - /mnt/services/stash/config:/root/.stash
       ## Point this at your collection.
-      - /mnt/20tb/Stash:/data
+      - /mnt/stash:/data
       ## This is where your stash's metadata lives
-      - ./data/metadata:/metadata
+      - /mnt/services/stash/metadata:/metadata
       ## Any other cache content.
-      - ./data/cache:/cache
+      - /mnt/services/stash/cache:/cache
       ## Where to store binary blob data (scene covers, images)
-      - ./data/blobs:/blobs
+      - /mnt/services/stash/blobs:/blobs
       ## Where to store generated content (screenshots,previews,transcodes,sprites)
-      - ./data/generated:/generated
+      - /mnt/services/stash/generated:/generated
@@ -10,8 +10,10 @@ services:
       - 6881:6881/udp
       - 8085:8085
     volumes:
-      - ./data/gluetun:/gluetun
+      - /mnt/services/torrent/gluetun:/gluetun
     environment:
+      - PUID=1000
+      - PGID=1000
       - VPN_SERVICE_PROVIDER=${VPN_SERVICE_PROVIDER}
       - OPENVPN_USER=${OPENVPN_USER}
       - OPENVPN_PASSWORD=${OPENVPN_PASSWORD}
@@ -28,8 +30,8 @@ services:
       - WEBUI_PORT=8085
     volumes:
       - "/etc/localtime:/etc/localtime:ro"
-      - ./data/qbit:/config
-      - /mnt/20tb:/storage
+      - /mnt/services/torrent/qbit-config:/config
+      - /mnt:/storage
     depends_on:
       - gluetun
     restart: always
@@ -17,9 +17,11 @@ services:
     cap_add:
       - NET_ADMIN
     environment:
+      - PUID=1000
+      - PGID=1000
       - PEERS=s24,pc,laptop
     volumes:
-      - ./data:/config
+      - /mnt/services/wireguard/data:/config
     ports:
       - 51820:51820/udp
     sysctls:
@@ -11,47 +11,57 @@
     ./mennos-server/zfs.nix
   ];
   networking.hostName = "mennos-server";
+  networking.hostId = "64519940";

   # Bootloader.
   boot.loader.systemd-boot.enable = true;
   boot.loader.efi.canTouchEfiVariables = true;

-  # TODO: Enable this when I switch from test VM to actual hardware
-
-  # Enable OpenGL
-  # hardware.opengl.enable = true;
-
-  # # Load nvidia driver for Xorg and Wayland
-  # services.xserver.videoDrivers = [ "nvidia" ];
-
-  # hardware.nvidia = {
-  #   # Modesetting is required.
-  #   modesetting.enable = true;
-
-  #   # Nvidia power management. Experimental, and can cause sleep/suspend to fail.
-  #   # Enable this if you have graphical corruption issues or application crashes after waking
-  #   # up from sleep. This fixes it by saving the entire VRAM memory to /tmp/ instead
-  #   # of just the bare essentials.
-  #   powerManagement.enable = false;
-
-  #   # Fine-grained power management. Turns off GPU when not in use.
-  #   # Experimental and only works on modern Nvidia GPUs (Turing or newer).
-  #   powerManagement.finegrained = false;
-
-  #   # Use the NVidia open source kernel module (not to be confused with the
-  #   # independent third-party "nouveau" open source driver).
-  #   # Support is limited to the Turing and later architectures. Full list of
-  #   # supported GPUs is at:
-  #   # https://github.com/NVIDIA/open-gpu-kernel-modules#compatible-gpus
-  #   # Only available from driver 515.43.04+
-  #   # Currently alpha-quality/buggy, so false is currently the recommended setting.
-  #   open = false;
-
-  #   # Enable the Nvidia settings menu,
-  #   # accessible via `nvidia-settings`.
-  #   nvidiaSettings = true;
-
-  #   # Optionally, you may need to select the appropriate driver version for your specific GPU.
-  #   package = config.boot.kernelPackages.nvidiaPackages.stable;
-  # };
+  # Load nvidia driver for Xorg and Wayland
+  services.xserver.videoDrivers = [ "nvidia" ];
+
+  # Enable OpenGL and 32-bit support
+  hardware.opengl = {
+    enable = true;
+    driSupport = true;
+    driSupport32Bit = true;
+  };
+
+  # Enable NVIDIA Docker support
+  # test with: $ docker run --rm -it --device=nvidia.com/gpu=all ubuntu:latest nvidia-smi
+  hardware.nvidia-container-toolkit.enable = true;
+  virtualisation.docker = {
+    enable = true;
+  };
+
+  hardware.nvidia = {
+    # Modesetting is required.
+    modesetting.enable = true;
+
+    # Nvidia power management. Experimental, and can cause sleep/suspend to fail.
+    # Enable this if you have graphical corruption issues or application crashes after waking
+    # up from sleep. This fixes it by saving the entire VRAM memory to /tmp/ instead
+    # of just the bare essentials.
+    powerManagement.enable = false;
+
+    # Fine-grained power management. Turns off GPU when not in use.
+    # Experimental and only works on modern Nvidia GPUs (Turing or newer).
+    powerManagement.finegrained = false;
+
+    # Use the NVidia open source kernel module (not to be confused with the
+    # independent third-party "nouveau" open source driver).
+    # Support is limited to the Turing and later architectures. Full list of
+    # supported GPUs is at:
+    # https://github.com/NVIDIA/open-gpu-kernel-modules#compatible-gpus
+    # Only available from driver 515.43.04+
+    # Currently alpha-quality/buggy, so false is currently the recommended setting.
+    open = false;
+
+    # Enable the Nvidia settings menu,
+    # accessible via `nvidia-settings`.
+    nvidiaSettings = true;
+
+    # Optionally, you may need to select the appropriate driver version for your specific GPU.
+    package = config.boot.kernelPackages.nvidiaPackages.stable;
+  };
 }
@@ -1,303 +1,122 @@
 { config, pkgs, ... }:
+
+let
+  # Create a script to set permissions
+  permissionsScript = pkgs.writeShellScriptBin "set-zfs-permissions" ''
+    # Set default permissions for all service directories
+    find /mnt/services -mindepth 1 -maxdepth 1 -type d \
+      -exec chmod 775 {} \; \
+      -exec chown menno:users {} \;
+
+    # Special cases
+    chmod 774 /mnt/services/golink
+    chown 65532:users /mnt/services/golink
+
+    chmod 754 /mnt/services/torrent
+    chown menno:users /mnt/services/torrent
+
+    chmod 755 /mnt/services/proxy
+    chmod 755 /mnt/services/static-websites
+
+    # Set permissions for other mount points
+    for dir in /mnt/{ai,astrophotography,audiobooks,downloads,ISOs,movies,music,old_backups,photos,stash,tvshows,VMs}; do
+      chmod 755 "$dir"
+      chown menno:users "$dir"
+    done
+  '';
+in
 {
-  # Enable ZFS support
-  boot.supportedFilesystems = [ "zfs" ];
-  boot.zfs.enableUnstable = false;
-
-  # ZFS system services
-  services.zfs = {
-    autoSnapshot = {
-      enable = true;
-      frequent = 4; # Keep 4 15-minute snapshots
-      hourly = 24; # Keep 24 hourly snapshots
-      daily = 7; # Keep 7 daily snapshots
-      weekly = 4; # Keep 4 weekly snapshots
-      monthly = 12; # Keep 12 monthly snapshots
-    };
-    autoScrub = {
-      enable = true;
-      interval = "weekly"; # Scrub pools weekly
-    };
-  };
-
-  # Your ZFS pool and datasets will be automatically imported
-  # But we can specify mount points explicitly for clarity
-  fileSystems = {
-    "/mnt/20tb/Movies" = {
-      device = "datapool/movies";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/TV_Shows" = {
-      device = "datapool/tv_shows";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Music" = {
-      device = "datapool/music";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Astrophotography" = {
-      device = "datapool/astro";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Downloads" = {
-      device = "datapool/downloads";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Photos" = {
-      device = "datapool/photos";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Stash" = {
-      device = "datapool/stash";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/ISOs" = {
-      device = "datapool/isos";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Audiobooks" = {
-      device = "datapool/audiobooks";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/VMs" = {
-      device = "datapool/vms";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Old_Backups" = {
-      device = "datapool/old_backups";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-    "/mnt/20tb/Services" = {
-      device = "datapool/services";
-      fsType = "zfs";
-      options = [ "defaults" ];
-    };
-  };
-
-  # Install ZFS utilities
   environment.systemPackages = with pkgs; [
     zfs
     zfstools
+    permissionsScript
   ];
+
+  # Add the permissions service
+  systemd.services.zfs-permissions = {
+    description = "Set ZFS mount permissions";
+
+    # Run after ZFS mounts are available
+    after = [ "zfs.target" ];
+    requires = [ "zfs.target" ];
+
+    # Run on boot and every 6 hours
+    startAt = "*-*-* */6:00:00";
+
+    serviceConfig = {
+      Type = "oneshot";
+      ExecStart = "${permissionsScript}/bin/set-zfs-permissions";
+      User = "root";
+      Group = "root";
+    };
+  };
+
+  # Enable ZFS support
+  boot.supportedFilesystems = [ "zfs" ];
+
+  # ZFS system services
+  services.zfs = {
+    autoScrub = {
+      enable = true;
+      interval = "weekly";
+    };
+  };

   # If you want to keep compression settings
   boot.kernelParams = [ "zfs.zfs_compressed_arc_enabled=1" ];

-  systemd.services.zfs-permissions = {
-    description = "Set correct permissions on ZFS datasets";
-    after = [ "zfs-mount.service" ];
-    wantedBy = [ "multi-user.target" ];
-    script = ''
-      # Set ownership and permissions for each dataset
-      # Astrophotography - menno:menno 770
-      zfs set acltype=posixacl datapool/astro
-      zfs set xattr=sa datapool/astro
-      chown menno:menno /mnt/20tb/Astrophotography
-      chmod 770 /mnt/20tb/Astrophotography
-
-      # Audiobooks - menno:users 760
-      zfs set acltype=posixacl datapool/audiobooks
-      zfs set xattr=sa datapool/audiobooks
-      chown menno:users /mnt/20tb/Audiobooks
-      chmod 760 /mnt/20tb/Audiobooks
-
-      # Downloads - menno:users 760
-      chown menno:users /mnt/20tb/Downloads
-      chmod 760 /mnt/20tb/Downloads
-
-      # ISOs - menno:libvirt 777
-      chown menno:libvirt /mnt/20tb/ISOs
-      chmod 777 /mnt/20tb/ISOs
-
-      # VMs - menno:libvirt 777
-      chown menno:libvirt /mnt/20tb/VMs
-      chmod 777 /mnt/20tb/VMs
-
-      # Movies - menno:users 760
-      chown menno:users /mnt/20tb/Movies
-      chmod 760 /mnt/20tb/Movies
-
-      # Music - menno:users 760
-      chown menno:users /mnt/20tb/Music
-      chmod 760 /mnt/20tb/Music
-
-      # Old_Backups - menno:users 760
-      chown menno:users /mnt/20tb/Old_Backups
-      chmod 760 /mnt/20tb/Old_Backups
-
-      # Photos - menno:menno 775
-      chown menno:menno /mnt/20tb/Photos
-      chmod 775 /mnt/20tb/Photos
-
-      # Services - menno:users 760
-      chown menno:users /mnt/20tb/Services
-      chmod 760 /mnt/20tb/Services
-
-      # Stash - menno:menno 775
-      chown menno:menno /mnt/20tb/Stash
-      chmod 775 /mnt/20tb/Stash
-
-      # TV_Shows - menno:users 760
-      chown menno:users /mnt/20tb/TV_Shows
-      chmod 760 /mnt/20tb/TV_Shows
-    '';
-    serviceConfig = {
-      Type = "oneshot";
-      RemainAfterExit = true;
+  fileSystems = {
+    "/mnt/ai" = {
+      device = "datapool/ai";
+      fsType = "zfs";
     };
+    "/mnt/astrophotography" = {
+      device = "datapool/astro";
+      fsType = "zfs";
     };
-
-  environment.etc."local/bin/zfs-backup.sh" = {
-    mode = "0755";
-    text = ''
-      #!/bin/bash
-      set -euo pipefail
-
-      DATE=$(date +%Y%m%d-%H%M)
-      # Updated DATASETS list to match your actual datasets
-      DATASETS="movies tv_shows music astro downloads photos stash isos audiobooks vms old_backups services"
-      RETAIN_SNAPSHOTS=24
-      BACKUP_POOL="backup"
-      SOURCE_POOL="datapool"
-
-      log() {
-        echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*"
-      }
-
-      ensure_backup_pool() {
-        if ! zpool list "$BACKUP_POOL" >/dev/null 2>&1; then
-          log "ERROR: Backup pool '$BACKUP_POOL' does not exist!"
-          return 1
-        fi
-      }
-
-      check_dataset_exists() {
-        local pool=$1
-        local dataset=$2
-        zfs list "$pool/$dataset" >/dev/null 2>&1
-        return $?
-      }
-
-      create_backup_dataset() {
-        local dataset=$1
-        local source_pool="$SOURCE_POOL"
-        local backup_pool="$BACKUP_POOL"
-
-        # Get properties from source dataset
-        local props=$(zfs get -H -o property,value all "$source_pool/$dataset" | \
-          grep -E '^(compression|recordsize|atime|relatime|xattr|acltype)' | \
-          awk '{printf "-o %s=%s ", $1, $2}')
-
-        log "Creating backup dataset $backup_pool/$dataset with matching properties"
-        # shellcheck disable=SC2086
-        zfs create -p ${props} "$backup_pool/$dataset"
-
-        # Set some backup-specific properties
-        zfs set readonly=on "$backup_pool/$dataset"
-        zfs set snapdir=visible "$backup_pool/$dataset"
-        log "Successfully created backup dataset $backup_pool/$dataset"
-      }
-
-      get_latest_snapshot() {
-        local pool=$1
-        local dataset=$2
-        local snapshot
-        snapshot=$(zfs list -t snapshot -H -o name "$pool/$dataset" 2>/dev/null | grep backup- | tail -n1) || true
-        echo "$snapshot"
-      }
-
-      # Ensure backup pool exists
-      ensure_backup_pool
-
-      for ds in $DATASETS; do
-        log "Processing dataset $ds"
-
-        # Check if source dataset exists
-        if ! check_dataset_exists "$SOURCE_POOL" "$ds"; then
-          log "Skipping $ds - source dataset $SOURCE_POOL/$ds does not exist"
-          continue
-        fi
-
-        # Create backup dataset if it doesn't exist
-        if ! check_dataset_exists "$BACKUP_POOL" "$ds"; then
-          log "Backup dataset $BACKUP_POOL/$ds does not exist"
-          create_backup_dataset "$ds"
-        fi
-
-        # Create new snapshot
-        local snapshot_name="$SOURCE_POOL/$ds@backup-$DATE"
-        log "Creating new snapshot $snapshot_name"
-        zfs snapshot "$snapshot_name"
-
-        LATEST_BACKUP=$(get_latest_snapshot "$BACKUP_POOL" "$ds")
-
-        if [ -z "$LATEST_BACKUP" ]; then
-          log "No existing backup found - performing full backup of $ds"
-          zfs send "$snapshot_name" | zfs receive -F "$BACKUP_POOL/$ds"
-        else
-          LATEST_SOURCE=$(get_latest_snapshot "$SOURCE_POOL" "$ds" | grep -v "backup-$DATE" | tail -n1)
-          if [ -n "$LATEST_SOURCE" ]; then
-            log "Performing incremental backup of $ds from $LATEST_SOURCE to backup-$DATE"
-            zfs send -i "$LATEST_SOURCE" "$snapshot_name" | zfs receive -F "$BACKUP_POOL/$ds"
-          else
-            log "No suitable source snapshot found for incremental backup - performing full backup of $ds"
-            zfs send "$snapshot_name" | zfs receive -F "$BACKUP_POOL/$ds"
-          fi
-        fi
-
-        log "Cleaning up old snapshots for $ds"
-
-        # Cleanup source snapshots
-        if snapshots=$(zfs list -t snapshot -H -o name "$SOURCE_POOL/$ds" | grep backup-); then
-          echo "$snapshots" | head -n -$RETAIN_SNAPSHOTS | while read -r snap; do
-            log "Removing source snapshot: $snap"
-            zfs destroy "$snap"
-          done
-        fi
-
-        # Cleanup backup snapshots
-        if snapshots=$(zfs list -t snapshot -H -o name "$BACKUP_POOL/$ds" | grep backup-); then
-          echo "$snapshots" | head -n -$RETAIN_SNAPSHOTS | while read -r snap; do
-            log "Removing backup snapshot: $snap"
-            zfs destroy "$snap"
-          done
-        fi
-      done
-
-      log "Backup completed successfully"
-    '';
+    "/mnt/audiobooks" = {
+      device = "datapool/audiobooks";
+      fsType = "zfs";
     };
-
-  systemd.services.zfs-backup = {
-    description = "ZFS Backup Service";
-    requires = [ "zfs.target" ];
-    after = [ "zfs.target" ];
-    path = [ pkgs.zfs ];
-    serviceConfig = {
-      Type = "oneshot";
-      ExecStart = "/etc/local/bin/zfs-backup.sh";
-      User = "root";
+    "/mnt/downloads" = {
+      device = "datapool/downloads";
+      fsType = "zfs";
     };
+    "/mnt/ISOs" = {
+      device = "datapool/isos";
+      fsType = "zfs";
     };
-
-  systemd.timers.zfs-backup = {
-    description = "Run ZFS backup every 4 hours";
-    wantedBy = [ "timers.target" ];
-    timerConfig = {
-      OnBootSec = "15min";
-      OnUnitActiveSec = "4h";
-      RandomizedDelaySec = "5min";
+    "/mnt/movies" = {
+      device = "datapool/movies";
+      fsType = "zfs";
+    };
+    "/mnt/music" = {
+      device = "datapool/music";
+      fsType = "zfs";
+    };
+    "/mnt/old_backups" = {
+      device = "datapool/old_backups";
+      fsType = "zfs";
+    };
+    "/mnt/photos" = {
+      device = "datapool/photos";
+      fsType = "zfs";
+    };
+    "/mnt/services" = {
+      device = "datapool/services";
+      fsType = "zfs";
+    };
+    "/mnt/stash" = {
+      device = "datapool/stash";
+      fsType = "zfs";
+    };
+    "/mnt/tvshows" = {
+      device = "datapool/tv_shows";
+      fsType = "zfs";
+    };
+    "/mnt/VMs" = {
+      device = "datapool/vms";
+      fsType = "zfs";
     };
   };
 }
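One detail worth noting in the new zfs.nix: setting `startAt` on `systemd.services.zfs-permissions` is what schedules the periodic run — on NixOS it generates a matching systemd timer, so no separate `systemd.timers` block is needed (the old file declared its `zfs-backup` timer by hand). A rough explicit equivalent, as a sketch rather than part of the diff:

systemd.timers.zfs-permissions = {
  wantedBy = [ "timers.target" ];
  # OnCalendar mirrors the service's startAt expression: every six hours.
  timerConfig.OnCalendar = "*-*-* */6:00:00";
};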
@@ -1,6 +1,6 @@
 { pkgs, ... }:
 {
-  imports = [ ./virtualization.nix ];
+  imports = [ ./virtualisation.nix ];

   environment.systemPackages = with pkgs; [
     yubikey-manager
@@ -1,7 +1,10 @@
 { config, pkgs, ... }:

 let
-  files = builtins.removeAttrs (builtins.readDir ./.) [ "default.nix" ];
+  files = builtins.removeAttrs (builtins.readDir ./.) [
+    "default.nix"
+    "mennovanleeuwen.nl"
+  ];

   # Import all other .nix files as modules
   moduleFiles = builtins.map (fname: ./. + "/${fname}") (builtins.attrNames files);
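The auto-import works because `builtins.readDir ./.` returns an attribute set mapping entry names to entry types, and `builtins.attrNames` lists the remaining names in sorted order. A sketch of the evaluation with a hypothetical directory listing:

# Hypothetical contents of ./.: default.nix, gitea.nix, plex.nix, mennovanleeuwen.nl/
# builtins.readDir ./.   => { "default.nix" = "regular"; "gitea.nix" = "regular";
#                             "mennovanleeuwen.nl" = "directory"; "plex.nix" = "regular"; }
# after removeAttrs      => { "gitea.nix" = "regular"; "plex.nix" = "regular"; }
# builtins.attrNames     => [ "gitea.nix" "plex.nix" ]
# moduleFiles            => [ ./gitea.nix ./plex.nix ]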
@@ -1,13 +1,13 @@
 { pkgs, ... }:
 {
   # Resume/CV Website (Nginx)
-  services.nginx = {
-    enable = true;
-    virtualHosts."localhost:4203" = {
-      root = "/home/menno/dotfiles/config/nixos/packages/server/mennovanleeuwen.nl";
-      locations."/" = {
-        index = "index.html";
-      };
-    };
-  };
+  # services.nginx = {
+  #   enable = true;
+  #   virtualHosts."localhost:4203" = {
+  #     root = "/home/menno/dotfiles/config/nixos/packages/server/mennovanleeuwen.nl";
+  #     locations."/" = {
+  #       index = "index.html";
+  #     };
+  #   };
+  # };
 }
@@ -1,128 +0,0 @@
-{ pkgs, ... }:
-{
-  services.traefik = {
-    enable = true;
-    staticConfigOptions = {
-      entryPoints = {
-        web.address = ":80";
-        websecure.address = ":443";
-      };
-      certificatesResolvers.letsencrypt.acme = {
-        email = "menno@vleeuwen.me";
-        storage = "/var/lib/traefik/acme.json";
-        httpChallenge.entryPoint = "web";
-      };
-    };
-    dynamicConfigOptions = {
-      http = {
-        # Plex Media Server
-        routers.plex = {
-          rule = "Host(`plex.vleeuwen.me`)";
-          service = "plex";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.plex.loadBalancer.servers = [ { url = "http://127.0.0.1:32400"; } ];
-
-        # Tautulli (Plex Stats)
-        routers.tautulli = {
-          rule = "Host(`tautulli.vleeuwen.me`)";
-          service = "tautulli";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.tautulli.loadBalancer.servers = [ { url = "http://127.0.0.1:8181"; } ];
-
-        # Jellyfin
-        routers.jellyfin = {
-          rule = "Host(`jellyfin.vleeuwen.me`)";
-          service = "jellyfin";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.jellyfin.loadBalancer.servers = [ { url = "http://127.0.0.1:8096"; } ];
-
-        # Overseerr
-        routers.overseerr = {
-          rule = "Host(`overseerr.vleeuwen.me`)";
-          service = "overseerr";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.overseerr.loadBalancer.servers = [ { url = "http://127.0.0.1:5555"; } ];
-
-        # Immich (Google Photos alternative)
-        routers.immich = {
-          rule = "Host(`photos.vleeuwen.me`)";
-          service = "immich";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.immich.loadBalancer.servers = [ { url = "http://127.0.0.1:2283"; } ];
-
-        # Gitea Git Server
-        routers.gitea = {
-          rule = "Host(`git.mvl.sh`)";
-          service = "gitea";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.gitea.loadBalancer.servers = [ { url = "http://127.0.0.1:3030"; } ];
-
-        # Home Assistant
-        routers.homeassistant = {
-          rule = "Host(`home.vleeuwen.me`)";
-          service = "homeassistant";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.homeassistant.loadBalancer.servers = [ { url = "http://192.168.86.254:8123"; } ];
-
-        # InfluxDB for Home Assistant
-        routers.influxdb = {
-          rule = "Host(`influxdb.vleeuwen.me`)";
-          service = "influxdb";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.influxdb.loadBalancer.servers = [ { url = "http://192.168.86.254:8086"; } ];
-
-        # Bluemap for Minecraft
-        routers.bluemap = {
-          rule = "Host(`map.mvl.sh`)";
-          service = "bluemap";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.bluemap.loadBalancer.servers = [ { url = "http://127.0.0.1:3456"; } ];
-
-        # Factorio Server Manager
-        routers.factorio = {
-          rule = "Host(`fsm.mvl.sh`)";
-          service = "factorio";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.factorio.loadBalancer.servers = [ { url = "http://127.0.0.1:5080"; } ];
-
-        # Resume/CV Website
-        routers.personal-site = {
-          rule = "Host(`mennovanleeuwen.nl`)";
-          service = "personal-site";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.personal-site.loadBalancer.servers = [ { url = "http://127.0.0.1:4203"; } ];
-
-        # Duplicati Notification Server
-        routers.duplicati-notif = {
-          rule = "Host(`duplicati-notifications.mvl.sh`)";
-          service = "duplicati-notif";
-          entryPoints = [ "websecure" ];
-          tls.certResolver = "letsencrypt";
-        };
-        services.duplicati-notif.loadBalancer.servers = [ { url = "http://127.0.0.1:5334"; } ];
-      };
-    };
-  };
-}
setup.sh
@@ -441,9 +441,6 @@ main() {
   # Create setup marker
   touch "$SETUP_MARKER" || die "Failed to create setup marker"

-  # Remove remnant files
-  rm "$HOME/.hostname" || die "Failed to remove hostname file"
-
   # Final success message
   log_success "\nSetup complete. Please logout / restart to continue with 'dotf update'.\n"
   log_error "\n!!! Please logout / restart to continue !!!"