refactor
This commit is contained in:
93
ansible/tasks/servers/borg-backup.yml
Normal file
93
ansible/tasks/servers/borg-backup.yml
Normal file
@@ -0,0 +1,93 @@
|
||||
---
# Installs BorgBackup, initialises an encrypted repository on the JuiceFS
# object-storage mount, and schedules daily backups via a systemd
# service + timer pair rendered from templates.
- name: Borg Backup Installation and Configuration
  block:
    - name: Check if Borg is already installed
      ansible.builtin.command: which borg
      register: borg_check
      ignore_errors: true
      changed_when: false

    - name: Ensure Borg is installed
      ansible.builtin.package:
        # Arch ships the package as "borg", Debian/Ubuntu as "borgbackup"
        # (same branching as server.yml — a bare "borg" fails on apt systems).
        name: "{{ 'borg' if ansible_pkg_mgr == 'pacman' else 'borgbackup' }}"
        state: present
      become: true
      when: borg_check.rc != 0

    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg directories
      ansible.builtin.file:
        path: "{{ borg_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ borg_config_dir }}"
        - "/mnt/object_storage"
      loop_control:
        # custom loop_var avoids clobbering "item" for any outer loop
        loop_var: borg_dir
      become: true

    - name: Check if Borg repository exists
      ansible.builtin.stat:
        # a repo is initialised iff its "config" file exists
        path: "{{ borg_repo_dir }}/config"
      register: borg_repo_check
      become: true

    - name: Initialize Borg repository
      ansible.builtin.command: >
        borg init --encryption=repokey {{ borg_repo_dir }}
      environment:
        BORG_PASSPHRASE: "{{ borg_passphrase }}"
      become: true
      when: not borg_repo_check.stat.exists

    - name: Create Borg backup script
      ansible.builtin.template:
        src: templates/borg-backup.sh.j2
        dest: "{{ borg_config_dir }}/backup.sh"
        mode: "0755"
      become: true

    - name: Create Borg systemd service
      ansible.builtin.template:
        src: templates/borg-backup.service.j2
        dest: /etc/systemd/system/borg-backup.service
        mode: "0644"
      become: true
      register: borg_service

    - name: Create Borg systemd timer
      ansible.builtin.template:
        src: templates/borg-backup.timer.j2
        dest: /etc/systemd/system/borg-backup.timer
        mode: "0644"
      become: true
      register: borg_timer

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true
      when: borg_service.changed or borg_timer.changed

    - name: Enable and start Borg backup timer
      ansible.builtin.systemd:
        name: borg-backup.timer
        enabled: true
        state: started
      become: true

    - name: Display Borg backup status
      ansible.builtin.debug:
        msg: "Borg backup is configured and will run daily at 2 AM. Logs available at /var/log/borg-backup.log"

  tags:
    - borg-backup
    - borg
    - backup
95
ansible/tasks/servers/borg-local-sync.yml
Normal file
95
ansible/tasks/servers/borg-local-sync.yml
Normal file
@@ -0,0 +1,95 @@
|
||||
---
# Deploys a script + systemd service/timer pair that syncs the Borg
# repository locally, plus its log file and logrotate policy.
- name: Borg Local Sync Installation and Configuration
  block:
    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg local sync script
      ansible.builtin.template:
        src: borg-local-sync.sh.j2
        dest: /usr/local/bin/borg-local-sync.sh
        mode: "0755"
        owner: root
        group: root
      become: true
      tags:
        - borg-local-sync

    - name: Create Borg local sync systemd service
      ansible.builtin.template:
        src: borg-local-sync.service.j2
        dest: /etc/systemd/system/borg-local-sync.service
        mode: "0644"
        owner: root
        group: root
      become: true
      notify:
        - reload systemd
      tags:
        - borg-local-sync

    - name: Create Borg local sync systemd timer
      ansible.builtin.template:
        src: borg-local-sync.timer.j2
        dest: /etc/systemd/system/borg-local-sync.timer
        mode: "0644"
        owner: root
        group: root
      become: true
      notify:
        - reload systemd
        - restart borg-local-sync-timer
      tags:
        - borg-local-sync

    - name: Create log file for Borg local sync
      ansible.builtin.file:
        path: /var/log/borg-local-sync.log
        state: touch
        owner: root
        group: root
        mode: "0644"
        # keep the task idempotent: without these, "touch" reports changed
        # on every single run
        access_time: preserve
        modification_time: preserve
      become: true
      tags:
        - borg-local-sync

    - name: Enable and start Borg local sync timer
      ansible.builtin.systemd:
        name: borg-local-sync.timer
        enabled: true
        state: started
        daemon_reload: true
      become: true
      tags:
        - borg-local-sync

    - name: Add logrotate configuration for Borg local sync
      ansible.builtin.copy:
        content: |
          /var/log/borg-local-sync.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 root root
          }
        dest: /etc/logrotate.d/borg-local-sync
        mode: "0644"
        owner: root
        group: root
      become: true
      tags:
        - borg-local-sync
        - borg
        - backup

  tags:
    - borg-local-sync
    - borg
    - backup
88
ansible/tasks/servers/dynamic-dns.yml
Normal file
88
ansible/tasks/servers/dynamic-dns.yml
Normal file
@@ -0,0 +1,88 @@
|
||||
---
# Sets up a dynamic-DNS updater: an environment file with credentials, a
# wrapper script around the dynamic-dns-cf binary (built by utils.yml),
# and a systemd service + timer that runs it every 15 minutes.
- name: Dynamic DNS setup
  block:
    - name: Create systemd environment file for dynamic DNS
      ansible.builtin.template:
        src: "{{ playbook_dir }}/templates/dynamic-dns-systemd.env.j2"
        dest: "/etc/dynamic-dns-systemd.env"
        # 0600: the env file carries API credentials
        mode: "0600"
        owner: root
        group: root
      become: true

    - name: Create dynamic DNS wrapper script
      ansible.builtin.copy:
        dest: "/usr/local/bin/dynamic-dns-update.sh"
        mode: "0755"
        content: |
          #!/bin/bash

          # Run dynamic DNS update (binary compiled by utils.yml)
          {{ ansible_user_dir }}/.local/bin/dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
      become: true

    - name: Create dynamic DNS systemd timer
      ansible.builtin.copy:
        dest: "/etc/systemd/system/dynamic-dns.timer"
        mode: "0644"
        content: |
          [Unit]
          Description=Dynamic DNS Update Timer
          Requires=dynamic-dns.service

          [Timer]
          OnCalendar=*:0/15
          Persistent=true

          [Install]
          WantedBy=timers.target
      become: true
      register: ddns_timer

    - name: Create dynamic DNS systemd service
      ansible.builtin.copy:
        dest: "/etc/systemd/system/dynamic-dns.service"
        mode: "0644"
        content: |
          [Unit]
          Description=Dynamic DNS Update
          After=network-online.target
          Wants=network-online.target

          [Service]
          Type=oneshot
          ExecStart=/usr/local/bin/dynamic-dns-update.sh
          EnvironmentFile=/etc/dynamic-dns-systemd.env
          User={{ ansible_user }}
          Group={{ ansible_user }}

          [Install]
          WantedBy=multi-user.target
      become: true
      register: ddns_service

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true
      when: ddns_timer.changed or ddns_service.changed

    - name: Enable and start dynamic DNS timer
      ansible.builtin.systemd:
        name: dynamic-dns.timer
        enabled: true
        state: started
      become: true

    - name: Display setup completion message
      ansible.builtin.debug:
        msg: |
          Dynamic DNS setup complete!
          - Systemd timer: sudo systemctl status dynamic-dns.timer
          - Check logs: sudo journalctl -u dynamic-dns.service -f
          - Manual run: sudo /usr/local/bin/dynamic-dns-update.sh
          - Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl

  when: inventory_hostname == 'mennos-desktop' or inventory_hostname == 'mennos-vps'
  tags:
    - dynamic-dns
94
ansible/tasks/servers/juicefs.yml
Normal file
94
ansible/tasks/servers/juicefs.yml
Normal file
@@ -0,0 +1,94 @@
|
||||
---
# Installs JuiceFS, tunes kernel network buffers for it, renders a systemd
# unit that mounts the Hetzner object-storage bucket (Redis metadata) at
# /mnt/object_storage, and verifies the mount.
- name: JuiceFS Installation and Configuration
  block:
    - name: Check if JuiceFS is already installed
      ansible.builtin.command: which juicefs
      register: juicefs_check
      ignore_errors: true
      changed_when: false

    # NOTE(review): piping a remote script into sh runs unreviewed code as
    # root — acceptable here only because it is the vendor's documented
    # installer; pin/verify if this ever hardens.
    - name: Install JuiceFS using the automatic installer
      ansible.builtin.shell: curl -sSL https://d.juicefs.com/install | sh -
      register: juicefs_installation
      when: juicefs_check.rc != 0
      become: true

    - name: Verify JuiceFS installation
      ansible.builtin.command: juicefs version
      register: juicefs_version
      changed_when: false
      when: juicefs_check.rc != 0 or juicefs_installation.changed

    - name: Create mount directory
      ansible.builtin.file:
        path: /mnt/object_storage
        state: directory
        mode: "0755"
      become: true

    - name: Create cache directory
      ansible.builtin.file:
        path: /var/jfsCache
        state: directory
        mode: "0755"
      become: true

    - name: Configure JuiceFS network performance optimizations
      # sysctl lives in the ansible.posix collection — there is no
      # ansible.builtin.sysctl (server.yml already uses the correct FQCN).
      ansible.posix.sysctl:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
        reload: true
      become: true
      loop:
        - { name: "net.core.rmem_max", value: "16777216" }
        - { name: "net.core.wmem_max", value: "16777216" }
        - { name: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" }
        - { name: "net.ipv4.tcp_wmem", value: "4096 65536 16777216" }

    - name: Set JuiceFS facts
      ansible.builtin.set_fact:
        hetzner_access_key: "{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_ACCESS_KEY_ID') }}"
        hetzner_secret_key: >-
          {{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_SECRET_ACCESS_KEY') }}
        redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"

    - name: Create JuiceFS systemd service file
      ansible.builtin.template:
        src: templates/juicefs.service.j2
        dest: /etc/systemd/system/juicefs.service
        owner: root
        group: root
        mode: "0644"
      become: true

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true

    - name: Include JuiceFS Redis tasks
      ansible.builtin.include_tasks: services/redis/redis.yml
      # the metadata Redis only runs on the desktop host
      when: inventory_hostname == 'mennos-desktop'

    - name: Enable and start JuiceFS service
      ansible.builtin.systemd:
        name: juicefs.service
        enabled: true
        state: started
      become: true

    - name: Check if JuiceFS is mounted
      ansible.builtin.shell: df -h | grep /mnt/object_storage
      become: true
      register: mount_check
      ignore_errors: true
      changed_when: false

    - name: Display mount status
      ansible.builtin.debug:
        msg: "JuiceFS is successfully mounted at /mnt/object_storage"
      when: mount_check.rc == 0
  tags:
    - juicefs
157
ansible/tasks/servers/server.yml
Normal file
157
ansible/tasks/servers/server.yml
Normal file
@@ -0,0 +1,157 @@
|
||||
---
# Top-level server bootstrap: base packages (ssh, borg), storage (JuiceFS),
# dynamic DNS, Borg backup/sync, kernel tuning, then every enabled
# containerised service for this host.
- name: Server setup
  block:
    - name: Ensure openssh-server is installed on Arch-based systems
      ansible.builtin.package:
        name: openssh
        state: present
      when: ansible_pkg_mgr == 'pacman'

    - name: Ensure openssh-server is installed on non-Arch systems
      ansible.builtin.package:
        name: openssh-server
        state: present
      when: ansible_pkg_mgr != 'pacman'

    - name: Ensure Borg is installed on Arch-based systems
      ansible.builtin.package:
        name: borg
        state: present
      become: true
      when: ansible_pkg_mgr == 'pacman'

    - name: Ensure Borg is installed on Debian/Ubuntu systems
      ansible.builtin.package:
        name: borgbackup
        state: present
      become: true
      when: ansible_pkg_mgr != 'pacman'

    - name: Include JuiceFS tasks
      ansible.builtin.include_tasks: juicefs.yml
      tags:
        - juicefs

    - name: Include Dynamic DNS tasks
      ansible.builtin.include_tasks: dynamic-dns.yml
      tags:
        - dynamic-dns

    - name: Include Borg Backup tasks
      ansible.builtin.include_tasks: borg-backup.yml
      tags:
        - borg-backup

    - name: Include Borg Local Sync tasks
      ansible.builtin.include_tasks: borg-local-sync.yml
      tags:
        - borg-local-sync

    - name: System performance optimizations
      ansible.posix.sysctl:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
        reload: true
      become: true
      loop:
        - { name: "fs.file-max", value: "2097152" } # Max open files for the entire system
        - { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have
        - { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory
        - { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency to reclaim memory for directory/inode caches
        - { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket
        - { name: "net.core.netdev_max_backlog", value: "65535" } # Max packets queued on network interface input
        - { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state
        - { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections

    - name: Include service tasks
      ansible.builtin.include_tasks: "services/{{ item.name }}/{{ item.name }}.yml"
      # All enabled services assigned to this host; if specific_service is
      # set (e.g. via --extra-vars), run only that one (still subject to
      # enabled + host checks). Folded for readability — same expression.
      loop: >-
        {{ services
           | selectattr('enabled', 'equalto', true)
           | selectattr('hosts', 'contains', inventory_hostname)
           | list
           if specific_service is not defined
           else services
           | selectattr('name', 'equalto', specific_service)
           | selectattr('enabled', 'equalto', true)
           | selectattr('hosts', 'contains', inventory_hostname)
           | list }}
      loop_control:
        label: "{{ item.name }}"
      tags:
        - services
        - always

  vars:
    # Service catalogue: each entry is deployed from
    # services/<name>/<name>.yml on the listed hosts when enabled.
    services:
      - name: dashy
        enabled: true
        hosts:
          - mennos-desktop
      - name: gitea
        enabled: true
        hosts:
          - mennos-desktop
      - name: factorio
        enabled: true
        hosts:
          - mennos-desktop
      - name: dozzle
        enabled: true
        hosts:
          - mennos-desktop
      - name: beszel
        enabled: true
        hosts:
          - mennos-desktop
      - name: caddy
        enabled: true
        hosts:
          - mennos-desktop
      - name: golink
        enabled: true
        hosts:
          - mennos-desktop
      - name: immich
        enabled: true
        hosts:
          - mennos-desktop
      - name: plex
        enabled: true
        hosts:
          - mennos-desktop
      - name: tautulli
        enabled: true
        hosts:
          - mennos-desktop
      - name: stash
        enabled: true
        hosts:
          - mennos-desktop
      - name: downloaders
        enabled: true
        hosts:
          - mennos-desktop
      - name: wireguard
        enabled: true
        hosts:
          - mennos-desktop
      - name: nextcloud
        enabled: true
        hosts:
          - mennos-desktop
      - name: echoip
        enabled: true
        hosts:
          - mennos-desktop
      - name: arr-stack
        enabled: true
        hosts:
          - mennos-desktop
      - name: home-assistant
        enabled: true
        hosts:
          - mennos-desktop
      - name: privatebin
        enabled: true
        hosts:
          - mennos-desktop
      - name: unifi-network-application
        enabled: true
        hosts:
          - mennos-desktop
      - name: avorion
        enabled: true
        hosts:
          - mennos-desktop
38
ansible/tasks/servers/services/arr-stack/arr-stack.yml
Normal file
38
ansible/tasks/servers/services/arr-stack/arr-stack.yml
Normal file
@@ -0,0 +1,38 @@
|
||||
---
# Renders the arr-stack docker-compose file and (re)starts the stack
# whenever the template output changes.
- name: Deploy ArrStack service
  block:
    - name: Set ArrStack directories
      ansible.builtin.set_fact:
        arr_stack_service_dir: "{{ ansible_env.HOME }}/.services/arr-stack"
        arr_stack_data_dir: "/mnt/services/arr-stack"

    - name: Create ArrStack directory
      ansible.builtin.file:
        path: "{{ arr_stack_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create ArrStack data directory
      ansible.builtin.file:
        path: "{{ arr_stack_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy ArrStack docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ arr_stack_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: arr_stack_template_result

    # Full down/up cycle only when the compose file changed, so unchanged
    # runs leave the containers alone.
    - name: Stop ArrStack service
      ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" down --remove-orphans
      when: arr_stack_template_result.changed

    - name: Start ArrStack service
      ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" up -d
      when: arr_stack_template_result.changed
  tags:
    - services
    - arr_stack
    - arr-stack
181
ansible/tasks/servers/services/arr-stack/docker-compose.yml.j2
Normal file
181
ansible/tasks/servers/services/arr-stack/docker-compose.yml.j2
Normal file
@@ -0,0 +1,181 @@
|
||||
name: arr-stack
services:
  radarr:
    container_name: radarr
    image: lscr.io/linuxserver/radarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      # quoted to keep Compose from ever type-coercing host:container pairs
      - "7878:7878"
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - {{ arr_stack_data_dir }}/radarr-config:/config
      - /mnt/data:/mnt/data
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 2G

  sonarr:
    image: linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ arr_stack_data_dir }}/sonarr-config:/config
      - /mnt/data:/mnt/data
    ports:
      - "8989:8989"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 2G

  whisparr:
    image: ghcr.io/hotio/whisparr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      - "6969:6969"
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - {{ arr_stack_data_dir }}/whisparr-config:/config
      - /mnt/data:/mnt/data
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 2G

  prowlarr:
    container_name: prowlarr
    image: linuxserver/prowlarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ arr_stack_data_dir }}/prowlarr-config:/config
    extra_hosts:
      - host.docker.internal:host-gateway
    ports:
      - "9696:9696"
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 512M

  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    environment:
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - LOG_HTML=${LOG_HTML:-false}
      - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
      - TZ=Europe/Amsterdam
    ports:
      - "8191:8191"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 1G

  overseerr:
    image: sctx/overseerr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ arr_stack_data_dir }}/overseerr-config:/app/config
    ports:
      - "5055:5055"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr_stack_net
      # overseerr is also exposed through the Caddy reverse proxy
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 512M

  tdarr:
    image: ghcr.io/haveagitgat/tdarr:latest
    container_name: tdarr
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - serverIP=0.0.0.0
      - serverPort=8266
      - webUIPort=8265
      - internalNode=true
      - inContainer=true
      - ffmpegVersion=7
      - nodeName=MyInternalNode
      - auth=false
      - openBrowser=true
      - maxLogSizeMB=10
      - cronPluginUpdate=
      - NVIDIA_DRIVER_CAPABILITIES=all
      - NVIDIA_VISIBLE_DEVICES=all
    volumes:
      - {{ arr_stack_data_dir }}/tdarr-server:/app/server
      - {{ arr_stack_data_dir }}/tdarr-config:/app/configs
      - {{ arr_stack_data_dir }}/tdarr-logs:/app/logs
      - /mnt/data:/media
      - {{ arr_stack_data_dir }}/tdarr-cache:/temp
    ports:
      - "8265:8265"
      - "8266:8266"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    runtime: nvidia
    devices:
      - /dev/dri:/dev/dri
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

networks:
  arr_stack_net:
  caddy_network:
    external: true
    name: caddy_default
37
ansible/tasks/servers/services/avorion/avorion.yml
Normal file
37
ansible/tasks/servers/services/avorion/avorion.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
# Renders the Avorion docker-compose file and (re)starts the game server
# whenever the template output changes.
- name: Deploy Avorion service
  block:
    - name: Set Avorion directories
      ansible.builtin.set_fact:
        avorion_service_dir: "{{ ansible_env.HOME }}/.services/avorion"
        avorion_data_dir: "/mnt/services/avorion"

    - name: Create Avorion directory
      ansible.builtin.file:
        path: "{{ avorion_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Avorion data directory
      ansible.builtin.file:
        path: "{{ avorion_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Avorion docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ avorion_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: avorion_compose

    # Full down/up cycle only when the compose file changed.
    - name: Stop Avorion service
      ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" down --remove-orphans
      when: avorion_compose.changed

    - name: Start Avorion service
      ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" up -d
      when: avorion_compose.changed
  tags:
    - services
    - avorion
15
ansible/tasks/servers/services/avorion/docker-compose.yml.j2
Normal file
15
ansible/tasks/servers/services/avorion/docker-compose.yml.j2
Normal file
@@ -0,0 +1,15 @@
|
||||
services:
  avorion:
    image: rfvgyhn/avorion:latest
    volumes:
      - {{ avorion_data_dir }}:/home/steam/.avorion/galaxies/avorion_galaxy
    ports:
      # quoted so the host:container pairs always parse as strings
      - "27000:27000"
      - "27000:27000/udp"
      - "27003:27003/udp"
      - "27020:27020/udp"
      - "27021:27021/udp"
    deploy:
      resources:
        limits:
          memory: 4G
37
ansible/tasks/servers/services/beszel/beszel.yml
Normal file
37
ansible/tasks/servers/services/beszel/beszel.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
- name: Deploy Beszel service
|
||||
block:
|
||||
- name: Set Beszel directories
|
||||
ansible.builtin.set_fact:
|
||||
beszel_service_dir: "{{ ansible_env.HOME }}/.services/beszel"
|
||||
beszel_data_dir: "/mnt/services/beszel"
|
||||
|
||||
- name: Create Beszel directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ beszel_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create Beszel data directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ beszel_data_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Deploy Beszel docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ beszel_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: beszel_compose
|
||||
|
||||
- name: Stop Beszel service
|
||||
ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: beszel_compose.changed
|
||||
|
||||
- name: Start Beszel service
|
||||
ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" up -d
|
||||
when: beszel_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- beszel
|
||||
37
ansible/tasks/servers/services/beszel/docker-compose.yml.j2
Normal file
37
ansible/tasks/servers/services/beszel/docker-compose.yml.j2
Normal file
@@ -0,0 +1,37 @@
|
||||
services:
|
||||
beszel:
|
||||
image: 'henrygd/beszel'
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- '8090:8090'
|
||||
volumes:
|
||||
- {{beszel_data_dir}}/data:/beszel_data
|
||||
- {{beszel_data_dir}}/socket:/beszel_socket
|
||||
networks:
|
||||
- beszel-net
|
||||
- caddy_network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 256M
|
||||
|
||||
beszel-agent:
|
||||
image: henrygd/beszel-agent:latest
|
||||
restart: unless-stopped
|
||||
network_mode: host
|
||||
volumes:
|
||||
- {{beszel_data_dir}}/socket:/beszel_socket
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
LISTEN: /beszel_socket/beszel.sock
|
||||
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKkSIQDh1vS8lG+2Uw/9dK1eOgCHVCgQfP+Bfk4XPkdn'
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 128M
|
||||
|
||||
networks:
|
||||
beszel-net:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
228
ansible/tasks/servers/services/caddy/Caddyfile.j2
Normal file
228
ansible/tasks/servers/services/caddy/Caddyfile.j2
Normal file
@@ -0,0 +1,228 @@
|
||||
# Global configuration for country blocking
{
    servers {
        protocols h1 h2 h3
    }
}

# Country blocking snippet using MaxMind GeoLocation - reusable across all sites
{% if enable_country_blocking | default(false) and allowed_countries_codes | default([]) | length > 0 %}
(country_block) {
    @allowed_local {
        remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
    }
    @not_allowed_countries {
        not remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
        not {
            maxmind_geolocation {
                db_path "/etc/caddy/geoip/GeoLite2-Country.mmdb"
                allow_countries {{ allowed_countries_codes | join(' ') }}
            }
        }
    }
    respond @not_allowed_countries "Access denied" 403
}
{% else %}
(country_block) {
    # Country blocking disabled
}
{% endif %}

{% if inventory_hostname == 'mennos-desktop' %}
git.mvl.sh {
    import country_block
    reverse_proxy gitea:3000
    tls {{ caddy_email }}
}

git.vleeuwen.me {
    import country_block
    redir https://git.mvl.sh{uri}
    tls {{ caddy_email }}
}

df.mvl.sh {
    import country_block
    redir / https://git.mvl.sh/vleeuwenmenno/dotfiles/raw/branch/master/setup.sh
    tls {{ caddy_email }}
}

fsm.mvl.sh {
    import country_block
    reverse_proxy factorio-server-manager:80
    tls {{ caddy_email }}
}

fsm.vleeuwen.me {
    import country_block
    redir https://fsm.mvl.sh{uri}
    tls {{ caddy_email }}
}

beszel.mvl.sh {
    import country_block
    reverse_proxy beszel:8090
    tls {{ caddy_email }}
}

beszel.vleeuwen.me {
    import country_block
    redir https://beszel.mvl.sh{uri}
    tls {{ caddy_email }}
}

photos.mvl.sh {
    import country_block
    reverse_proxy immich:2283
    tls {{ caddy_email }}
}

photos.vleeuwen.me {
    import country_block
    redir https://photos.mvl.sh{uri}
    tls {{ caddy_email }}
}

home.mvl.sh {
    import country_block
    reverse_proxy host.docker.internal:8123 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}

home.vleeuwen.me {
    import country_block
    reverse_proxy host.docker.internal:8123 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}


unifi.mvl.sh {
    reverse_proxy unifi-controller:8443 {
        transport http {
            tls_insecure_skip_verify
        }
        header_up Host {host}
    }
    tls {{ caddy_email }}
}

hotspot.mvl.sh {
    reverse_proxy unifi-controller:8843 {
        transport http {
            tls_insecure_skip_verify
        }
        header_up Host {host}
    }
    tls {{ caddy_email }}
}

hotspot.mvl.sh:80 {
    redir https://hotspot.mvl.sh{uri} permanent
}

bin.mvl.sh {
    import country_block
    reverse_proxy privatebin:8080
    tls {{ caddy_email }}
}

ip.mvl.sh ip.vleeuwen.me {
    import country_block
    reverse_proxy echoip:8080 {
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}

http://ip.mvl.sh http://ip.vleeuwen.me {
    import country_block
    reverse_proxy echoip:8080 {
        header_up X-Real-IP {http.request.remote.host}
    }
}

overseerr.mvl.sh {
    import country_block
    reverse_proxy overseerr:5055
    tls {{ caddy_email }}
}

overseerr.vleeuwen.me {
    import country_block
    redir https://overseerr.mvl.sh{uri}
    tls {{ caddy_email }}
}

plex.mvl.sh {
    import country_block
    reverse_proxy host.docker.internal:32400 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}

plex.vleeuwen.me {
    import country_block
    redir https://plex.mvl.sh{uri}
    tls {{ caddy_email }}
}

tautulli.mvl.sh {
    import country_block
    reverse_proxy host.docker.internal:8181 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}

tautulli.vleeuwen.me {
    import country_block
    redir https://tautulli.mvl.sh{uri}
    tls {{ caddy_email }}
}

drive.mvl.sh drive.vleeuwen.me {
    import country_block

    # CalDAV and CardDAV redirects
    redir /.well-known/carddav /remote.php/dav/ 301
    redir /.well-known/caldav /remote.php/dav/ 301

    # Handle other .well-known requests
    handle /.well-known/* {
        reverse_proxy nextcloud:80 {
            header_up Host {host}
            header_up X-Real-IP {http.request.remote.host}
        }
    }

    # Main reverse proxy configuration with proper headers
    reverse_proxy nextcloud:80 {
        header_up Host {host}
        header_up X-Real-IP {http.request.remote.host}
    }

    # Security headers
    header {
        # HSTS header for enhanced security (required by Nextcloud)
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        # Additional security headers recommended for Nextcloud
        X-Content-Type-Options "nosniff"
        X-Frame-Options "SAMEORIGIN"
        Referrer-Policy "no-referrer"
        X-XSS-Protection "1; mode=block"
        X-Permitted-Cross-Domain-Policies "none"
        X-Robots-Tag "noindex, nofollow"
    }

    tls {{ caddy_email }}
}
{% endif %}
15
ansible/tasks/servers/services/caddy/Dockerfile
Normal file
15
ansible/tasks/servers/services/caddy/Dockerfile
Normal file
@@ -0,0 +1,15 @@
|
||||
FROM caddy:2.9.1-builder AS builder
|
||||
|
||||
RUN xcaddy build \
|
||||
--with github.com/porech/caddy-maxmind-geolocation
|
||||
|
||||
FROM caddy:2.9.1-alpine
|
||||
|
||||
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
|
||||
|
||||
# Create directory for MaxMind databases and logs
|
||||
RUN mkdir -p /etc/caddy/geoip /var/log/caddy
|
||||
|
||||
EXPOSE 80 443
|
||||
|
||||
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]
|
||||
59
ansible/tasks/servers/services/caddy/caddy.yml
Normal file
59
ansible/tasks/servers/services/caddy/caddy.yml
Normal file
@@ -0,0 +1,59 @@
|
||||
---
|
||||
- name: Deploy Caddy service
|
||||
block:
|
||||
- name: Set Caddy directories
|
||||
ansible.builtin.set_fact:
|
||||
caddy_service_dir: "{{ ansible_env.HOME }}/.services/caddy"
|
||||
caddy_data_dir: "/mnt/services/caddy"
|
||||
geoip_db_path: "/mnt/services/echoip"
|
||||
caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}"
|
||||
|
||||
- name: Create Caddy directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ caddy_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Setup country blocking
|
||||
ansible.builtin.include_tasks: country-blocking.yml
|
||||
|
||||
- name: Copy Dockerfile for custom Caddy build
|
||||
ansible.builtin.copy:
|
||||
src: Dockerfile
|
||||
dest: "{{ caddy_service_dir }}/Dockerfile"
|
||||
mode: "0644"
|
||||
register: caddy_dockerfile
|
||||
|
||||
- name: Create Caddy network
|
||||
ansible.builtin.command: docker network create caddy_default
|
||||
register: create_caddy_network
|
||||
failed_when:
|
||||
- create_caddy_network.rc != 0
|
||||
- "'already exists' not in create_caddy_network.stderr"
|
||||
changed_when: create_caddy_network.rc == 0
|
||||
|
||||
- name: Deploy Caddy docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ caddy_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: caddy_compose
|
||||
|
||||
- name: Deploy Caddy Caddyfile
|
||||
ansible.builtin.template:
|
||||
src: Caddyfile.j2
|
||||
dest: "{{ caddy_service_dir }}/Caddyfile"
|
||||
mode: "0644"
|
||||
register: caddy_file
|
||||
|
||||
- name: Stop Caddy service
|
||||
ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: caddy_compose.changed or caddy_file.changed
|
||||
|
||||
- name: Start Caddy service
|
||||
ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d
|
||||
when: caddy_compose.changed or caddy_file.changed
|
||||
tags:
|
||||
- caddy
|
||||
- services
|
||||
- reverse-proxy
|
||||
50
ansible/tasks/servers/services/caddy/country-blocking.yml
Normal file
50
ansible/tasks/servers/services/caddy/country-blocking.yml
Normal file
@@ -0,0 +1,50 @@
|
||||
---
|
||||
- name: Country blocking setup for Caddy with MaxMind GeoLocation
|
||||
block:
|
||||
- name: Copy Dockerfile for custom Caddy build with GeoIP
|
||||
ansible.builtin.copy:
|
||||
src: Dockerfile
|
||||
dest: "{{ caddy_service_dir }}/Dockerfile"
|
||||
mode: "0644"
|
||||
when: enable_country_blocking | default(false)
|
||||
|
||||
- name: Check if MaxMind Country database is available
|
||||
ansible.builtin.stat:
|
||||
path: "{{ geoip_db_path }}/GeoLite2-Country.mmdb"
|
||||
register: maxmind_country_db
|
||||
when: enable_country_blocking | default(false)
|
||||
|
||||
- name: Ensure log directory exists for Caddy
|
||||
ansible.builtin.file:
|
||||
path: "{{ caddy_data_dir }}/logs"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
become: true
|
||||
when: enable_country_blocking | default(false)
|
||||
|
||||
- name: Display country blocking configuration
|
||||
ansible.builtin.debug:
|
||||
msg:
|
||||
- "✅ Country blocking enabled: {{ enable_country_blocking | default(false) }}"
|
||||
- "🛡️ Countries to allow: {{ allowed_countries_codes | default([]) | join(', ') }}"
|
||||
- "📍 Using MaxMind GeoLocation plugin"
|
||||
- "💾 Database path: /etc/caddy/geoip/GeoLite2-Country.mmdb"
|
||||
- "📊 Database available: {{ maxmind_country_db.stat.exists | default(false) }}"
|
||||
when: enable_country_blocking | default(false)
|
||||
|
||||
- name: Warn if MaxMind database not found
|
||||
ansible.builtin.debug:
|
||||
msg:
|
||||
- "⚠️ WARNING: MaxMind Country database not found!"
|
||||
- "Expected location: {{ geoip_db_path }}/GeoLite2-Country.mmdb"
|
||||
- "Country blocking will not work until EchoIP service is deployed"
|
||||
- "Run: dotf update --ansible --tags echoip"
|
||||
when:
|
||||
- enable_country_blocking | default(false)
|
||||
- not maxmind_country_db.stat.exists | default(false)
|
||||
|
||||
tags:
|
||||
- caddy
|
||||
- security
|
||||
- country-blocking
|
||||
- geoip
|
||||
32
ansible/tasks/servers/services/caddy/docker-compose.yml.j2
Normal file
32
ansible/tasks/servers/services/caddy/docker-compose.yml.j2
Normal file
@@ -0,0 +1,32 @@
|
||||
services:
|
||||
caddy:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
volumes:
|
||||
- {{ caddy_data_dir }}/data:/data
|
||||
- {{ caddy_data_dir }}/config:/config
|
||||
- {{ caddy_service_dir }}/Caddyfile:/etc/caddy/Caddyfile
|
||||
- {{ geoip_db_path }}:/etc/caddy/geoip:ro
|
||||
- {{ caddy_data_dir }}/logs:/var/log/caddy
|
||||
environment:
|
||||
- TZ=Europe/Amsterdam
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
networks:
|
||||
- caddy_network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 512M
|
||||
|
||||
networks:
|
||||
caddy_network:
|
||||
name: caddy_default
|
||||
enable_ipv6: true
|
||||
324
ansible/tasks/servers/services/dashy/conf.yml.j2
Normal file
324
ansible/tasks/servers/services/dashy/conf.yml.j2
Normal file
@@ -0,0 +1,324 @@
|
||||
pageInfo:
|
||||
title: Menno's Home
|
||||
navLinks: []
|
||||
sections:
|
||||
- name: Selfhosted
|
||||
items:
|
||||
- title: Plex
|
||||
icon: http://mennos-desktop:4000/assets/plex.svg
|
||||
url: https://plex.mvl.sh
|
||||
statusCheckUrl: https://plex.mvl.sh/identity
|
||||
statusCheck: true
|
||||
id: 0_1035_plex
|
||||
- title: Tautulli
|
||||
icon: http://mennos-desktop:4000/assets/tautulli.svg
|
||||
url: https://tautulli.mvl.sh
|
||||
id: 1_1035_tautulli
|
||||
statusCheck: true
|
||||
- title: Overseerr
|
||||
icon: http://mennos-desktop:4000/assets/overseerr.svg
|
||||
url: https://overseerr.mvl.sh
|
||||
id: 2_1035_overseerr
|
||||
statusCheck: true
|
||||
- title: Immich
|
||||
icon: http://mennos-desktop:4000/assets/immich.svg
|
||||
url: https://photos.mvl.sh
|
||||
id: 3_1035_immich
|
||||
statusCheck: true
|
||||
- title: Nextcloud
|
||||
icon: http://mennos-desktop:4000/assets/nextcloud.svg
|
||||
url: https://drive.mvl.sh
|
||||
id: 3_1035_nxtcld
|
||||
statusCheck: true
|
||||
- title: ComfyUI
|
||||
icon: http://mennos-desktop:8188/assets/favicon.ico
|
||||
url: http://mennos-desktop:8188
|
||||
statusCheckUrl: http://host.docker.internal:8188/api/system_stats
|
||||
id: 3_1035_comfyui
|
||||
statusCheck: true
|
||||
displayData:
|
||||
sortBy: default
|
||||
rows: 1
|
||||
cols: 2
|
||||
collapsed: false
|
||||
hideForGuests: false
|
||||
- name: Media Management
|
||||
items:
|
||||
- title: Sonarr
|
||||
icon: http://mennos-desktop:4000/assets/sonarr.svg
|
||||
url: http://go/sonarr
|
||||
id: 0_1533_sonarr
|
||||
- title: Radarr
|
||||
icon: http://mennos-desktop:4000/assets/radarr.svg
|
||||
url: http://go/radarr
|
||||
id: 1_1533_radarr
|
||||
- title: Prowlarr
|
||||
icon: http://mennos-desktop:4000/assets/prowlarr.svg
|
||||
url: http://go/prowlarr
|
||||
id: 2_1533_prowlarr
|
||||
- title: Tdarr
|
||||
icon: http://mennos-desktop:4000/assets/tdarr.png
|
||||
url: http://go/tdarr
|
||||
id: 3_1533_tdarr
|
||||
- name: Kagi
|
||||
items:
|
||||
- title: Kagi Search
|
||||
icon: favicon
|
||||
url: https://kagi.com/
|
||||
id: 0_380_kagisearch
|
||||
- title: Kagi Translate
|
||||
icon: favicon
|
||||
url: https://translate.kagi.com/
|
||||
id: 1_380_kagitranslate
|
||||
- title: Kagi Assistant
|
||||
icon: favicon
|
||||
url: https://kagi.com/assistant
|
||||
id: 2_380_kagiassistant
|
||||
- name: News
|
||||
items:
|
||||
- title: Nu.nl
|
||||
icon: http://mennos-desktop:4000/assets/nunl.svg
|
||||
url: https://www.nu.nl/
|
||||
id: 0_380_nu
|
||||
- title: Tweakers.net
|
||||
icon: favicon
|
||||
url: https://www.tweakers.net/
|
||||
id: 1_380_tweakers
|
||||
- title: NL Times
|
||||
icon: favicon
|
||||
url: https://www.nltimes.nl/
|
||||
id: 2_380_nl_times
|
||||
- name: Downloaders
|
||||
items:
|
||||
- title: qBittorrent
|
||||
icon: http://mennos-desktop:4000/assets/qbittorrent.svg
|
||||
url: http://go/qbit
|
||||
id: 0_1154_qbittorrent
|
||||
tags:
|
||||
- download
|
||||
- torrent
|
||||
- yarr
|
||||
- title: Sabnzbd
|
||||
icon: http://mennos-desktop:4000/assets/sabnzbd.svg
|
||||
url: http://go/sabnzbd
|
||||
id: 1_1154_sabnzbd
|
||||
tags:
|
||||
- download
|
||||
- nzb
|
||||
- yarr
|
||||
- name: Git
|
||||
items:
|
||||
- title: GitHub
|
||||
icon: http://mennos-desktop:4000/assets/github.svg
|
||||
url: https://github.com/vleeuwenmenno
|
||||
id: 0_292_github
|
||||
tags:
|
||||
- repo
|
||||
- git
|
||||
- hub
|
||||
- title: Gitea
|
||||
icon: http://mennos-desktop:4000/assets/gitea.svg
|
||||
url: http://git.mvl.sh/vleeuwenmenno
|
||||
id: 1_292_gitea
|
||||
tags:
|
||||
- repo
|
||||
- git
|
||||
- tea
|
||||
- name: Server Monitoring
|
||||
items:
|
||||
- title: Beszel
|
||||
icon: http://mennos-desktop:4000/assets/beszel.svg
|
||||
url: http://go/beszel
|
||||
tags:
|
||||
- monitoring
|
||||
- logs
|
||||
id: 0_1725_beszel
|
||||
- title: Dozzle
|
||||
icon: http://mennos-desktop:4000/assets/dozzle.svg
|
||||
url: http://go/dozzle
|
||||
id: 1_1725_dozzle
|
||||
tags:
|
||||
- monitoring
|
||||
- logs
|
||||
- title: UpDown.io Status
|
||||
icon: far fa-signal
|
||||
url: http://go/status
|
||||
id: 2_1725_updowniostatus
|
||||
tags:
|
||||
- monitoring
|
||||
- logs
|
||||
- name: Tools
|
||||
items:
|
||||
- title: Home Assistant
|
||||
icon: http://mennos-desktop:4000/assets/home-assistant.svg
|
||||
url: http://go/homeassistant
|
||||
id: 0_529_homeassistant
|
||||
- title: Tailscale
|
||||
icon: http://mennos-desktop:4000/assets/tailscale.svg
|
||||
url: http://go/tailscale
|
||||
id: 1_529_tailscale
|
||||
- title: GliNet KVM
|
||||
icon: http://mennos-desktop:4000/assets/glinet.svg
|
||||
url: http://go/glkvm
|
||||
id: 2_529_glinetkvm
|
||||
- title: Unifi Network Controller
|
||||
icon: http://mennos-desktop:4000/assets/unifi.svg
|
||||
url: http://go/unifi
|
||||
id: 3_529_unifinetworkcontroller
|
||||
- title: Dashboard Icons
|
||||
icon: favicon
|
||||
url: https://dashboardicons.com/
|
||||
id: 4_529_dashboardicons
|
||||
- name: Weather
|
||||
items:
|
||||
- title: Buienradar
|
||||
icon: favicon
|
||||
url: https://www.buienradar.nl/weer/Beverwijk/NL/2758998
|
||||
id: 0_529_buienradar
|
||||
- title: ClearOutside
|
||||
icon: favicon
|
||||
url: https://clearoutside.com/forecast/52.49/4.66
|
||||
id: 1_529_clearoutside
|
||||
- title: Windy
|
||||
icon: favicon
|
||||
url: https://www.windy.com/
|
||||
id: 2_529_windy
|
||||
- title: Meteoblue
|
||||
icon: favicon
|
||||
url: https://www.meteoblue.com/en/country/weather/radar/the-netherlands_the-netherlands_2750405
|
||||
id: 2_529_meteoblue
|
||||
- name: DiscountOffice
|
||||
displayData:
|
||||
sortBy: default
|
||||
rows: 1
|
||||
cols: 3
|
||||
collapsed: false
|
||||
hideForGuests: false
|
||||
items:
|
||||
- title: DiscountOffice.nl
|
||||
icon: favicon
|
||||
url: https://discountoffice.nl/
|
||||
id: 0_1429_discountofficenl
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: DiscountOffice.be
|
||||
icon: favicon
|
||||
url: https://discountoffice.be/
|
||||
id: 1_1429_discountofficebe
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: Admin NL
|
||||
icon: favicon
|
||||
url: https://discountoffice.nl/administrator
|
||||
id: 2_1429_adminnl
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: Admin BE
|
||||
icon: favicon
|
||||
url: https://discountoffice.be/administrator
|
||||
id: 3_1429_adminbe
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: Subsites
|
||||
icon: favicon
|
||||
url: https://elastomappen.nl
|
||||
id: 4_1429_subsites
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: Proxmox
|
||||
icon: http://mennos-desktop:4000/assets/proxmox.svg
|
||||
url: https://www.transip.nl/cp/vps/prm/350680/
|
||||
id: 5_1429_proxmox
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: Transip
|
||||
icon: favicon
|
||||
url: https://www.transip.nl/cp/vps/prm/350680/
|
||||
id: 6_1429_transip
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- title: Kibana
|
||||
icon: http://mennos-desktop:4000/assets/kibana.svg
|
||||
url: http://go/kibana
|
||||
id: 7_1429_kibana
|
||||
tags:
|
||||
- do
|
||||
- discount
|
||||
- work
|
||||
- name: Other
|
||||
items:
|
||||
- title: Whisparr
|
||||
icon: http://mennos-desktop:4000/assets/whisparr.svg
|
||||
url: http://go/whisparr
|
||||
id: 0_514_whisparr
|
||||
- title: Stash
|
||||
icon: http://mennos-desktop:4000/assets/stash.svg
|
||||
url: http://go/stash
|
||||
id: 1_514_stash
|
||||
displayData:
|
||||
sortBy: default
|
||||
rows: 1
|
||||
cols: 1
|
||||
collapsed: true
|
||||
hideForGuests: true
|
||||
appConfig:
|
||||
layout: auto
|
||||
iconSize: large
|
||||
theme: nord
|
||||
startingView: default
|
||||
defaultOpeningMethod: sametab
|
||||
statusCheck: false
|
||||
statusCheckInterval: 0
|
||||
routingMode: history
|
||||
enableMultiTasking: false
|
||||
widgetsAlwaysUseProxy: false
|
||||
webSearch:
|
||||
disableWebSearch: false
|
||||
searchEngine: https://kagi.com/search?q=
|
||||
openingMethod: newtab
|
||||
searchBangs: {}
|
||||
enableFontAwesome: true
|
||||
enableMaterialDesignIcons: false
|
||||
hideComponents:
|
||||
hideHeading: false
|
||||
hideNav: true
|
||||
hideSearch: false
|
||||
hideSettings: true
|
||||
hideFooter: false
|
||||
auth:
|
||||
enableGuestAccess: false
|
||||
users: []
|
||||
enableOidc: false
|
||||
oidc:
|
||||
adminRole: "false"
|
||||
adminGroup: "false"
|
||||
enableHeaderAuth: false
|
||||
headerAuth:
|
||||
userHeader: REMOTE_USER
|
||||
proxyWhitelist: []
|
||||
enableKeycloak: false
|
||||
showSplashScreen: false
|
||||
preventWriteToDisk: false
|
||||
preventLocalSave: false
|
||||
disableConfiguration: false
|
||||
disableConfigurationForNonAdmin: false
|
||||
allowConfigEdit: true
|
||||
enableServiceWorker: false
|
||||
disableContextMenu: false
|
||||
disableUpdateChecks: false
|
||||
disableSmartSort: false
|
||||
enableErrorReporting: false
|
||||
44
ansible/tasks/servers/services/dashy/dashy.yml
Normal file
44
ansible/tasks/servers/services/dashy/dashy.yml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
- name: Deploy Dashy service
|
||||
block:
|
||||
- name: Set Dashy directories
|
||||
ansible.builtin.set_fact:
|
||||
dashy_service_dir: "{{ ansible_env.HOME }}/.services/dashy"
|
||||
dashy_data_dir: "/mnt/services/dashy"
|
||||
|
||||
- name: Create Dashy directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ dashy_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create Dashy data directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ dashy_data_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Deploy Dashy docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ dashy_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: dashy_compose
|
||||
|
||||
- name: Deploy Dashy config.yml
|
||||
ansible.builtin.template:
|
||||
src: conf.yml.j2
|
||||
dest: "{{ dashy_data_dir }}/conf.yml"
|
||||
mode: "0644"
|
||||
register: dashy_config
|
||||
|
||||
- name: Stop Dashy service
|
||||
ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: dashy_compose.changed
|
||||
|
||||
- name: Start Dashy service
|
||||
ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" up -d
|
||||
when: dashy_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- dashy
|
||||
21
ansible/tasks/servers/services/dashy/docker-compose.yml.j2
Normal file
21
ansible/tasks/servers/services/dashy/docker-compose.yml.j2
Normal file
@@ -0,0 +1,21 @@
|
||||
services:
|
||||
dashy:
|
||||
image: lissy93/dashy:latest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 4000:8080
|
||||
volumes:
|
||||
- {{dashy_data_dir}}/:/app/user-data
|
||||
networks:
|
||||
- caddy_network
|
||||
extra_hosts:
|
||||
- host.docker.internal:host-gateway
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 2G
|
||||
|
||||
networks:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
@@ -0,0 +1,71 @@
|
||||
name: downloaders
|
||||
services:
|
||||
gluetun:
|
||||
image: qmcgaw/gluetun:latest
|
||||
privileged: true
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
networks:
|
||||
- arr_stack_net
|
||||
ports:
|
||||
- 6881:6881
|
||||
- 6881:6881/udp
|
||||
- 8085:8085 # Qbittorrent
|
||||
devices:
|
||||
- /dev/net/tun:/dev/net/tun
|
||||
volumes:
|
||||
- {{ downloaders_data_dir }}/gluetun-config:/gluetun
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
- VPN_SERVICE_PROVIDER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='VPN_SERVICE_PROVIDER') }}
|
||||
- OPENVPN_USER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_USER') }}
|
||||
- OPENVPN_PASSWORD={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_PASSWORD') }}
|
||||
- SERVER_COUNTRIES={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='SERVER_COUNTRIES') }}
|
||||
restart: always
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 512M
|
||||
|
||||
sabnzbd:
|
||||
image: lscr.io/linuxserver/sabnzbd:latest
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
- TZ=Europe/Amsterdam
|
||||
volumes:
|
||||
- {{ downloaders_data_dir }}/sabnzbd-config:/config
|
||||
- {{ local_data_dir }}:{{ local_data_dir }}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 7788:8080
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1G
|
||||
|
||||
qbittorrent:
|
||||
image: lscr.io/linuxserver/qbittorrent
|
||||
network_mode: "service:gluetun"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
- WEBUI_PORT=8085
|
||||
- TZ=Europe/Amsterdam
|
||||
volumes:
|
||||
- {{ downloaders_data_dir }}/qbit-config:/config
|
||||
- {{ local_data_dir }}:{{ local_data_dir }}
|
||||
depends_on:
|
||||
gluetun:
|
||||
condition: service_healthy
|
||||
restart: always
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1G
|
||||
|
||||
networks:
|
||||
arr_stack_net:
|
||||
external: true
|
||||
name: arr_stack_net
|
||||
47
ansible/tasks/servers/services/downloaders/downloaders.yml
Normal file
47
ansible/tasks/servers/services/downloaders/downloaders.yml
Normal file
@@ -0,0 +1,47 @@
|
||||
---
|
||||
- name: Deploy Downloaders service
|
||||
block:
|
||||
- name: Set Downloaders directories
|
||||
ansible.builtin.set_fact:
|
||||
local_data_dir: "/mnt/data"
|
||||
downloaders_service_dir: "{{ ansible_env.HOME }}/.services/downloaders"
|
||||
downloaders_data_dir: "/mnt/services/downloaders"
|
||||
|
||||
- name: Create Downloaders directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ downloaders_data_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create Downloaders service directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ downloaders_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
owner: "{{ ansible_user }}"
|
||||
group: "{{ ansible_user }}"
|
||||
become: true
|
||||
|
||||
- name: Deploy Downloaders docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ downloaders_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: downloaders_compose
|
||||
|
||||
- name: Ensure arr_stack_net Docker network exists
|
||||
community.docker.docker_network:
|
||||
name: arr_stack_net
|
||||
driver: bridge
|
||||
state: present
|
||||
|
||||
- name: Stop Downloaders service
|
||||
ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: downloaders_compose.changed
|
||||
|
||||
- name: Start Downloaders service
|
||||
ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" up -d
|
||||
when: downloaders_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- downloaders
|
||||
23
ansible/tasks/servers/services/dozzle/docker-compose.yml.j2
Normal file
23
ansible/tasks/servers/services/dozzle/docker-compose.yml.j2
Normal file
@@ -0,0 +1,23 @@
|
||||
services:
|
||||
dozzle:
|
||||
image: amir20/dozzle:latest
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
ports:
|
||||
- 8800:8080
|
||||
environment:
|
||||
- DOZZLE_NO_ANALYTICS=true
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dozzle-net
|
||||
- caddy_network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 256M
|
||||
|
||||
networks:
|
||||
dozzle-net:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
37
ansible/tasks/servers/services/dozzle/dozzle.yml
Normal file
37
ansible/tasks/servers/services/dozzle/dozzle.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
- name: Deploy Dozzle service
|
||||
block:
|
||||
- name: Set Dozzle directories
|
||||
ansible.builtin.set_fact:
|
||||
dozzle_service_dir: "{{ ansible_env.HOME }}/.services/dozzle"
|
||||
dozzle_data_dir: "/mnt/services/dozzle"
|
||||
|
||||
- name: Create Dozzle directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ dozzle_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create Dozzle data directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ dozzle_data_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Deploy Dozzle docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ dozzle_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: dozzle_compose
|
||||
|
||||
- name: Stop Dozzle service
|
||||
ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: dozzle_compose.changed
|
||||
|
||||
- name: Start Dozzle service
|
||||
ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" up -d
|
||||
when: dozzle_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- dozzle
|
||||
27
ansible/tasks/servers/services/echoip/docker-compose.yml.j2
Normal file
27
ansible/tasks/servers/services/echoip/docker-compose.yml.j2
Normal file
@@ -0,0 +1,27 @@
|
||||
services:
|
||||
echoip:
|
||||
container_name: 'echoip'
|
||||
image: 'mpolden/echoip:latest'
|
||||
restart: unless-stopped
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
networks:
|
||||
- caddy_network
|
||||
volumes:
|
||||
- {{echoip_data_dir}}/GeoLite2-ASN.mmdb:/opt/echoip/GeoLite2-ASN.mmdb:ro
|
||||
- {{echoip_data_dir}}/GeoLite2-City.mmdb:/opt/echoip/GeoLite2-City.mmdb:ro
|
||||
- {{echoip_data_dir}}/GeoLite2-Country.mmdb:/opt/echoip/GeoLite2-Country.mmdb:ro
|
||||
command: >
|
||||
-p -r -H "X-Forwarded-For" -l ":8080"
|
||||
-a /opt/echoip/GeoLite2-ASN.mmdb
|
||||
-c /opt/echoip/GeoLite2-City.mmdb
|
||||
-f /opt/echoip/GeoLite2-Country.mmdb
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 128M
|
||||
|
||||
networks:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
169
ansible/tasks/servers/services/echoip/echoip.yml
Normal file
169
ansible/tasks/servers/services/echoip/echoip.yml
Normal file
@@ -0,0 +1,169 @@
|
||||
---
|
||||
- name: Deploy EchoIP service
|
||||
block:
|
||||
- name: Set EchoIP directories
|
||||
ansible.builtin.set_fact:
|
||||
echoip_service_dir: "{{ ansible_env.HOME }}/.services/echoip"
|
||||
echoip_data_dir: "/mnt/services/echoip"
|
||||
maxmind_account_id:
|
||||
"{{ lookup('community.general.onepassword', 'MaxMind',
|
||||
vault='Dotfiles', field='account_id') | regex_replace('\\s+', '') }}"
|
||||
maxmind_license_key:
|
||||
"{{ lookup('community.general.onepassword', 'MaxMind',
|
||||
vault='Dotfiles', field='license_key') | regex_replace('\\s+', '') }}"
|
||||
|
||||
# Requires: gather_facts: true in playbook
|
||||
- name: Check last update marker file
|
||||
ansible.builtin.stat:
|
||||
path: "{{ echoip_data_dir }}/.last_update"
|
||||
register: echoip_update_marker
|
||||
|
||||
- name: Determine if update is needed (older than 24h or missing)
|
||||
ansible.builtin.set_fact:
|
||||
update_needed: "{{ (not echoip_update_marker.stat.exists) or ((ansible_date_time.epoch | int) - (echoip_update_marker.stat.mtime | default(0) | int) > 86400) }}"
|
||||
|
||||
- name: Create EchoIP directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create EchoIP data directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
# Only update databases if needed (max once per 24h)
|
||||
- block:
|
||||
# Touch the marker file BEFORE attempting download to prevent repeated attempts on failure
|
||||
- name: Update last update marker file (pre-download)
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/.last_update"
|
||||
state: touch
|
||||
|
||||
# Create directories for extracted databases
|
||||
- name: Create directory for ASN database extraction
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/GeoLite2-ASN"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create directory for City database extraction
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/GeoLite2-City"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Create directory for Country database extraction
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/GeoLite2-Country"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
# Download all databases
|
||||
- name: Download GeoLite2 ASN database
|
||||
ansible.builtin.get_url:
|
||||
url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key={{ maxmind_license_key }}&suffix=tar.gz"
|
||||
dest: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
|
||||
mode: "0644"
|
||||
|
||||
- name: Download GeoLite2 City database
|
||||
ansible.builtin.get_url:
|
||||
url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={{ maxmind_license_key }}&suffix=tar.gz"
|
||||
dest: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
|
||||
mode: "0644"
|
||||
|
||||
- name: Download GeoLite2 Country database
|
||||
ansible.builtin.get_url:
|
||||
url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key={{ maxmind_license_key }}&suffix=tar.gz"
|
||||
dest: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
|
||||
mode: "0644"
|
||||
|
||||
# Extract all databases
|
||||
- name: Extract GeoLite2 ASN database
|
||||
ansible.builtin.unarchive:
|
||||
src: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
|
||||
dest: "{{ echoip_data_dir }}/GeoLite2-ASN"
|
||||
remote_src: true
|
||||
register: asn_extracted
|
||||
|
||||
- name: Extract GeoLite2 City database
|
||||
ansible.builtin.unarchive:
|
||||
src: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
|
||||
dest: "{{ echoip_data_dir }}/GeoLite2-City"
|
||||
remote_src: true
|
||||
register: city_extracted
|
||||
|
||||
- name: Extract GeoLite2 Country database
|
||||
ansible.builtin.unarchive:
|
||||
src: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
|
||||
dest: "{{ echoip_data_dir }}/GeoLite2-Country"
|
||||
remote_src: true
|
||||
register: country_extracted
|
||||
|
||||
# Move all databases to the correct locations
|
||||
- name: Move ASN database to correct location
|
||||
ansible.builtin.command:
|
||||
cmd: "find {{ echoip_data_dir }}/GeoLite2-ASN -name GeoLite2-ASN.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-ASN.mmdb \\;"
|
||||
when: asn_extracted.changed
|
||||
|
||||
- name: Move City database to correct location
|
||||
ansible.builtin.command:
|
||||
cmd: "find {{ echoip_data_dir }}/GeoLite2-City -name GeoLite2-City.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-City.mmdb \\;"
|
||||
when: city_extracted.changed
|
||||
|
||||
- name: Move Country database to correct location
|
||||
ansible.builtin.command:
|
||||
cmd: "find {{ echoip_data_dir }}/GeoLite2-Country -name GeoLite2-Country.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-Country.mmdb \\;"
|
||||
when: country_extracted.changed
|
||||
|
||||
# Clean up unnecessary files
|
||||
- name: Remove downloaded tar.gz files
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
|
||||
state: absent
|
||||
|
||||
- name: Remove extracted ASN folder
|
||||
ansible.builtin.command:
|
||||
cmd: "rm -rf {{ echoip_data_dir }}/GeoLite2-ASN"
|
||||
|
||||
- name: Remove downloaded City tar.gz file
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
|
||||
state: absent
|
||||
|
||||
- name: Remove extracted City folder
|
||||
ansible.builtin.command:
|
||||
cmd: "rm -rf {{ echoip_data_dir }}/GeoLite2-City"
|
||||
|
||||
- name: Remove downloaded Country tar.gz file
|
||||
ansible.builtin.file:
|
||||
path: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
|
||||
state: absent
|
||||
|
||||
- name: Remove extracted Country folder
|
||||
ansible.builtin.command:
|
||||
cmd: "rm -rf {{ echoip_data_dir }}/GeoLite2-Country"
|
||||
|
||||
# Update the marker file (no longer needed here, already touched before download)
|
||||
when: update_needed
|
||||
|
||||
# Deploy and restart the EchoIP service
|
||||
- name: Deploy EchoIP docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ echoip_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: echoip_compose
|
||||
|
||||
- name: Stop EchoIP service
|
||||
ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: echoip_compose.changed
|
||||
|
||||
- name: Start EchoIP service
|
||||
ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" up -d
|
||||
when: echoip_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- echoip
|
||||
@@ -0,0 +1,31 @@
|
||||
services:
  factorio-server-manager:
    image: "ofsm/ofsm:latest"
    restart: "unless-stopped"
    environment:
      - PUID=1000
      - PGID=100
      - "FACTORIO_VERSION=stable"
      # NOTE(review): RCON password is hard-coded in the template and ends up
      # in VCS — consider a 1Password lookup like the Gitea template uses.
      - "RCON_PASS=458fc84534"
    ports:
      - "5080:80"            # FSM web UI
      - "34197:34197/udp"    # Factorio game traffic
    volumes:
      - "{{ factorio_data_dir }}/fsm-data:/opt/fsm-data"
      - "{{ factorio_data_dir }}/factorio-data/saves:/opt/factorio/saves"
      - "{{ factorio_data_dir }}/factorio-data/mods:/opt/factorio/mods"
      - "{{ factorio_data_dir }}/factorio-data/config:/opt/factorio/config"
      - "{{ factorio_data_dir }}/factorio-data/mod_packs:/opt/fsm/mod_packs"
    networks:
      - factorio
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 2G

networks:
  factorio:
  caddy_network:
    external: true
    name: caddy_default
|
||||
31
ansible/tasks/servers/services/factorio/factorio.yml
Normal file
31
ansible/tasks/servers/services/factorio/factorio.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
---
# Render the Factorio Server Manager compose file and restart the stack
# whenever the rendered template changes.
- name: Deploy Factorio service
  block:
    - name: Set Factorio directories
      ansible.builtin.set_fact:
        factorio_service_dir: "{{ ansible_env.HOME }}/.services/factorio"
        factorio_data_dir: "/mnt/services/factorio"

    - name: Create Factorio directory
      ansible.builtin.file:
        path: "{{ factorio_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Factorio docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ factorio_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: factorio_compose

    # Tear down and recreate only when the compose file actually changed.
    - name: Stop Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" down --remove-orphans
      when: factorio_compose.changed

    - name: Start Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" up -d
      when: factorio_compose.changed
  tags:
    - services
    - factorio
|
||||
@@ -0,0 +1,98 @@
|
||||
# act_runner configuration (based on the upstream example config; a fresh
# copy can be produced with `./act_runner generate-config > config.yaml`).

log:
  # Log verbosity: trace, debug, info, warn, error, fatal.
  level: info

runner:
  # Where the registration result is stored.
  file: .runner
  # Number of tasks executed concurrently.
  capacity: 1
  # Extra environment variables passed to jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables loaded from a file; ignored when empty
  # or when the file does not exist.
  env_file: .env
  # Per-job timeout. The Gitea instance has its own timeout (3h default)
  # and may stop the job earlier than this value.
  timeout: 3h
  # Skip TLS certificate verification of the Gitea instance.
  insecure: false
  # Timeout for fetching a job from the Gitea instance.
  fetch_timeout: 5s
  # Interval between job fetches.
  fetch_interval: 2s
  # Labels determine which jobs this runner can take and how to run them,
  # e.g. "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest".
  # More images: https://gitea.com/gitea/runner-images .
  # Empty at registration time -> labels are prompted for; empty at `daemon`
  # time -> labels from the `.runner` file are used.
  labels:
    - "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
    - "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
    - "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"

cache:
  # Enable the cache server used by actions/cache.
  enabled: true
  # Cache data directory; defaults to $HOME/.cache/actcache when empty.
  dir: ""
  # Address job containers use to reach the cache server (not a listen
  # address — leave empty to auto-detect; 0.0.0.0 would be wrong here).
  host: ""
  # Cache server port; 0 picks a random free port.
  port: 0
  # External cache server URL (used as ACTIONS_CACHE_URL instead of starting
  # a built-in server). Should generally end with "/".
  external_server: ""

container:
  # Network for job containers: host, bridge, or a custom network name.
  # Empty -> act_runner creates a network automatically.
  network: ""
  # Privileged mode for task containers (required for Docker-in-Docker).
  privileged: false
  # Additional `docker run`-style options (e.g. --add-host=my.gitea.url:host-gateway).
  options:
  # Parent directory of a job's working directory, without the leading '/'
  # (a leading '/' is trimmed). Empty -> /workspace.
  workdir_parent:
  # Volumes/bind mounts job containers may use; glob syntax supported
  # (https://github.com/gobwas/glob). Empty list -> no volumes allowed;
  # use ['**'] to allow any volume.
  valid_volumes: []
  # Docker client host override. Empty -> auto-detect; "-" -> auto-detect
  # but do not mount the docker host into job/service containers.
  docker_host: ""
  # Pull images even when already present.
  force_pull: true
  # Rebuild images even when already present.
  force_rebuild: false

host:
  # Parent directory of a job's working directory for host-mode jobs.
  # Empty -> $HOME/.cache/act/ is used.
  workdir_parent: /tmp/act_runner
|
||||
66
ansible/tasks/servers/services/gitea/docker-compose.yml.j2
Normal file
66
ansible/tasks/servers/services/gitea/docker-compose.yml.j2
Normal file
@@ -0,0 +1,66 @@
|
||||
services:
  gitea:
    image: gitea/gitea:latest
    restart: always
    environment:
      - PUID=1000
      - PGID=100
    volumes:
      - "{{gitea_data_dir}}/gitea:/data"
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3030:3000"   # web UI
      # NOTE(review): binds host port 22 for git-over-SSH — verify this does
      # not clash with the host's own sshd.
      - "22:22"
    networks:
      - gitea
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 1G

  postgres:
    image: postgres:15-alpine
    restart: always
    environment:
      - PUID=1000
      - PGID=100
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='POSTGRES_PASSWORD') }}
      - POSTGRES_DB=gitea
    volumes:
      - "{{gitea_data_dir}}/postgres:/var/lib/postgresql/data"
    networks:
      - gitea
    deploy:
      resources:
        limits:
          memory: 1G

  act_runner:
    image: gitea/act_runner:latest
    volumes:
      - "{{gitea_service_dir}}/act-runner-config.yaml:/config.yaml"
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp/act_runner:/tmp/act_runner
    environment:
      - PUID=1000
      - PGID=100
      - GITEA_INSTANCE_URL=https://git.mvl.sh
      - GITEA_RUNNER_REGISTRATION_TOKEN={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='GITEA_RUNNER_REGISTRATION_TOKEN') }}
      - GITEA_RUNNER_NAME=act-worker
      - CONFIG_FILE=/config.yaml
    restart: always
    networks:
      - gitea
    deploy:
      resources:
        limits:
          memory: 2G

networks:
  gitea:
  caddy_network:
    external: true
    name: caddy_default
|
||||
44
ansible/tasks/servers/services/gitea/gitea.yml
Normal file
44
ansible/tasks/servers/services/gitea/gitea.yml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
# Render the Gitea stack (gitea + postgres + act_runner) and restart it
# whenever either rendered template changes.
- name: Deploy Gitea service
  block:
    - name: Set Gitea directories
      ansible.builtin.set_fact:
        gitea_data_dir: "/mnt/services/gitea"
        gitea_service_dir: "{{ ansible_env.HOME }}/.services/gitea"

    - name: Create Gitea directories
      ansible.builtin.file:
        path: "{{ gitea_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ gitea_data_dir }}"
        - "{{ gitea_service_dir }}"
      loop_control:
        loop_var: gitea_dir

    - name: Deploy Gitea docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ gitea_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: gitea_compose

    - name: Deploy Gitea act-runner-config.yaml
      ansible.builtin.template:
        src: act-runner-config.yaml.j2
        dest: "{{ gitea_service_dir }}/act-runner-config.yaml"
        mode: "0644"
      register: gitea_act_runner_config

    # Bounce the stack only when one of the rendered files changed.
    - name: Stop Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" down --remove-orphans
      when: gitea_compose.changed or gitea_act_runner_config.changed

    - name: Start Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" up -d
      when: gitea_compose.changed or gitea_act_runner_config.changed

  tags:
    - services
    - gitea
|
||||
14
ansible/tasks/servers/services/golink/docker-compose.yml.j2
Normal file
14
ansible/tasks/servers/services/golink/docker-compose.yml.j2
Normal file
@@ -0,0 +1,14 @@
|
||||
name: golink
services:
  server:
    image: ghcr.io/tailscale/golink:main
    user: root
    environment:
      - TS_AUTHKEY={{ lookup('community.general.onepassword', 'GoLink', vault='Dotfiles', field='TS_AUTHKEY') }}
    volumes:
      - "{{ golink_data_dir }}:/home/nonroot"
    restart: "unless-stopped"
    deploy:
      resources:
        limits:
          memory: 256M
|
||||
36
ansible/tasks/servers/services/golink/golink.yml
Normal file
36
ansible/tasks/servers/services/golink/golink.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
---
# Render the Tailscale GoLink compose file and restart the service when the
# rendered template changes.
- name: Deploy GoLink service
  block:
    - name: Set GoLink directories
      ansible.builtin.set_fact:
        golink_data_dir: "/mnt/services/golink"
        golink_service_dir: "{{ ansible_env.HOME }}/.services/golink"

    - name: Create GoLink directories
      ansible.builtin.file:
        path: "{{ golink_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ golink_data_dir }}"
        - "{{ golink_service_dir }}"
      loop_control:
        loop_var: golink_dir

    - name: Deploy GoLink docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ golink_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: golink_compose

    # Bounce the stack only when the compose file changed.
    - name: Stop GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" down --remove-orphans
      when: golink_compose.changed

    - name: Start GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" up -d
      when: golink_compose.changed
  tags:
    - services
    - golink
|
||||
@@ -0,0 +1,21 @@
|
||||
services:
  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    volumes:
      - "/var/run/dbus:/run/dbus:ro"
      - "{{ homeassistant_data_dir }}:/config"
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=1000
    restart: unless-stopped
    # Privileged + host networking: required for device discovery and for
    # the Zigbee/Z-Wave USB stick passed through below.
    privileged: true
    network_mode: host
    devices:
      - /dev/ttyUSB0:/dev/ttyUSB0
    deploy:
      resources:
        limits:
          memory: 2G
|
||||
@@ -0,0 +1,36 @@
|
||||
---
# Render the Home Assistant compose file and restart the service when the
# rendered template changes.
- name: Deploy Home Assistant service
  block:
    - name: Set Home Assistant directories
      ansible.builtin.set_fact:
        homeassistant_data_dir: "/mnt/services/homeassistant"
        homeassistant_service_dir: "{{ ansible_env.HOME }}/.services/homeassistant"

    - name: Create Home Assistant directories
      ansible.builtin.file:
        path: "{{ homeassistant_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ homeassistant_data_dir }}"
        - "{{ homeassistant_service_dir }}"
      loop_control:
        loop_var: homeassistant_dir

    - name: Deploy Home Assistant docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ homeassistant_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: homeassistant_compose

    # Bounce the stack only when the compose file changed.
    - name: Stop Home Assistant service
      ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" down --remove-orphans
      when: homeassistant_compose.changed

    - name: Start Home Assistant service
      ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" up -d
      when: homeassistant_compose.changed
  tags:
    - services
    - homeassistant
|
||||
123
ansible/tasks/servers/services/immich/docker-compose.yml.j2
Normal file
123
ansible/tasks/servers/services/immich/docker-compose.yml.j2
Normal file
@@ -0,0 +1,123 @@
|
||||
services:
  immich:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    volumes:
      - "{{ immich_data_dir }}:/usr/src/app/upload"
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=100
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich
      - caddy_network
    runtime: nvidia
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  machine-learning:
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich
    runtime: nvidia
    deploy:
      resources:
        limits:
          memory: 8G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: unless-stopped
    networks:
      - immich
    deploy:
      resources:
        limits:
          memory: 1G

  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    environment:
      PUID: 1000
      PGID: 1000
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      - "{{ immich_database_dir }}:/var/lib/postgresql/data"
    # Health check also scans for data-checksum failures, not just liveness.
    healthcheck:
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    restart: unless-stopped
    networks:
      - immich
    deploy:
      resources:
        limits:
          memory: 2G

volumes:
  model-cache:

networks:
  immich:
  caddy_network:
    external: true
    name: caddy_default
|
||||
10
ansible/tasks/servers/services/immich/dotenv.j2
Normal file
10
ansible/tasks/servers/services/immich/dotenv.j2
Normal file
@@ -0,0 +1,10 @@
|
||||
# Supported env variables: https://immich.app/docs/install/environment-variables
# TZ identifiers: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Europe/Amsterdam

# Immich version to run; can be pinned to a specific release like "v1.71.0".
IMMICH_VERSION=release

# Database credentials consumed by docker-compose.yml.
# NOTE(review): default password on an internal-only network — consider a
# 1Password lookup as used by the other service templates.
DB_USERNAME=postgres
DB_PASSWORD=postgres
DB_DATABASE_NAME=immich
|
||||
44
ansible/tasks/servers/services/immich/immich.yml
Normal file
44
ansible/tasks/servers/services/immich/immich.yml
Normal file
@@ -0,0 +1,44 @@
|
||||
---
# Render the Immich compose file and .env, and restart the stack whenever
# either rendered file changes.
- name: Deploy Immich service
  block:
    - name: Set Immich directories
      ansible.builtin.set_fact:
        immich_data_dir: "/mnt/data/photos/immich-library"
        immich_database_dir: "/mnt/services/immich/postgres"
        immich_service_dir: "{{ ansible_env.HOME }}/.services/immich"

    - name: Create Immich directories
      ansible.builtin.file:
        path: "{{ immich_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ immich_data_dir }}"
        - "{{ immich_service_dir }}"
      loop_control:
        loop_var: immich_dir

    - name: Deploy Immich docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ immich_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: immich_compose

    - name: Deploy Immich .env
      ansible.builtin.template:
        src: dotenv.j2
        dest: "{{ immich_service_dir }}/.env"
        mode: "0644"
      # BUG FIX: previously registered into `immich_compose`, overwriting the
      # compose task's result — a change to docker-compose.yml alone would
      # never trigger the restart below. Register into a distinct variable.
      register: immich_dotenv

    # Bounce the stack when either rendered file changed.
    - name: Stop Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" down --remove-orphans
      when: immich_compose.changed or immich_dotenv.changed

    - name: Start Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" up -d
      when: immich_compose.changed or immich_dotenv.changed
  tags:
    - services
    - immich
|
||||
@@ -0,0 +1,73 @@
|
||||
services:
  nextcloud:
    image: nextcloud
    container_name: nextcloud
    restart: unless-stopped
    networks:
      - nextcloud
      - caddy_network
    depends_on:
      - nextclouddb
      - redis
    ports:
      - "8081:80"
    volumes:
      - "{{ nextcloud_data_dir }}/nextcloud/html:/var/www/html"
      - "{{ nextcloud_data_dir }}/nextcloud/custom_apps:/var/www/html/custom_apps"
      - "{{ nextcloud_data_dir }}/nextcloud/config:/var/www/html/config"
      - "{{ nextcloud_data_dir }}/nextcloud/data:/var/www/html/data"
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
      - MYSQL_HOST=nextclouddb
      - REDIS_HOST=redis
    deploy:
      resources:
        limits:
          memory: 2G

  nextclouddb:
    image: mariadb:11.4.7
    container_name: nextcloud-db
    restart: unless-stopped
    # Isolation/binlog settings recommended by the Nextcloud admin docs.
    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
    networks:
      - nextcloud
    volumes:
      - "{{ nextcloud_data_dir }}/database:/var/lib/mysql"
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - MYSQL_RANDOM_ROOT_PASSWORD=true
      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
    deploy:
      resources:
        limits:
          memory: 1G

  redis:
    image: redis:alpine
    container_name: redis
    volumes:
      - "{{ nextcloud_data_dir }}/redis:/data"
    networks:
      - nextcloud
    deploy:
      resources:
        limits:
          memory: 512M

networks:
  nextcloud:
    name: nextcloud
    driver: bridge
  caddy_network:
    name: caddy_default
    external: true
|
||||
31
ansible/tasks/servers/services/nextcloud/nextcloud.yml
Normal file
31
ansible/tasks/servers/services/nextcloud/nextcloud.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
---
# Render the Nextcloud compose stack and restart it when the rendered
# template changes.
- name: Deploy Nextcloud service
  block:
    - name: Set Nextcloud directories
      ansible.builtin.set_fact:
        nextcloud_service_dir: "{{ ansible_env.HOME }}/.services/nextcloud"
        nextcloud_data_dir: "/mnt/services/nextcloud"

    - name: Create Nextcloud directory
      ansible.builtin.file:
        path: "{{ nextcloud_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Nextcloud docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ nextcloud_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: nextcloud_compose

    # Bounce the stack only when the compose file changed.
    - name: Stop Nextcloud service
      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" down --remove-orphans
      when: nextcloud_compose.changed

    - name: Start Nextcloud service
      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" up -d
      when: nextcloud_compose.changed
  tags:
    - services
    - nextcloud
|
||||
29
ansible/tasks/servers/services/plex/docker-compose.yml.j2
Normal file
29
ansible/tasks/servers/services/plex/docker-compose.yml.j2
Normal file
@@ -0,0 +1,29 @@
|
||||
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    # Host networking is required for Plex discovery (DLNA/GDM).
    network_mode: host
    restart: unless-stopped
    runtime: nvidia
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - VERSION=docker
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    volumes:
      - "{{ plex_data_dir }}/config:/config"
      - "{{ plex_data_dir }}/transcode:/transcode"
      - /mnt/data/movies:/movies
      - /mnt/data/tvshows:/tvshows
      - /mnt/object_storage/tvshows:/tvshows_slow
      - /mnt/data/music:/music
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
|
||||
36
ansible/tasks/servers/services/plex/plex.yml
Normal file
36
ansible/tasks/servers/services/plex/plex.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
---
# Render the Plex compose file and restart the service when the rendered
# template changes.
- name: Deploy Plex service
  block:
    - name: Set Plex directories
      ansible.builtin.set_fact:
        plex_data_dir: "/mnt/services/plex"
        plex_service_dir: "{{ ansible_env.HOME }}/.services/plex"

    - name: Create Plex directories
      ansible.builtin.file:
        path: "{{ plex_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ plex_data_dir }}"
        - "{{ plex_service_dir }}"
      loop_control:
        loop_var: plex_dir

    - name: Deploy Plex docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ plex_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: plex_compose

    # Bounce the stack only when the compose file changed.
    - name: Stop Plex service
      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" down --remove-orphans
      when: plex_compose.changed

    - name: Start Plex service
      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" up -d
      when: plex_compose.changed
  tags:
    - services
    - plex
|
||||
300
ansible/tasks/servers/services/privatebin/conf.php.j2
Normal file
300
ansible/tasks/servers/services/privatebin/conf.php.j2
Normal file
@@ -0,0 +1,300 @@
|
||||
;<?php http_response_code(403); /*
|
||||
; config file for PrivateBin
|
||||
;
|
||||
; An explanation of each setting can be find online at https://github.com/PrivateBin/PrivateBin/wiki/Configuration.
|
||||
|
||||
[main]
|
||||
; (optional) set a project name to be displayed on the website
|
||||
; name = "PrivateBin"
|
||||
|
||||
; The full URL, with the domain name and directories that point to the
|
||||
; PrivateBin files, including an ending slash (/). This URL is essential to
|
||||
; allow Opengraph images to be displayed on social networks.
|
||||
basepath = "https://bin.mvl.sh/"
|
||||
|
||||
; enable or disable the discussion feature, defaults to true
|
||||
discussion = false
|
||||
|
||||
; preselect the discussion feature, defaults to false
|
||||
opendiscussion = false
|
||||
|
||||
; enable or disable the display of dates & times in the comments, defaults to true
|
||||
; Note that internally the creation time will still get tracked in order to sort
|
||||
; the comments by creation time, but you can choose not to display them.
|
||||
; discussiondatedisplay = false
|
||||
|
||||
; enable or disable the password feature, defaults to true
|
||||
password = true
|
||||
|
||||
; enable or disable the file upload feature, defaults to false
|
||||
fileupload = false
|
||||
|
||||
; preselect the burn-after-reading feature, defaults to false
|
||||
burnafterreadingselected = false
|
||||
|
||||
; which display mode to preselect by default, defaults to "plaintext"
|
||||
; make sure the value exists in [formatter_options]
|
||||
defaultformatter = "plaintext"
|
||||
|
||||
; (optional) set a syntax highlighting theme, as found in css/prettify/
|
||||
; syntaxhighlightingtheme = "sons-of-obsidian"
|
||||
|
||||
; size limit per paste or comment in bytes, defaults to 10 Mebibytes
|
||||
sizelimit = 10485760
|
||||
|
||||
; by default PrivateBin use "bootstrap" template (tpl/bootstrap.php).
|
||||
; Optionally you can enable the template selection menu, which uses
|
||||
; a session cookie to store the choice until the browser is closed.
|
||||
templateselection = false
|
||||
|
||||
; List of available for selection templates when "templateselection" option is enabled
|
||||
availabletemplates[] = "bootstrap5"
|
||||
availabletemplates[] = "bootstrap"
|
||||
availabletemplates[] = "bootstrap-page"
|
||||
availabletemplates[] = "bootstrap-dark"
|
||||
availabletemplates[] = "bootstrap-dark-page"
|
||||
availabletemplates[] = "bootstrap-compact"
|
||||
availabletemplates[] = "bootstrap-compact-page"
|
||||
|
||||
; set the template your installs defaults to, defaults to "bootstrap" (tpl/bootstrap.php), also
|
||||
; bootstrap variants: "bootstrap-dark", "bootstrap-compact", "bootstrap-page",
|
||||
; which can be combined with "-dark" and "-compact" for "bootstrap-dark-page",
|
||||
; "bootstrap-compact-page" and finally "bootstrap5" (tpl/bootstrap5.php) - previews at:
|
||||
; https://privatebin.info/screenshots.html
|
||||
; template = "bootstrap"
|
||||
|
||||
; (optional) info text to display
|
||||
; use single, instead of double quotes for HTML attributes
|
||||
;info = "More information on the <a href='https://privatebin.info/'>project page</a>."
|
||||
|
||||
; (optional) notice to display
|
||||
; notice = "Note: This is a test service: Data may be deleted anytime. Kittens will die if you abuse this service."
|
||||
|
||||
; by default PrivateBin will guess the visitors language based on the browsers
|
||||
; settings. Optionally you can enable the language selection menu, which uses
|
||||
; a session cookie to store the choice until the browser is closed.
|
||||
languageselection = false
|
||||
|
||||
; set the language your installs defaults to, defaults to English
|
||||
; if this is set and language selection is disabled, this will be the only language
|
||||
; languagedefault = "en"
|
||||
|
||||
; (optional) URL shortener address to offer after a new paste is created.
|
||||
; It is suggested to only use this with self-hosted shorteners as this will leak
|
||||
; the pastes encryption key.
|
||||
; urlshortener = "https://shortener.example.com/api?link="
|
||||
|
||||
; (optional) Let users create a QR code for sharing the paste URL with one click.
|
||||
; It works both when a new paste is created and when you view a paste.
|
||||
qrcode = true
|
||||
|
||||
; (optional) Let users send an email sharing the paste URL with one click.
|
||||
; It works both when a new paste is created and when you view a paste.
|
||||
; email = true
|
||||
|
||||
; (optional) IP based icons are a weak mechanism to detect if a comment was from
|
||||
; a different user when the same username was used in a comment. It might get
|
||||
; used to get the IP of a comment poster if the server salt is leaked and a
|
||||
; SHA512 HMAC rainbow table is generated for all (relevant) IPs.
|
||||
; Can be set to one these values:
|
||||
; "none" / "identicon" / "jdenticon" (default) / "vizhash".
|
||||
; icon = "none"
|
||||
|
||||
; Content Security Policy headers allow a website to restrict what sources are
|
||||
; allowed to be accessed in its context. You need to change this if you added
|
||||
; custom scripts from third-party domains to your templates, e.g. tracking
|
||||
; scripts or run your site behind certain DDoS-protection services.
|
||||
; Check the documentation at https://content-security-policy.com/
|
||||
; Notes:
|
||||
; - If you use the bootstrap5 theme, you must change default-src to 'self' to
|
||||
; enable display of the svg icons
|
||||
; - By default this disallows to load images from third-party servers, e.g. when
|
||||
; they are embedded in pastes. If you wish to allow that, you can adjust the
|
||||
; policy here. See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-not-it-load-embedded-images
|
||||
; for details.
|
||||
; - The 'wasm-unsafe-eval' is used to enable webassembly support (used for zlib
|
||||
; compression). You can remove it if compression doesn't need to be supported.
|
||||
; - The 'unsafe-inline' style-src is used by Chrome when displaying PDF previews
|
||||
; and can be omitted if attachment upload is disabled (which is the default).
|
||||
; See https://issues.chromium.org/issues/343754409
|
||||
; - To allow displaying PDF previews in Firefox or Chrome, sandboxing must also
|
||||
; get turned off. The following CSP allows PDF previews:
|
||||
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'wasm-unsafe-eval'; style-src 'self' 'unsafe-inline'; font-src 'self'; frame-ancestors 'none'; frame-src blob:; img-src 'self' data: blob:; media-src blob:; object-src blob:"
|
||||
;
|
||||
; The recommended and default used CSP is:
|
||||
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'wasm-unsafe-eval'; style-src 'self'; font-src 'self'; frame-ancestors 'none'; frame-src blob:; img-src 'self' data: blob:; media-src blob:; object-src blob:; sandbox allow-same-origin allow-scripts allow-forms allow-modals allow-downloads"
|
||||
|
||||
; stay compatible with PrivateBin Alpha 0.19, less secure
|
||||
; if enabled will use base64.js version 1.7 instead of 2.1.9 and sha1 instead of
|
||||
; sha256 in HMAC for the deletion token
|
||||
; zerobincompatibility = false
|
||||
|
||||
; Enable or disable the warning message when the site is served over an insecure
|
||||
; connection (insecure HTTP instead of HTTPS), defaults to true.
|
||||
; Secure transport methods like Tor and I2P domains are automatically whitelisted.
|
||||
; It is **strongly discouraged** to disable this.
|
||||
; See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-it-show-me-an-error-about-an-insecure-connection for more information.
|
||||
; httpwarning = true
|
||||
|
||||
; Pick compression algorithm or disable it. Only applies to pastes/comments
|
||||
; created after changing the setting.
|
||||
; Can be set to one these values: "none" / "zlib" (default).
|
||||
; compression = "zlib"
|
||||
|
||||
[expire]
|
||||
; expire value that is selected per default
|
||||
; make sure the value exists in [expire_options]
|
||||
default = "1week"
|
||||
|
||||
[expire_options]
|
||||
; Set each one of these to the number of seconds in the expiration period,
|
||||
; or 0 if it should never expire
|
||||
5min = 300
|
||||
10min = 600
|
||||
1hour = 3600
|
||||
1day = 86400
|
||||
1week = 604800
|
||||
; Well this is not *exactly* one month, it's 30 days:
|
||||
1month = 2592000
|
||||
1year = 31536000
|
||||
never = 0
|
||||
|
||||
[formatter_options]
|
||||
; Set available formatters, their order and their labels
|
||||
plaintext = "Plain Text"
|
||||
syntaxhighlighting = "Source Code"
|
||||
markdown = "Markdown"
|
||||
|
||||
[traffic]
|
||||
; time limit between calls from the same IP address in seconds
|
||||
; Set this to 0 to disable rate limiting.
|
||||
limit = 10
|
||||
|
||||
; (optional) Set IPs addresses (v4 or v6) or subnets (CIDR) which are exempted
|
||||
; from the rate-limit. Invalid IPs will be ignored. If multiple values are to
|
||||
; be exempted, the list needs to be comma separated. Leave unset to disable
|
||||
; exemptions.
|
||||
; exempted = "1.2.3.4,10.10.10/24"
|
||||
|
||||
; (optional) If you want only some source IP addresses (v4 or v6) or subnets
|
||||
; (CIDR) to be allowed to create pastes, set these here. Invalid IPs will be
|
||||
; ignored. If multiple values are to be exempted, the list needs to be comma
|
||||
; separated. Leave unset to allow anyone to create pastes.
|
||||
; creators = "1.2.3.4,10.10.10/24"
|
||||
|
||||
; (optional) if your website runs behind a reverse proxy or load balancer,
|
||||
; set the HTTP header containing the visitors IP address, i.e. X_FORWARDED_FOR
|
||||
; header = "X_FORWARDED_FOR"
|
||||
|
||||
[purge]
|
||||
; minimum time limit between two purgings of expired pastes, it is only
|
||||
; triggered when pastes are created
|
||||
; Set this to 0 to run a purge every time a paste is created.
|
||||
limit = 300
|
||||
|
||||
; maximum amount of expired pastes to delete in one purge
|
||||
; Set this to 0 to disable purging. Set it higher, if you are running a large
|
||||
; site
|
||||
batchsize = 10
|
||||
|
||||
[model]
|
||||
; name of data model class to load and directory for storage
|
||||
; the default model "Filesystem" stores everything in the filesystem
|
||||
class = Filesystem
|
||||
[model_options]
|
||||
dir = PATH "data"
|
||||
|
||||
;[model]
|
||||
; example of a Google Cloud Storage configuration
|
||||
;class = GoogleCloudStorage
|
||||
;[model_options]
|
||||
;bucket = "my-private-bin"
|
||||
;prefix = "pastes"
|
||||
;uniformacl = false
|
||||
|
||||
;[model]
|
||||
; example of DB configuration for MySQL
|
||||
;class = Database
|
||||
;[model_options]
|
||||
;dsn = "mysql:host=localhost;dbname=privatebin;charset=UTF8"
|
||||
;tbl = "privatebin_" ; table prefix
|
||||
;usr = "privatebin"
|
||||
;pwd = "Z3r0P4ss"
|
||||
;opt[12] = true ; PDO::ATTR_PERSISTENT
|
||||
|
||||
;[model]
|
||||
; example of DB configuration for SQLite
|
||||
;class = Database
|
||||
;[model_options]
|
||||
;dsn = "sqlite:" PATH "data/db.sq3"
|
||||
;usr = null
|
||||
;pwd = null
|
||||
;opt[12] = true ; PDO::ATTR_PERSISTENT
|
||||
|
||||
;[model]
|
||||
; example of DB configuration for PostgreSQL
|
||||
;class = Database
|
||||
;[model_options]
|
||||
;dsn = "pgsql:host=localhost;dbname=privatebin"
|
||||
;tbl = "privatebin_" ; table prefix
|
||||
;usr = "privatebin"
|
||||
;pwd = "Z3r0P4ss"
|
||||
;opt[12] = true ; PDO::ATTR_PERSISTENT
|
||||
|
||||
;[model]
|
||||
; example of S3 configuration for Rados gateway / CEPH
|
||||
;class = S3Storage
|
||||
;[model_options]
|
||||
;region = ""
|
||||
;version = "2006-03-01"
|
||||
;endpoint = "https://s3.my-ceph.invalid"
|
||||
;use_path_style_endpoint = true
|
||||
;bucket = "my-bucket"
|
||||
;accesskey = "my-rados-user"
|
||||
;secretkey = "my-rados-pass"
|
||||
|
||||
;[model]
|
||||
; example of S3 configuration for AWS
|
||||
;class = S3Storage
|
||||
;[model_options]
|
||||
;region = "eu-central-1"
|
||||
;version = "latest"
|
||||
;bucket = "my-bucket"
|
||||
;accesskey = "access key id"
|
||||
;secretkey = "secret access key"
|
||||
|
||||
;[model]
|
||||
; example of S3 configuration for AWS using its SDK default credential provider chain
|
||||
; if relying on environment variables, the AWS SDK will look for the following:
|
||||
; - AWS_ACCESS_KEY_ID
|
||||
; - AWS_SECRET_ACCESS_KEY
|
||||
; - AWS_SESSION_TOKEN (if needed)
|
||||
; for more details, see https://docs.aws.amazon.com/sdk-for-php/v3/developer-guide/guide_credentials.html#default-credential-chain
|
||||
;class = S3Storage
|
||||
;[model_options]
|
||||
;region = "eu-central-1"
|
||||
;version = "latest"
|
||||
;bucket = "my-bucket"
|
||||
|
||||
;[yourls]
|
||||
; When using YOURLS as a "urlshortener" config item:
|
||||
; - By default, "urlshortener" will point to the YOURLS API URL, with or without
|
||||
; credentials, and will be visible in public on the PrivateBin web page.
|
||||
; Only use this if you allow short URL creation without credentials.
|
||||
; - Alternatively, using the parameters in this section ("signature" and
|
||||
; "apiurl"), "urlshortener" needs to point to the base URL of your PrivateBin
|
||||
; instance with "?shortenviayourls&link=" appended. For example:
|
||||
; urlshortener = "${basepath}?shortenviayourls&link="
|
||||
; This URL will in turn call YOURLS on the server side, using the URL from
|
||||
; "apiurl" and the "access signature" from the "signature" parameters below.
|
||||
|
||||
; (optional) the "signature" (access key) issued by YOURLS for the using account
|
||||
; signature = ""
|
||||
; (optional) the URL of the YOURLS API, called to shorten a PrivateBin URL
|
||||
; apiurl = "https://yourls.example.com/yourls-api.php"
|
||||
|
||||
;[sri]
|
||||
; Subresource integrity (SRI) hashes used in template files. Uncomment and set
|
||||
; these for all js files used. See:
|
||||
; https://github.com/PrivateBin/PrivateBin/wiki/FAQ#user-content-how-to-make-privatebin-work-when-i-have-changed-some-javascript-files
|
||||
;js/privatebin.js = "sha512-[…]"
|
||||
@@ -0,0 +1,33 @@
|
||||
services:
|
||||
privatebin:
|
||||
image: privatebin/nginx-fpm-alpine:latest
|
||||
container_name: privatebin
|
||||
restart: always
|
||||
read_only: true
|
||||
user: "1000:1000"
|
||||
ports:
|
||||
- "8585:8080"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Amsterdam
|
||||
volumes:
|
||||
- {{ privatebin_data_dir }}:/srv/data
|
||||
- {{ privatebin_service_dir }}/conf.php:/srv/cfg/conf.php:ro
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "nc -z 127.0.0.1 8080 || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 90s
|
||||
networks:
|
||||
- caddy_network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 256M
|
||||
|
||||
networks:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
42
ansible/tasks/servers/services/privatebin/privatebin.yml
Normal file
42
ansible/tasks/servers/services/privatebin/privatebin.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
- name: Deploy PrivateBin service
|
||||
block:
|
||||
- name: Set PrivateBin directories
|
||||
ansible.builtin.set_fact:
|
||||
privatebin_data_dir: "/mnt/services/privatebin"
|
||||
privatebin_service_dir: "{{ ansible_env.HOME }}/.services/privatebin"
|
||||
|
||||
- name: Create PrivateBin directories
|
||||
ansible.builtin.file:
|
||||
path: "{{ privatebin_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
loop:
|
||||
- "{{ privatebin_data_dir }}"
|
||||
- "{{ privatebin_service_dir }}"
|
||||
loop_control:
|
||||
loop_var: privatebin_dir
|
||||
|
||||
- name: Deploy PrivateBin docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ privatebin_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: privatebin_compose
|
||||
|
||||
- name: Deploy PrivateBin conf.php
|
||||
ansible.builtin.template:
|
||||
src: conf.php.j2
|
||||
dest: "{{ privatebin_service_dir }}/conf.php"
|
||||
mode: "0644"
|
||||
|
||||
- name: Stop PrivateBin service
|
||||
ansible.builtin.command: docker compose -f "{{ privatebin_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: privatebin_compose.changed
|
||||
|
||||
- name: Start PrivateBin service
|
||||
ansible.builtin.command: docker compose -f "{{ privatebin_service_dir }}/docker-compose.yml" up -d
|
||||
when: privatebin_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- privatebin
|
||||
26
ansible/tasks/servers/services/redis/docker-compose.yml.j2
Normal file
26
ansible/tasks/servers/services/redis/docker-compose.yml.j2
Normal file
@@ -0,0 +1,26 @@
|
||||
services:
|
||||
juicefs-redis:
|
||||
image: redis:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "6379:6379"
|
||||
volumes:
|
||||
- /mnt/services/redis:/data
|
||||
command: ["redis-server", "--appendonly", "yes", "--requirepass", "{{ REDIS_PASSWORD }}"]
|
||||
environment:
|
||||
- TZ=Europe/Amsterdam
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "-a", "{{ REDIS_PASSWORD }}", "ping"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
networks:
|
||||
- juicefs-network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 256M
|
||||
|
||||
networks:
|
||||
juicefs-network:
|
||||
80
ansible/tasks/servers/services/redis/redis.yml
Normal file
80
ansible/tasks/servers/services/redis/redis.yml
Normal file
@@ -0,0 +1,80 @@
|
||||
---
|
||||
- name: Deploy Redis for JuiceFS
|
||||
block:
|
||||
- name: Set Redis facts
|
||||
ansible.builtin.set_fact:
|
||||
redis_service_dir: "{{ ansible_env.HOME }}/.services/juicefs-redis"
|
||||
redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"
|
||||
|
||||
- name: Create Redis service directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ redis_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Deploy Redis docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ redis_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: redis_compose
|
||||
vars:
|
||||
REDIS_PASSWORD: "{{ redis_password }}"
|
||||
|
||||
- name: Check if juicefs.service exists
|
||||
ansible.builtin.stat:
|
||||
path: /etc/systemd/system/juicefs.service
|
||||
register: juicefs_service_stat
|
||||
|
||||
- name: Stop juicefs.service to umount JuiceFS
|
||||
ansible.builtin.systemd:
|
||||
name: juicefs.service
|
||||
state: stopped
|
||||
enabled: false
|
||||
register: juicefs_stop
|
||||
changed_when: juicefs_stop.changed
|
||||
when: redis_compose.changed and juicefs_service_stat.stat.exists
|
||||
|
||||
- name: List containers that are running
|
||||
ansible.builtin.command: docker ps -q
|
||||
register: docker_ps
|
||||
changed_when: docker_ps.rc == 0
|
||||
when: redis_compose.changed
|
||||
|
||||
- name: Stop all docker containers
|
||||
ansible.builtin.command: docker stop {{ item }}
|
||||
loop: "{{ docker_ps.stdout_lines }}"
|
||||
register: docker_stop
|
||||
changed_when: docker_stop.rc == 0
|
||||
when: redis_compose.changed
|
||||
ignore_errors: true
|
||||
|
||||
- name: Start Redis service
|
||||
ansible.builtin.command: docker compose -f "{{ redis_service_dir }}/docker-compose.yml" up -d
|
||||
register: redis_start
|
||||
changed_when: redis_start.rc == 0
|
||||
|
||||
- name: Wait for Redis to be ready
|
||||
ansible.builtin.wait_for:
|
||||
host: localhost
|
||||
port: 6379
|
||||
timeout: 30
|
||||
|
||||
- name: Start juicefs.service to mount JuiceFS
|
||||
ansible.builtin.systemd:
|
||||
name: juicefs.service
|
||||
state: started
|
||||
enabled: true
|
||||
register: juicefs_start
|
||||
changed_when: juicefs_start.changed
|
||||
when: juicefs_service_stat.stat.exists
|
||||
|
||||
- name: Restart containers that were stopped
|
||||
ansible.builtin.command: docker start {{ item }}
|
||||
loop: "{{ docker_stop.results | map(attribute='item') | list }}"
|
||||
register: docker_restart
|
||||
changed_when: docker_restart.rc == 0
|
||||
when: redis_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- redis
|
||||
43
ansible/tasks/servers/services/service_cleanup.yml
Normal file
43
ansible/tasks/servers/services/service_cleanup.yml
Normal file
@@ -0,0 +1,43 @@
|
||||
---
|
||||
- name: Cleanup disabled services
|
||||
block:
|
||||
- name: Prepare cleanup list
|
||||
ansible.builtin.set_fact:
|
||||
services_to_cleanup: "{{ services | selectattr('enabled', 'equalto', false) | list }}"
|
||||
|
||||
- name: Check service directories existence for disabled services
|
||||
ansible.builtin.stat:
|
||||
path: "{{ ansible_env.HOME }}/.services/{{ item.name }}"
|
||||
register: service_dir_results
|
||||
loop: "{{ services_to_cleanup }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
|
||||
- name: Filter services with existing directories
|
||||
ansible.builtin.set_fact:
|
||||
services_with_dirs: "{{ service_dir_results.results | selectattr('stat.exists', 'equalto', true) | map(attribute='item') | list }}"
|
||||
|
||||
- name: Check if docker-compose file exists for services to cleanup
|
||||
ansible.builtin.stat:
|
||||
path: "{{ ansible_env.HOME }}/.services/{{ item.name }}/docker-compose.yml"
|
||||
register: compose_file_results
|
||||
loop: "{{ services_with_dirs }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
|
||||
- name: Stop disabled services with docker-compose files
|
||||
ansible.builtin.command: docker compose -f "{{ ansible_env.HOME }}/.services/{{ item.item.name }}/docker-compose.yml" down --remove-orphans
|
||||
loop: "{{ compose_file_results.results | selectattr('stat.exists', 'equalto', true) }}"
|
||||
loop_control:
|
||||
label: "{{ item.item.name }}"
|
||||
register: service_stop_results
|
||||
become: false
|
||||
failed_when: false # Continue even if the command fails
|
||||
|
||||
- name: Remove service directories for disabled services
|
||||
ansible.builtin.file:
|
||||
path: "{{ ansible_env.HOME }}/.services/{{ item.name }}"
|
||||
state: absent
|
||||
loop: "{{ services_with_dirs }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
41
ansible/tasks/servers/services/stash/docker-compose.yml.j2
Normal file
41
ansible/tasks/servers/services/stash/docker-compose.yml.j2
Normal file
@@ -0,0 +1,41 @@
|
||||
services:
|
||||
stash:
|
||||
image: stashapp/stash:latest
|
||||
container_name: stash
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "9999:9999"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- STASH_STASH=/data/
|
||||
- STASH_GENERATED=/generated/
|
||||
- STASH_METADATA=/metadata/
|
||||
- STASH_CACHE=/cache/
|
||||
- STASH_PORT=9999
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
## Point this at your collection.
|
||||
- {{ stash_data_dir }}:/data
|
||||
|
||||
## Keep configs, scrapers, and plugins here.
|
||||
- {{ stash_config_dir }}/config:/root/.stash
|
||||
## This is where your stash's metadata lives
|
||||
- {{ stash_config_dir }}/metadata:/metadata
|
||||
## Any other cache content.
|
||||
- {{ stash_config_dir }}/cache:/cache
|
||||
## Where to store binary blob data (scene covers, images)
|
||||
- {{ stash_config_dir }}/blobs:/blobs
|
||||
## Where to store generated content (screenshots,previews,transcodes,sprites)
|
||||
- {{ stash_config_dir }}/generated:/generated
|
||||
networks:
|
||||
- caddy_network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 2G
|
||||
|
||||
networks:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
37
ansible/tasks/servers/services/stash/stash.yml
Normal file
37
ansible/tasks/servers/services/stash/stash.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
---
|
||||
- name: Deploy Stash service
|
||||
block:
|
||||
- name: Set Stash directories
|
||||
ansible.builtin.set_fact:
|
||||
stash_data_dir: "/mnt/data/stash"
|
||||
stash_config_dir: "/mnt/services/stash"
|
||||
stash_service_dir: "{{ ansible_env.HOME }}/.services/stash"
|
||||
|
||||
- name: Create Stash directories
|
||||
ansible.builtin.file:
|
||||
path: "{{ stash_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
loop:
|
||||
- "{{ stash_data_dir }}"
|
||||
- "{{ stash_service_dir }}"
|
||||
loop_control:
|
||||
loop_var: stash_dir
|
||||
|
||||
- name: Deploy Stash docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ stash_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: stash_compose
|
||||
|
||||
- name: Stop Stash service
|
||||
ansible.builtin.command: docker compose -f "{{ stash_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: stash_compose.changed
|
||||
|
||||
- name: Start Stash service
|
||||
ansible.builtin.command: docker compose -f "{{ stash_service_dir }}/docker-compose.yml" up -d
|
||||
when: stash_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- stash
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
services:
|
||||
tautulli:
|
||||
image: lscr.io/linuxserver/tautulli:latest
|
||||
container_name: tautulli
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
- TZ=Etc/Amsterdam
|
||||
volumes:
|
||||
- {{ tautulli_data_dir }}:/config
|
||||
ports:
|
||||
- 8181:8181
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- caddy_network
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 512M
|
||||
|
||||
networks:
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
36
ansible/tasks/servers/services/tautulli/tautulli.yml
Normal file
36
ansible/tasks/servers/services/tautulli/tautulli.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
- name: Deploy Tautulli service
|
||||
block:
|
||||
- name: Set Tautulli directories
|
||||
ansible.builtin.set_fact:
|
||||
tautulli_data_dir: "{{ '/mnt/services/tautulli' }}"
|
||||
tautulli_service_dir: "{{ ansible_env.HOME }}/.services/tautulli"
|
||||
|
||||
- name: Create Tautulli directories
|
||||
ansible.builtin.file:
|
||||
path: "{{ tautulli_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
loop:
|
||||
- "{{ tautulli_data_dir }}"
|
||||
- "{{ tautulli_service_dir }}"
|
||||
loop_control:
|
||||
loop_var: tautulli_dir
|
||||
|
||||
- name: Deploy Tautulli docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ tautulli_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: tautulli_compose
|
||||
|
||||
- name: Stop Tautulli service
|
||||
ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: tautulli_compose.changed
|
||||
|
||||
- name: Start Tautulli service
|
||||
ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" up -d
|
||||
when: tautulli_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- tautulli
|
||||
@@ -0,0 +1,65 @@
|
||||
services:
|
||||
unifi-controller:
|
||||
image: linuxserver/unifi-network-application:latest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "8080:8080" # Device communication
|
||||
- "8443:8443" # Controller GUI / API
|
||||
- "3478:3478/udp" # STUN
|
||||
- "10001:10001/udp" # AP discovery
|
||||
- "8880:8880" # HTTP portal redirect (guest hotspot)
|
||||
- "8843:8843" # HTTPS portal redirect (guest hotspot)
|
||||
- "6789:6789" # Mobile speed test (optional)
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
- TZ=Europe/Amsterdam
|
||||
- MONGO_USER=unifi
|
||||
- MONGO_PASS=unifi
|
||||
- MONGO_HOST=unifi-db
|
||||
- MONGO_PORT=27017
|
||||
- MONGO_DBNAME=unifi
|
||||
- MONGO_AUTHSOURCE=admin
|
||||
volumes:
|
||||
- {{ unifi_network_application_data_dir }}/data:/config
|
||||
depends_on:
|
||||
- unifi-db
|
||||
networks:
|
||||
- unifi-network
|
||||
- caddy_network
|
||||
sysctls:
|
||||
- net.ipv6.conf.all.disable_ipv6=1
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1G
|
||||
|
||||
unifi-db:
|
||||
image: mongo:6.0
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- {{ unifi_network_application_data_dir }}/db:/data/db
|
||||
- {{ unifi_network_application_data_dir }}/init-mongo.sh:/docker-entrypoint-initdb.d/init-mongo.sh:ro
|
||||
environment:
|
||||
- MONGO_INITDB_ROOT_USERNAME=root
|
||||
- MONGO_INITDB_ROOT_PASSWORD=root
|
||||
- MONGO_INITDB_DATABASE=unifi
|
||||
- MONGO_USER=unifi
|
||||
- MONGO_PASS=unifi
|
||||
- MONGO_DBNAME=unifi
|
||||
- MONGO_AUTHSOURCE=admin
|
||||
networks:
|
||||
- unifi-network
|
||||
sysctls:
|
||||
- net.ipv6.conf.all.disable_ipv6=1
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1G
|
||||
|
||||
networks:
|
||||
unifi-network:
|
||||
driver: bridge
|
||||
caddy_network:
|
||||
external: true
|
||||
name: caddy_default
|
||||
@@ -0,0 +1,78 @@
|
||||
---
|
||||
- name: Deploy Unifi Network App service
|
||||
block:
|
||||
- name: Set Unifi Network App directories
|
||||
ansible.builtin.set_fact:
|
||||
unifi_network_application_data_dir: "/mnt/services/unifi_network_application"
|
||||
unifi_network_application_service_dir: "{{ ansible_env.HOME }}/.services/unifi_network_application"
|
||||
|
||||
- name: Create Unifi Network App directories
|
||||
ansible.builtin.file:
|
||||
path: "{{ unifi_network_application_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
loop:
|
||||
- "{{ unifi_network_application_data_dir }}"
|
||||
- "{{ unifi_network_application_data_dir }}/data"
|
||||
- "{{ unifi_network_application_data_dir }}/db"
|
||||
- "{{ unifi_network_application_service_dir }}"
|
||||
loop_control:
|
||||
loop_var: unifi_network_application_dir
|
||||
|
||||
- name: Create MongoDB initialization script
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
if which mongosh > /dev/null 2>&1; then
|
||||
mongo_init_bin='mongosh'
|
||||
else
|
||||
mongo_init_bin='mongo'
|
||||
fi
|
||||
"${mongo_init_bin}" <<EOF
|
||||
use ${MONGO_AUTHSOURCE}
|
||||
db.auth("${MONGO_INITDB_ROOT_USERNAME}", "${MONGO_INITDB_ROOT_PASSWORD}")
|
||||
db.createUser({
|
||||
user: "${MONGO_USER}",
|
||||
pwd: "${MONGO_PASS}",
|
||||
roles: [
|
||||
{ db: "${MONGO_DBNAME}", role: "dbOwner" },
|
||||
{ db: "${MONGO_DBNAME}_stat", role: "dbOwner" },
|
||||
{ db: "${MONGO_DBNAME}_audit", role: "dbOwner" }
|
||||
]
|
||||
})
|
||||
EOF
|
||||
dest: "{{ unifi_network_application_data_dir }}/init-mongo.sh"
|
||||
mode: "0755"
|
||||
register: unifi_mongo_init_script
|
||||
|
||||
- name: Deploy Unifi Network App docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ unifi_network_application_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: unifi_network_application_compose
|
||||
|
||||
- name: Clean MongoDB database for fresh initialization
|
||||
ansible.builtin.file:
|
||||
path: "{{ unifi_network_application_data_dir }}/db"
|
||||
state: absent
|
||||
when: unifi_mongo_init_script.changed
|
||||
|
||||
- name: Recreate MongoDB database directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ unifi_network_application_data_dir }}/db"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
when: unifi_mongo_init_script.changed
|
||||
|
||||
- name: Stop Unifi Network App service
|
||||
ansible.builtin.command: docker compose -f "{{ unifi_network_application_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: unifi_network_application_compose.changed or unifi_mongo_init_script.changed
|
||||
|
||||
- name: Start Unifi Network App service
|
||||
ansible.builtin.command: docker compose -f "{{ unifi_network_application_service_dir }}/docker-compose.yml" up -d
|
||||
when: unifi_network_application_compose.changed or unifi_mongo_init_script.changed
|
||||
tags:
|
||||
- services
|
||||
- unifi
|
||||
@@ -0,0 +1,23 @@
|
||||
services:
|
||||
wireguard:
|
||||
image: lscr.io/linuxserver/wireguard:latest
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=100
|
||||
- TZ=Europe/Amsterdam
|
||||
- SERVERURL=mvl.sh
|
||||
- PEERS=worklaptop,phone,desktop,personallaptop
|
||||
- ALLOWEDIPS=0.0.0.0/0, ::/0
|
||||
volumes:
|
||||
- "{{ wireguard_data_dir }}/wg-data:/config"
|
||||
ports:
|
||||
- 51820:51820/udp
|
||||
sysctls:
|
||||
- net.ipv4.conf.all.src_valid_mark=1
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 512M
|
||||
31
ansible/tasks/servers/services/wireguard/wireguard.yml
Normal file
31
ansible/tasks/servers/services/wireguard/wireguard.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
- name: Deploy WireGuard service
|
||||
block:
|
||||
- name: Set WireGuard directories
|
||||
ansible.builtin.set_fact:
|
||||
wireguard_service_dir: "{{ ansible_env.HOME }}/.services/wireguard"
|
||||
wireguard_data_dir: "/mnt/services/wireguard"
|
||||
|
||||
- name: Create WireGuard directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ wireguard_service_dir }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Deploy WireGuard docker-compose.yml
|
||||
ansible.builtin.template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: "{{ wireguard_service_dir }}/docker-compose.yml"
|
||||
mode: "0644"
|
||||
register: wireguard_compose
|
||||
|
||||
- name: Stop WireGuard service
|
||||
ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" down --remove-orphans
|
||||
when: wireguard_compose.changed
|
||||
|
||||
- name: Start WireGuard service
|
||||
ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" up -d
|
||||
when: wireguard_compose.changed
|
||||
tags:
|
||||
- services
|
||||
- wireguard
|
||||
Reference in New Issue
Block a user