From 03fd20cdac2172d81aee379f58701471eed4aad5 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 19:23:25 +0200 Subject: [PATCH 01/11] feat: update allowed countries --- config/ansible/group_vars/servers.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/config/ansible/group_vars/servers.yml b/config/ansible/group_vars/servers.yml index e240151..d95c7d0 100644 --- a/config/ansible/group_vars/servers.yml +++ b/config/ansible/group_vars/servers.yml @@ -18,6 +18,7 @@ allowed_countries_codes: - KR # South Korea - SK # Slovakia - FL # Finland + - DK # Denmark # IP ranges for blocked countries (generated automatically) # This will be populated by the country blocking script From faebace545572c524ccf1836377dd6f22148c37f Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 19:23:40 +0200 Subject: [PATCH 02/11] refactor: migrate arr-stack to mennos-cachyos-desktop --- config/ansible/tasks/servers/server.yml | 16 ++++++++-------- .../servers/services/arr-stack/arr-stack.yml | 2 +- .../services/arr-stack/docker-compose.yml.j2 | 16 +++++++--------- .../tasks/servers/services/caddy/Caddyfile.j2 | 18 ++++++++++++------ .../services/downloaders/docker-compose.yml.j2 | 6 ++---- .../services/downloaders/downloaders.yml | 1 - .../services/dozzle/docker-compose.yml.j2 | 2 +- 7 files changed, 31 insertions(+), 30 deletions(-) diff --git a/config/ansible/tasks/servers/server.yml b/config/ansible/tasks/servers/server.yml index 03178a9..873effb 100644 --- a/config/ansible/tasks/servers/server.yml +++ b/config/ansible/tasks/servers/server.yml @@ -26,14 +26,14 @@ reload: true become: true loop: - - { name: "fs.file-max", value: "2097152" } # Max open files for the entire system - - { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have - - { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory - - { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency 
to reclaim memory for directory/inode caches - - { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket + - { name: "fs.file-max", value: "2097152" } # Max open files for the entire system + - { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have + - { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory + - { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency to reclaim memory for directory/inode caches + - { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket - { name: "net.core.netdev_max_backlog", value: "65535" } # Max packets queued on network interface input - - { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state - - { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections + - { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state + - { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections - name: Include service tasks ansible.builtin.include_tasks: "services/{{ item.name }}/{{ item.name }}.yml" @@ -107,7 +107,7 @@ hosts: - mennos-cachyos-desktop - name: arr-stack - enabled: false + enabled: true hosts: - mennos-cachyos-desktop - name: home-assistant diff --git a/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml b/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml index e4b27dd..57b6e97 100644 --- a/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml +++ b/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml @@ -4,7 +4,7 @@ - name: Set ArrStack directories ansible.builtin.set_fact: arr_stack_service_dir: "{{ ansible_env.HOME }}/services/arr-stack" - arr_stack_data_dir: "/mnt/object_storage/services/arr-stack" + arr_stack_data_dir: 
"/mnt/services/arr-stack" - name: Create ArrStack directory ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/arr-stack/docker-compose.yml.j2 b/config/ansible/tasks/servers/services/arr-stack/docker-compose.yml.j2 index 97ce74f..6adf644 100644 --- a/config/ansible/tasks/servers/services/arr-stack/docker-compose.yml.j2 +++ b/config/ansible/tasks/servers/services/arr-stack/docker-compose.yml.j2 @@ -13,7 +13,7 @@ services: - host.docker.internal:host-gateway volumes: - {{ arr_stack_data_dir }}/radarr-config:/config - - /mnt/object_storage:/storage + - /mnt/data:/mnt/data restart: "unless-stopped" networks: - arr_stack_net @@ -27,7 +27,7 @@ services: - TZ=Europe/Amsterdam volumes: - {{ arr_stack_data_dir }}/sonarr-config:/config - - /mnt/object_storage:/storage + - /mnt/data:/mnt/data ports: - 8989:8989 extra_hosts: @@ -43,12 +43,12 @@ services: - PGID=100 - TZ=Europe/Amsterdam ports: - - 8686:8686 + - 6969:6969 extra_hosts: - host.docker.internal:host-gateway volumes: - {{ arr_stack_data_dir }}/whisparr-config:/config - - /mnt/object_storage:/storage + - /mnt/data:/mnt/data restart: unless-stopped networks: - arr_stack_net @@ -86,15 +86,14 @@ services: networks: - arr_stack_net - jellyseerr: - image: fallenbagel/jellyseerr - container_name: jellyseerr + overseerr: + image: sctx/overseerr:latest environment: - PUID=1000 - PGID=100 - TZ=Europe/Amsterdam volumes: - - {{ arr_stack_data_dir }}/jellyseerr-config:/app/config + - {{ arr_stack_data_dir }}/overseerr-config:/app/config ports: - 5055:5055 extra_hosts: @@ -106,7 +105,6 @@ services: networks: arr_stack_net: - name: arr_stack_net caddy_network: external: true name: caddy_default diff --git a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 index 72a79b2..eca880a 100644 --- a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 +++ b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 @@ -155,9 +155,15 @@ 
http://ip.mvl.sh http://ip.vleeuwen.me { } } -overseerr.mvl.sh overseerr.vleeuwen.me { +overseerr.mvl.sh { import country_block - reverse_proxy host.docker.internal:5555 + reverse_proxy overseerr:5055 + tls {{ caddy_email }} +} + +overseerr.vleeuwen.me { + import country_block + redir https://overseerr.mvl.sh tls {{ caddy_email }} } @@ -175,11 +181,11 @@ plex.mvl.sh plex.vleeuwen.me { drive.mvl.sh drive.vleeuwen.me { import country_block - + # CalDAV and CardDAV redirects redir /.well-known/carddav /remote.php/dav/ 301 redir /.well-known/caldav /remote.php/dav/ 301 - + # Handle other .well-known requests handle /.well-known/* { reverse_proxy nextcloud:80 { @@ -199,7 +205,7 @@ drive.mvl.sh drive.vleeuwen.me { header_up X-Forwarded-Proto {scheme} header_up X-Forwarded-Host {host} } - + # Security headers header { # HSTS header for enhanced security (required by Nextcloud) @@ -212,7 +218,7 @@ drive.mvl.sh drive.vleeuwen.me { X-Permitted-Cross-Domain-Policies "none" X-Robots-Tag "noindex, nofollow" } - + tls {{ caddy_email }} } diff --git a/config/ansible/tasks/servers/services/downloaders/docker-compose.yml.j2 b/config/ansible/tasks/servers/services/downloaders/docker-compose.yml.j2 index d84fb66..492d026 100644 --- a/config/ansible/tasks/servers/services/downloaders/docker-compose.yml.j2 +++ b/config/ansible/tasks/servers/services/downloaders/docker-compose.yml.j2 @@ -33,8 +33,7 @@ services: - TZ=Europe/Amsterdam volumes: - {{ downloaders_data_dir }}/sabnzbd-config:/config - - {{ object_storage_dir }}:/storage - - {{ local_data_dir }}:/local + - {{ local_data_dir }}:{{ local_data_dir }} restart: unless-stopped network_mode: "service:gluetun" depends_on: @@ -51,8 +50,7 @@ services: - TZ=Europe/Amsterdam volumes: - {{ downloaders_data_dir }}/qbit-config:/config - - {{ object_storage_dir }}:/storage - - {{ local_data_dir }}:/local + - {{ local_data_dir }}:{{ local_data_dir }} depends_on: gluetun: condition: service_healthy diff --git 
a/config/ansible/tasks/servers/services/downloaders/downloaders.yml b/config/ansible/tasks/servers/services/downloaders/downloaders.yml index 86b4205..d09cc4f 100644 --- a/config/ansible/tasks/servers/services/downloaders/downloaders.yml +++ b/config/ansible/tasks/servers/services/downloaders/downloaders.yml @@ -3,7 +3,6 @@ block: - name: Set Downloaders directories ansible.builtin.set_fact: - object_storage_dir: "/mnt/object_storage" local_data_dir: "/mnt/data" downloaders_service_dir: "{{ ansible_env.HOME }}/services/downloaders" downloaders_data_dir: "/mnt/services/downloaders" diff --git a/config/ansible/tasks/servers/services/dozzle/docker-compose.yml.j2 b/config/ansible/tasks/servers/services/dozzle/docker-compose.yml.j2 index ec3cc61..16ded45 100644 --- a/config/ansible/tasks/servers/services/dozzle/docker-compose.yml.j2 +++ b/config/ansible/tasks/servers/services/dozzle/docker-compose.yml.j2 @@ -4,7 +4,7 @@ services: volumes: - /var/run/docker.sock:/var/run/docker.sock ports: - - 8686:8080 + - 8800:8080 environment: - DOZZLE_NO_ANALYTICS=true restart: unless-stopped From 97d616b7ed6f4b5df388a2ca8f1f07540e4f1dc8 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 21:33:47 +0200 Subject: [PATCH 03/11] Cleanup --- config/ansible/tasks/workstations/flatpaks.yml | 1 - config/ansible/tasks/workstations/snaps.yml | 2 -- config/ansible/tasks/workstations/workstation.yml | 5 ----- zed/settings.json | 3 +-- 4 files changed, 1 insertion(+), 10 deletions(-) diff --git a/config/ansible/tasks/workstations/flatpaks.yml b/config/ansible/tasks/workstations/flatpaks.yml index 362acea..d4f3e6f 100644 --- a/config/ansible/tasks/workstations/flatpaks.yml +++ b/config/ansible/tasks/workstations/flatpaks.yml @@ -61,7 +61,6 @@ - com.usebottles.bottles - com.github.tchx84.Flatseal - com.github.wwmm.easyeffects - - org.onlyoffice.desktopeditors - io.gitlab.adhami3310.Impression - io.ente.auth - io.github.fastrizwaan.WineZGUI diff --git 
a/config/ansible/tasks/workstations/snaps.yml b/config/ansible/tasks/workstations/snaps.yml index 4c5bc9f..d79e424 100644 --- a/config/ansible/tasks/workstations/snaps.yml +++ b/config/ansible/tasks/workstations/snaps.yml @@ -45,8 +45,6 @@ desired_snaps: - name: beekeeper-studio classic: false - - name: steam - classic: false - name: Install desired Snap packages ansible.builtin.command: "snap install {{ item.name }} {{ '--classic' if item.classic else '' }}" diff --git a/config/ansible/tasks/workstations/workstation.yml b/config/ansible/tasks/workstations/workstation.yml index 93a3568..8838896 100644 --- a/config/ansible/tasks/workstations/workstation.yml +++ b/config/ansible/tasks/workstations/workstation.yml @@ -46,11 +46,6 @@ - name: Ensure workstation common packages are installed ansible.builtin.package: name: - ###### THE FOLLOWING PACKAGES ARE DISABLED DUE TO MISSING ON UBUNTU REPOS ###### - # Steam and it's dependencies - # - steam - # - steam-devices - ################################################################################ # Statistics HUD for gaming - mangohud # Used for VSCode Extensions diff --git a/zed/settings.json b/zed/settings.json index 6a259cf..70ec8b2 100644 --- a/zed/settings.json +++ b/zed/settings.json @@ -22,7 +22,7 @@ "theme": { "mode": "system", "light": "Catppuccin Latte", - "dark": "Catppuccin FrappΓ©" + "dark": "Catppuccin Macchiato" }, "tabs": { "close_position": "right", @@ -59,7 +59,6 @@ "provider": "zed.dev", "model": "claude-sonnet-4" }, - "version": "2" }, "edit_predictions": { "mode": "subtle", From 506e5680218107ffc2ca528cc326af13a64c160c Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 21:53:07 +0200 Subject: [PATCH 04/11] Add SG, AT and CH to allowed countries list --- config/ansible/group_vars/servers.yml | 33 +++++++++++++++------------ 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/config/ansible/group_vars/servers.yml b/config/ansible/group_vars/servers.yml index 
d95c7d0..8f0b12c 100644 --- a/config/ansible/group_vars/servers.yml +++ b/config/ansible/group_vars/servers.yml @@ -4,21 +4,24 @@ install_ui_apps: false # Countries that are allowed to access the server Caddy reverse proxy allowed_countries_codes: - - US # United States - - CA # Canada - - GB # United Kingdom - - DE # Germany - - FR # France - - ES # Spain - - IT # Italy - - NL # Netherlands - - AU # Australia - - NZ # New Zealand - - JP # Japan - - KR # South Korea - - SK # Slovakia - - FL # Finland - - DK # Denmark + - US # United States + - CA # Canada + - GB # United Kingdom + - DE # Germany + - FR # France + - ES # Spain + - IT # Italy + - NL # Netherlands + - AU # Australia + - NZ # New Zealand + - JP # Japan + - KR # South Korea + - SK # Slovakia + - FL # Finland + - DK # Denmark + - SG # Singapore + - AT # Austria + - CH # Switzerland # IP ranges for blocked countries (generated automatically) # This will be populated by the country blocking script From 4242e037b0174546e6789bf1019e327dfad75b0f Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 21:53:22 +0200 Subject: [PATCH 05/11] Remove redundant X-Forwarded headers and redirect domains --- .../tasks/servers/services/caddy/Caddyfile.j2 | 30 +++++-------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 index eca880a..4f42aea 100644 --- a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 +++ b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 @@ -95,9 +95,6 @@ home.vleeuwen.me { reverse_proxy host.docker.internal:8123 { header_up Host {upstream_hostport} header_up X-Real-IP {http.request.remote.host} - header_up X-Forwarded-For {http.request.remote.host} - header_up X-Forwarded-Proto {scheme} - header_up X-Forwarded-Host {host} } tls {{ caddy_email }} } @@ -108,7 +105,6 @@ unifi.mvl.sh { tls_insecure_skip_verify } header_up Host {host} - 
header_up X-Forwarded-Proto https } tls {{ caddy_email }} } @@ -119,7 +115,6 @@ hotspot.mvl.sh { tls_insecure_skip_verify } header_up Host {host} - header_up X-Forwarded-Proto https } tls {{ caddy_email }} } @@ -138,9 +133,6 @@ ip.mvl.sh ip.vleeuwen.me { import country_block reverse_proxy echoip:8080 { header_up X-Real-IP {http.request.remote.host} - header_up X-Forwarded-For {http.request.remote.host} - header_up X-Forwarded-Proto {scheme} - header_up X-Forwarded-Host {host} } tls {{ caddy_email }} } @@ -149,9 +141,6 @@ http://ip.mvl.sh http://ip.vleeuwen.me { import country_block reverse_proxy echoip:8080 { header_up X-Real-IP {http.request.remote.host} - header_up X-Forwarded-For {http.request.remote.host} - header_up X-Forwarded-Proto {scheme} - header_up X-Forwarded-Host {host} } } @@ -163,22 +152,25 @@ overseerr.mvl.sh { overseerr.vleeuwen.me { import country_block - redir https://overseerr.mvl.sh + redir https://overseerr.mvl.sh{uri} tls {{ caddy_email }} } -plex.mvl.sh plex.vleeuwen.me { +plex.mvl.sh { import country_block reverse_proxy host.docker.internal:32400 { header_up Host {upstream_hostport} header_up X-Real-IP {http.request.remote.host} - header_up X-Forwarded-For {http.request.remote.host} - header_up X-Forwarded-Proto {scheme} - header_up X-Forwarded-Host {host} } tls {{ caddy_email }} } +plex.vleeuwen.me { + import country_block + redir https://plex.mvl.sh{uri} + tls {{ caddy_email }} +} + drive.mvl.sh drive.vleeuwen.me { import country_block @@ -191,9 +183,6 @@ drive.mvl.sh drive.vleeuwen.me { reverse_proxy nextcloud:80 { header_up Host {host} header_up X-Real-IP {http.request.remote.host} - header_up X-Forwarded-For {http.request.remote.host} - header_up X-Forwarded-Proto {scheme} - header_up X-Forwarded-Host {host} } } @@ -201,9 +190,6 @@ drive.mvl.sh drive.vleeuwen.me { reverse_proxy nextcloud:80 { header_up Host {host} header_up X-Real-IP {http.request.remote.host} - header_up X-Forwarded-For {http.request.remote.host} - header_up 
X-Forwarded-Proto {scheme} - header_up X-Forwarded-Host {host} } # Security headers From 43cc1861342caae43306667c88b0979abb9b3b88 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 22:09:07 +0200 Subject: [PATCH 06/11] Fix incorrect Finland country code and updated home assitant domain --- config/ansible/group_vars/servers.yml | 2 +- config/ansible/tasks/servers/services/caddy/Caddyfile.j2 | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/config/ansible/group_vars/servers.yml b/config/ansible/group_vars/servers.yml index 8f0b12c..f2f2121 100644 --- a/config/ansible/group_vars/servers.yml +++ b/config/ansible/group_vars/servers.yml @@ -17,7 +17,7 @@ allowed_countries_codes: - JP # Japan - KR # South Korea - SK # Slovakia - - FL # Finland + - FI # Finland - DK # Denmark - SG # Singapore - AT # Austria diff --git a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 index 4f42aea..10f6e42 100644 --- a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 +++ b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 @@ -90,7 +90,7 @@ photos.vleeuwen.me { tls {{ caddy_email }} } -home.vleeuwen.me { +home.mvl.sh { import country_block reverse_proxy host.docker.internal:8123 { header_up Host {upstream_hostport} @@ -99,6 +99,12 @@ home.vleeuwen.me { tls {{ caddy_email }} } +home.vleeuwen.me { + import country_block + redir https://home.mvl.sh{uri} + tls {{ caddy_email }} +} + unifi.mvl.sh { reverse_proxy unifi-controller:8443 { transport http { From d6600630bc41f51969ae07865a18498630d59e51 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Tue, 22 Jul 2025 23:26:31 +0200 Subject: [PATCH 07/11] Remove cloud server configuration files and references and add dynmamic dns Shit --- config/ansible/docs/dynamic-dns-setup.md | 124 +++ config/ansible/inventory.ini | 1 - config/ansible/playbook.yml | 2 +- config/ansible/tasks/global/symlinks.yml | 67 +- 
.../tasks/global/utils/dynamic-dns-cf.go | 903 ++++++++++++++++++ config/ansible/tasks/servers/dynamic-dns.yml | 99 ++ config/ansible/tasks/servers/juicefs.yml | 2 +- config/ansible/tasks/servers/server.yml | 9 +- .../tasks/servers/services/caddy/Caddyfile.j2 | 10 +- .../services/redis/docker-compose.yml.j2 | 2 +- .../uptime-kuma/docker-compose.yml.j2 | 22 - .../services/uptime-kuma/uptime-kuma.yml | 31 - .../ansible/tasks/workstations/flatpaks.yml | 3 - config/ansible/templates/dynamic-dns.env.j2 | 12 + config/ansible/templates/juicefs.service.j2 | 2 +- config/home-manager/flake.nix | 14 - .../common/hosts/mennos-cloud-server.nix | 4 - .../home-manager/packages/common/packages.nix | 2 - 18 files changed, 1184 insertions(+), 125 deletions(-) create mode 100644 config/ansible/docs/dynamic-dns-setup.md create mode 100644 config/ansible/tasks/global/utils/dynamic-dns-cf.go create mode 100644 config/ansible/tasks/servers/dynamic-dns.yml delete mode 100644 config/ansible/tasks/servers/services/uptime-kuma/docker-compose.yml.j2 delete mode 100644 config/ansible/tasks/servers/services/uptime-kuma/uptime-kuma.yml create mode 100644 config/ansible/templates/dynamic-dns.env.j2 delete mode 100644 config/home-manager/packages/common/hosts/mennos-cloud-server.nix diff --git a/config/ansible/docs/dynamic-dns-setup.md b/config/ansible/docs/dynamic-dns-setup.md new file mode 100644 index 0000000..1d84cd5 --- /dev/null +++ b/config/ansible/docs/dynamic-dns-setup.md @@ -0,0 +1,124 @@ +# Dynamic DNS OnePassword Setup + +This document explains how to set up the required OnePassword entries for the Dynamic DNS automation. + +## Overview + +The Dynamic DNS task automatically retrieves credentials from OnePassword using the Ansible OnePassword lookup plugin. This eliminates the need for vault files and provides better security. + +## Required OnePassword Entries + +### 1. 
CloudFlare API Token + +**Location:** `CloudFlare API Token` in `Dotfiles` vault, field `password` + +**Setup Steps:** + +1. Go to [CloudFlare API Tokens](https://dash.cloudflare.com/profile/api-tokens) +2. Click "Create Token" +3. Use the "Edit zone DNS" template +4. Configure permissions: + - Zone: DNS: Edit + - Zone Resources: Include all zones (or specific zones for your domains) +5. Add IP address filtering if desired (optional but recommended) +6. Click "Continue to summary" and "Create Token" +7. Copy the token and save it in OnePassword: + - Title: `CloudFlare API Token` + - Vault: `Dotfiles` + - Field: `password` (this should be the main password field) + +### 2. Telegram Bot Credentials + +**Location:** `Telegram DynDNS Bot` in `Dotfiles` vault, fields `password` and `chat_id` + +**Setup Steps:** + +#### Create Telegram Bot: + +1. Message [@BotFather](https://t.me/BotFather) on Telegram +2. Send `/start` then `/newbot` +3. Follow the prompts to create your bot +4. Save the bot token (format: `123456789:ABCdefGHijklMNopQRstUVwxyz`) + +#### Get Chat ID: + +1. Send any message to your new bot +2. Visit: `https://api.telegram.org/bot/getUpdates` +3. Look for `"chat":{"id":YOUR_CHAT_ID}` in the response +4. 
Save the chat ID (format: `987654321` or `-987654321` for groups) + +#### Save in OnePassword: + +- Title: `Telegram DynDNS Bot` +- Vault: `Dotfiles` +- Fields: + - `password`: Your bot token (123456789:ABCdefGHijklMNopQRstUVwxyz) + - `chat_id`: Your chat ID (987654321) + +## Verification + +You can test that the OnePassword lookups work by running: + +```bash +# Test CloudFlare token lookup +ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}" + +# Test Telegram bot token +ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}" + +# Test Telegram chat ID +ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}" +``` + +## Security Notes + +- Credentials are never stored in version control +- Environment file (`~/.local/bin/dynamic-dns.env`) has 600 permissions +- OnePassword CLI must be authenticated before running Ansible +- Make sure to run `op signin` before executing the playbook + +## Troubleshooting + +### OnePassword CLI Not Authenticated + +```bash +op signin +``` + +### Missing Fields in OnePassword + +Ensure the exact field names match: + +- CloudFlare: field must be named `password` +- Telegram: fields must be named `password` and `chat_id` + +### Invalid CloudFlare Token + +- Check token has `Zone:DNS:Edit` permissions +- Verify token is active in CloudFlare dashboard +- Test with: `curl -H "Authorization: Bearer YOUR_TOKEN" https://api.cloudflare.com/client/v4/user/tokens/verify` + +### Telegram Not Working + +- Ensure you've sent at least one message to your bot +- Verify chat ID format (numbers only, may start with -) +- Test with: `go run dynamic-dns-cf.go --test-telegram` + +## Usage + +Once set up, the dynamic DNS will automatically: + +- Update DNS records every 15 minutes +- Send Telegram 
notifications when IP changes +- Log all activity to system journal (`journalctl -t dynamic-dns`) + +## Domains Configured + +The automation updates these domains: + +- `vleeuwen.me` +- `mvl.sh` +- `mennovanleeuwen.nl` + +To modify the domain list, edit the wrapper script at: +`~/.local/bin/dynamic-dns-update.sh` diff --git a/config/ansible/inventory.ini b/config/ansible/inventory.ini index 854b072..27fcf41 100644 --- a/config/ansible/inventory.ini +++ b/config/ansible/inventory.ini @@ -6,6 +6,5 @@ mennos-cachyos-desktop ansible_connection=local [servers] mennos-server ansible_connection=local -mennos-cloud-server ansible_connection=local mennos-vm ansible_connection=local mennos-cachyos-desktop ansible_connection=local diff --git a/config/ansible/playbook.yml b/config/ansible/playbook.yml index cbaee65..ebe910e 100644 --- a/config/ansible/playbook.yml +++ b/config/ansible/playbook.yml @@ -16,4 +16,4 @@ - name: Include server tasks ansible.builtin.import_tasks: tasks/servers/server.yml - when: inventory_hostname in ['mennos-server', 'mennos-cloud-server', 'mennos-hobbypc', 'mennos-vm', 'mennos-cachyos-desktop'] + when: inventory_hostname in ['mennos-server', 'mennos-hobbypc', 'mennos-vm', 'mennos-cachyos-desktop'] diff --git a/config/ansible/tasks/global/symlinks.yml b/config/ansible/tasks/global/symlinks.yml index bc66d3f..a061322 100644 --- a/config/ansible/tasks/global/symlinks.yml +++ b/config/ansible/tasks/global/symlinks.yml @@ -1,38 +1,43 @@ --- - name: Server setup block: - - name: Set user home directory - ansible.builtin.set_fact: - user_home: "{{ ansible_env.HOME if ansible_user_id == 'root' else lookup('env', 'HOME') }}" + - name: Set user home directory + ansible.builtin.set_fact: + user_home: "{{ ansible_env.HOME if ansible_user_id == 'root' else lookup('env', 'HOME') }}" - - name: Create basic symlinks - ansible.builtin.file: - src: "{{ item.src | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}" - dest: "{{ 
item.dest | replace('~', user_home) }}" - state: link - force: true - follow: false - loop: - - { src: "$DOTFILES_PATH/config/home-manager", dest: "~/.config/home-manager" } - - { src: "$DOTFILES_PATH/config/ssh/config", dest: "~/.ssh/config" } - - { src: "$DOTFILES_PATH/config/starship.toml", dest: "~/.config/starship.toml" } - - { src: "$DOTFILES_PATH/.bashrc", dest: "~/.bashrc.extra" } + - name: Create basic symlinks + ansible.builtin.file: + src: "{{ item.src | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}" + dest: "{{ item.dest | replace('~', user_home) }}" + state: link + force: true + follow: false + loop: + - { + src: "$DOTFILES_PATH/config/home-manager", + dest: "~/.config/home-manager", + } + - { src: "$DOTFILES_PATH/config/ssh/config", dest: "~/.ssh/config" } + - { + src: "$DOTFILES_PATH/config/starship.toml", + dest: "~/.config/starship.toml", + } + - { src: "$DOTFILES_PATH/.bashrc", dest: "~/.bashrc.extra" } - - name: Create gitconfig symlink - ansible.builtin.file: - src: "{{ gitconfig_mapping[inventory_hostname] | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}" - dest: "{{ user_home }}/.gitconfig" - state: link - force: true - follow: false - vars: - gitconfig_mapping: - mennos-desktop: "$DOTFILES_PATH/config/git/gitconfig.wsl" - mennos-cachyos-desktop: "$DOTFILES_PATH/config/git/gitconfig.linux" - mennos-cachyos-laptop: "$DOTFILES_PATH/config/git/gitconfig.linux" - mennos-laptop-w: "$DOTFILES_PATH/config/git/gitconfig.wsl" - mennos-server: "$DOTFILES_PATH/config/git/gitconfig.mennos-server" - mennos-cloud-server: "$DOTFILES_PATH/config/git/gitconfig.mennos-server" - mennos-vm: "$DOTFILES_PATH/config/git/gitconfig.mennos-server" + - name: Create gitconfig symlink + ansible.builtin.file: + src: "{{ gitconfig_mapping[inventory_hostname] | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}" + dest: "{{ user_home }}/.gitconfig" + state: 
link + force: true + follow: false + vars: + gitconfig_mapping: + mennos-desktop: "$DOTFILES_PATH/config/git/gitconfig.wsl" + mennos-cachyos-desktop: "$DOTFILES_PATH/config/git/gitconfig.linux" + mennos-cachyos-laptop: "$DOTFILES_PATH/config/git/gitconfig.linux" + mennos-laptop-w: "$DOTFILES_PATH/config/git/gitconfig.wsl" + mennos-server: "$DOTFILES_PATH/config/git/gitconfig.mennos-server" + mennos-vm: "$DOTFILES_PATH/config/git/gitconfig.mennos-server" tags: - symlinks diff --git a/config/ansible/tasks/global/utils/dynamic-dns-cf.go b/config/ansible/tasks/global/utils/dynamic-dns-cf.go new file mode 100644 index 0000000..fa69af6 --- /dev/null +++ b/config/ansible/tasks/global/utils/dynamic-dns-cf.go @@ -0,0 +1,903 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" +) + +// CloudFlare API structures +type CloudFlareResponse struct { + Success bool `json:"success"` + Errors []CloudFlareError `json:"errors"` + Result json.RawMessage `json:"result"` + Messages []CloudFlareMessage `json:"messages"` +} + +type CloudFlareError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +type CloudFlareMessage struct { + Code int `json:"code"` + Message string `json:"message"` +} + +type DNSRecord struct { + ID string `json:"id"` + Type string `json:"type"` + Name string `json:"name"` + Content string `json:"content"` + TTL int `json:"ttl"` + ZoneID string `json:"zone_id"` +} + +type Zone struct { + ID string `json:"id"` + Name string `json:"name"` +} + +type TokenVerification struct { + ID string `json:"id"` + Status string `json:"status"` +} + +type NotificationInfo struct { + RecordName string + OldIP string + NewIP string + IsNew bool +} + +// Configuration +type Config struct { + APIToken string + RecordNames []string + IPSources []string + DryRun bool + Verbose bool + Force bool + TTL int + TelegramBotToken string + TelegramChatID string + Client *http.Client +} + +// Default IP 
sources +var defaultIPSources = []string{ + "https://ifconfig.co/ip", + "https://ip.seeip.org", + "https://ipv4.icanhazip.com", + "https://api.ipify.org", +} + +func main() { + config := &Config{ + Client: &http.Client{Timeout: 10 * time.Second}, + } + + // Command line flags + var ipSourcesFlag string + var recordsFlag string + var listZones bool + var testTelegram bool + flag.StringVar(&recordsFlag, "record", "", "DNS A record name(s) to update - comma-separated for multiple (required)") + flag.StringVar(&ipSourcesFlag, "ip-sources", "", "Comma-separated list of IP detection services (optional)") + flag.BoolVar(&config.DryRun, "dry-run", false, "Show what would be done without making changes") + flag.BoolVar(&config.Verbose, "verbose", false, "Enable verbose logging") + flag.BoolVar(&listZones, "list-zones", false, "List all accessible zones and exit") + flag.BoolVar(&config.Force, "force", false, "Force update even if IP hasn't changed") + flag.BoolVar(&testTelegram, "test-telegram", false, "Send a test Telegram notification and exit") + flag.IntVar(&config.TTL, "ttl", 300, "TTL for DNS record in seconds") + + // Custom usage function + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "CloudFlare Dynamic DNS Tool\n\n") + fmt.Fprintf(os.Stderr, "Updates CloudFlare DNS A records with your current public IP address.\n") + fmt.Fprintf(os.Stderr, "Supports multiple records, dry-run mode, and Telegram notifications.\n\n") + + fmt.Fprintf(os.Stderr, "USAGE:\n") + fmt.Fprintf(os.Stderr, " %s [OPTIONS]\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, "REQUIRED ENVIRONMENT VARIABLES:\n") + fmt.Fprintf(os.Stderr, " CLOUDFLARE_API_TOKEN CloudFlare API token with Zone:DNS:Edit permissions\n") + fmt.Fprintf(os.Stderr, " Get from: https://dash.cloudflare.com/profile/api-tokens\n\n") + + fmt.Fprintf(os.Stderr, "OPTIONAL ENVIRONMENT VARIABLES:\n") + fmt.Fprintf(os.Stderr, " TELEGRAM_BOT_TOKEN Telegram bot token for notifications\n") + fmt.Fprintf(os.Stderr, " TELEGRAM_CHAT_ID Telegram 
chat ID to send notifications to\n\n") + + fmt.Fprintf(os.Stderr, "OPTIONS:\n") + flag.PrintDefaults() + + fmt.Fprintf(os.Stderr, "\nEXAMPLES:\n") + fmt.Fprintf(os.Stderr, " # Update single record\n") + fmt.Fprintf(os.Stderr, " %s -record home.example.com\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, " # Update multiple records\n") + fmt.Fprintf(os.Stderr, " %s -record \"home.example.com,api.example.com,vpn.mydomain.net\"\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, " # Dry run with verbose output\n") + fmt.Fprintf(os.Stderr, " %s -dry-run -verbose -record home.example.com\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, " # Force update even if IP hasn't changed\n") + fmt.Fprintf(os.Stderr, " %s -force -record home.example.com\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, " # Custom TTL and IP sources\n") + fmt.Fprintf(os.Stderr, " %s -record home.example.com -ttl 600 -ip-sources \"https://ifconfig.co/ip,https://api.ipify.org\"\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, " # List accessible CloudFlare zones\n") + fmt.Fprintf(os.Stderr, " %s -list-zones\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, " # Test Telegram notifications\n") + fmt.Fprintf(os.Stderr, " %s -test-telegram\n\n", os.Args[0]) + + fmt.Fprintf(os.Stderr, "SETUP:\n") + fmt.Fprintf(os.Stderr, " 1. Create CloudFlare API token:\n") + fmt.Fprintf(os.Stderr, " - Go to https://dash.cloudflare.com/profile/api-tokens\n") + fmt.Fprintf(os.Stderr, " - Use 'Edit zone DNS' template\n") + fmt.Fprintf(os.Stderr, " - Select your zones\n") + fmt.Fprintf(os.Stderr, " - Copy token and set CLOUDFLARE_API_TOKEN environment variable\n\n") + + fmt.Fprintf(os.Stderr, " 2. 
Optional: Setup Telegram notifications:\n") + fmt.Fprintf(os.Stderr, "    - Message @BotFather on Telegram to create a bot\n") + fmt.Fprintf(os.Stderr, "    - Get your chat ID by messaging your bot, then visit:\n") + fmt.Fprintf(os.Stderr, "      https://api.telegram.org/bot<YOUR_BOT_TOKEN>/getUpdates\n") + fmt.Fprintf(os.Stderr, "    - Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables\n\n") + + fmt.Fprintf(os.Stderr, "NOTES:\n") + fmt.Fprintf(os.Stderr, "  - Records can be in different CloudFlare zones\n") + fmt.Fprintf(os.Stderr, "  - Only updates when IP actually changes (unless -force is used)\n") + fmt.Fprintf(os.Stderr, "  - Supports both root domains and subdomains\n") + fmt.Fprintf(os.Stderr, "  - Telegram notifications sent only when IP changes\n") + fmt.Fprintf(os.Stderr, "  - Use -dry-run to test without making changes\n\n") + } + + flag.Parse() + + // Validate required arguments (unless listing zones or testing telegram) + if recordsFlag == "" && !listZones && !testTelegram { + fmt.Fprintf(os.Stderr, "Error: -record flag is required\n") + flag.Usage() + os.Exit(1) + } + + // Parse record names + if recordsFlag != "" { + config.RecordNames = strings.Split(recordsFlag, ",") + // Trim whitespace from each record name + for i, record := range config.RecordNames { + config.RecordNames[i] = strings.TrimSpace(record) + } + } + + // Get API token from environment + config.APIToken = os.Getenv("CLOUDFLARE_API_TOKEN") + if config.APIToken == "" { + fmt.Fprintf(os.Stderr, "Error: CLOUDFLARE_API_TOKEN environment variable is required\n") + fmt.Fprintf(os.Stderr, "Get your API token from: https://dash.cloudflare.com/profile/api-tokens\n") + fmt.Fprintf(os.Stderr, "Create a token with 'Zone:DNS:Edit' permissions for your zone\n") + os.Exit(1) + } + + // Get optional Telegram credentials + config.TelegramBotToken = os.Getenv("TELEGRAM_BOT_TOKEN") + config.TelegramChatID = os.Getenv("TELEGRAM_CHAT_ID") + + if config.Verbose && config.TelegramBotToken != "" && config.TelegramChatID != "" 
{ + fmt.Println("Telegram notifications enabled") + } + + // Parse IP sources + if ipSourcesFlag != "" { + config.IPSources = strings.Split(ipSourcesFlag, ",") + } else { + config.IPSources = defaultIPSources + } + + if config.Verbose { + fmt.Printf("Config: Records=%v, TTL=%d, DryRun=%v, Force=%v, IPSources=%v\n", + config.RecordNames, config.TTL, config.DryRun, config.Force, config.IPSources) + } + + // If testing telegram, do that and exit (skip API token validation) + if testTelegram { + if err := testTelegramNotification(config); err != nil { + fmt.Fprintf(os.Stderr, "Error testing Telegram: %v\n", err) + os.Exit(1) + } + return + } + + // Validate API token + if err := validateToken(config); err != nil { + fmt.Fprintf(os.Stderr, "Error validating API token: %v\n", err) + os.Exit(1) + } + + if config.Verbose { + fmt.Println("API token validated successfully") + } + + // If listing zones, do that and exit + if listZones { + if err := listAllZones(config); err != nil { + fmt.Fprintf(os.Stderr, "Error listing zones: %v\n", err) + os.Exit(1) + } + return + } + + // Get current public IP + currentIP, err := getCurrentIP(config) + if err != nil { + fmt.Fprintf(os.Stderr, "Error getting current IP: %v\n", err) + os.Exit(1) + } + + if config.Verbose { + fmt.Printf("Current public IP: %s\n", currentIP) + fmt.Printf("Processing %d record(s)\n", len(config.RecordNames)) + } + + // Process each record + var totalUpdates int + var allNotifications []NotificationInfo + + for _, recordName := range config.RecordNames { + if config.Verbose { + fmt.Printf("\n--- Processing record: %s ---\n", recordName) + } + + // Find the zone for the record + zoneName, zoneID, err := findZoneForRecord(config, recordName) + if err != nil { + fmt.Fprintf(os.Stderr, "Error finding zone for %s: %v\n", recordName, err) + continue + } + + if config.Verbose { + fmt.Printf("Found zone: %s (ID: %s)\n", zoneName, zoneID) + } + + // Find existing DNS record + record, err := findDNSRecordByName(config, 
zoneID, recordName) + if err != nil { + fmt.Fprintf(os.Stderr, "Error finding DNS record %s: %v\n", recordName, err) + continue + } + + // Compare IPs + if record != nil { + if record.Content == currentIP && !config.Force { + fmt.Printf("DNS record %s already points to %s - no update needed\n", recordName, currentIP) + continue + } + + if config.Verbose { + if record.Content == currentIP { + fmt.Printf("DNS record %s already points to %s, but forcing update\n", + recordName, currentIP) + } else { + fmt.Printf("DNS record %s currently points to %s, needs update to %s\n", + recordName, record.Content, currentIP) + } + } + } else { + if config.Verbose { + fmt.Printf("DNS record %s does not exist, will create it\n", recordName) + } + } + + // Update or create record + if config.DryRun { + if record != nil { + if record.Content == currentIP && config.Force { + fmt.Printf("DRY RUN: Would force update DNS record %s (already %s)\n", + recordName, currentIP) + } else { + fmt.Printf("DRY RUN: Would update DNS record %s from %s to %s\n", + recordName, record.Content, currentIP) + } + } else { + fmt.Printf("DRY RUN: Would create DNS record %s with IP %s\n", + recordName, currentIP) + } + + // Collect notification info for dry-run + if record == nil || record.Content != currentIP || config.Force { + var oldIPForNotification string + if record != nil { + oldIPForNotification = record.Content + } + allNotifications = append(allNotifications, NotificationInfo{ + RecordName: recordName, + OldIP: oldIPForNotification, + NewIP: currentIP, + IsNew: record == nil, + }) + } + continue + } + + var wasUpdated bool + var oldIP string + + if record != nil { + oldIP = record.Content + err = updateDNSRecordByName(config, zoneID, record.ID, recordName, currentIP) + if err != nil { + fmt.Fprintf(os.Stderr, "Error updating DNS record %s: %v\n", recordName, err) + continue + } + fmt.Printf("Successfully updated DNS record %s to %s\n", recordName, currentIP) + wasUpdated = true + } else { + err = 
createDNSRecordByName(config, zoneID, recordName, currentIP) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating DNS record %s: %v\n", recordName, err) + continue + } + fmt.Printf("Successfully created DNS record %s with IP %s\n", recordName, currentIP) + wasUpdated = true + } + + // Collect notification info for actual updates + if wasUpdated && (record == nil || oldIP != currentIP || config.Force) { + allNotifications = append(allNotifications, NotificationInfo{ + RecordName: recordName, + OldIP: oldIP, + NewIP: currentIP, + IsNew: record == nil, + }) + totalUpdates++ + } + } + + // Send batch notification if there were any changes + if len(allNotifications) > 0 { + sendBatchTelegramNotification(config, allNotifications, config.DryRun) + } + + if !config.DryRun && config.Verbose { + fmt.Printf("\nProcessed %d record(s), %d update(s) made\n", len(config.RecordNames), totalUpdates) + } +} + +func validateToken(config *Config) error { + req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/user/tokens/verify", nil) + if err != nil { + return err + } + + req.Header.Set("Authorization", "Bearer "+config.APIToken) + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + var cfResp CloudFlareResponse + if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil { + return err + } + + if !cfResp.Success { + return fmt.Errorf("token validation failed: %v", cfResp.Errors) + } + + var tokenInfo TokenVerification + if err := json.Unmarshal(cfResp.Result, &tokenInfo); err != nil { + return err + } + + if tokenInfo.Status != "active" { + return fmt.Errorf("token is not active, status: %s", tokenInfo.Status) + } + + return nil +} + +func getCurrentIP(config *Config) (string, error) { + var lastError error + + for _, source := range config.IPSources { + if config.Verbose { + fmt.Printf("Trying IP source: %s\n", source) + } + + resp, err := 
config.Client.Get(source) + if err != nil { + lastError = err + if config.Verbose { + fmt.Printf("Failed to get IP from %s: %v\n", source, err) + } + continue + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + + if err != nil { + lastError = err + continue + } + + if resp.StatusCode != 200 { + lastError = fmt.Errorf("HTTP %d from %s", resp.StatusCode, source) + continue + } + + ip := strings.TrimSpace(string(body)) + if ip != "" { + return ip, nil + } + + lastError = fmt.Errorf("empty response from %s", source) + } + + return "", fmt.Errorf("failed to get IP from any source, last error: %v", lastError) +} + +func findZoneForRecord(config *Config, recordName string) (string, string, error) { + // Extract domain from record name (e.g., "sub.example.com" -> try "example.com", "com") + parts := strings.Split(recordName, ".") + + if config.Verbose { + fmt.Printf("Finding zone for record: %s\n", recordName) + } + + for i := 0; i < len(parts); i++ { + zoneName := strings.Join(parts[i:], ".") + + + + req, err := http.NewRequest("GET", + fmt.Sprintf("https://api.cloudflare.com/client/v4/zones?name=%s", zoneName), nil) + if err != nil { + continue + } + + req.Header.Set("Authorization", "Bearer "+config.APIToken) + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + continue + } + + var cfResp CloudFlareResponse + err = json.NewDecoder(resp.Body).Decode(&cfResp) + resp.Body.Close() + + if err != nil || !cfResp.Success { + continue + } + + var zones []Zone + if err := json.Unmarshal(cfResp.Result, &zones); err != nil { + continue + } + + if len(zones) > 0 { + return zones[0].Name, zones[0].ID, nil + } + } + + return "", "", fmt.Errorf("no zone found for record %s", recordName) +} + +func findDNSRecordByName(config *Config, zoneID string, recordName string) (*DNSRecord, error) { + url := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records?type=A&name=%s", + zoneID, recordName) + + req, 
err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Authorization", "Bearer "+config.APIToken) + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var cfResp CloudFlareResponse + if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil { + return nil, err + } + + if !cfResp.Success { + return nil, fmt.Errorf("API error: %v", cfResp.Errors) + } + + var records []DNSRecord + if err := json.Unmarshal(cfResp.Result, &records); err != nil { + return nil, err + } + + if len(records) == 0 { + return nil, nil // Record doesn't exist + } + + return &records[0], nil +} + +func updateDNSRecordByName(config *Config, zoneID, recordID, recordName, ip string) error { + data := map[string]interface{}{ + "type": "A", + "name": recordName, + "content": ip, + "ttl": config.TTL, + } + + jsonData, err := json.Marshal(data) + if err != nil { + return err + } + + url := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records/%s", zoneID, recordID) + req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonData)) + if err != nil { + return err + } + + req.Header.Set("Authorization", "Bearer "+config.APIToken) + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + var cfResp CloudFlareResponse + if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil { + return err + } + + if !cfResp.Success { + return fmt.Errorf("API error: %v", cfResp.Errors) + } + + return nil +} + +func createDNSRecordByName(config *Config, zoneID, recordName, ip string) error { + data := map[string]interface{}{ + "type": "A", + "name": recordName, + "content": ip, + "ttl": config.TTL, + } + + jsonData, err := json.Marshal(data) + if err != nil { + return err + } + + url := 
fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records", zoneID) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData)) + if err != nil { + return err + } + + req.Header.Set("Authorization", "Bearer "+config.APIToken) + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + var cfResp CloudFlareResponse + if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil { + return err + } + + if !cfResp.Success { + return fmt.Errorf("API error: %v", cfResp.Errors) + } + + return nil +} + +func listAllZones(config *Config) error { + req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/zones", nil) + if err != nil { + return err + } + + req.Header.Set("Authorization", "Bearer "+config.APIToken) + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + var cfResp CloudFlareResponse + if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil { + return err + } + + if !cfResp.Success { + return fmt.Errorf("API error: %v", cfResp.Errors) + } + + var zones []Zone + if err := json.Unmarshal(cfResp.Result, &zones); err != nil { + return err + } + + fmt.Printf("Found %d accessible zones:\n", len(zones)) + for _, zone := range zones { + fmt.Printf(" - %s (ID: %s)\n", zone.Name, zone.ID) + } + + if len(zones) == 0 { + fmt.Println("No zones found. 
Make sure your API token has Zone:Read permissions.") + } + + return nil +} + +func sendTelegramNotification(config *Config, record *DNSRecord, oldIP, newIP string, isDryRun bool) { + // Skip if Telegram is not configured + if config.TelegramBotToken == "" || config.TelegramChatID == "" { + return + } + + var message string + dryRunPrefix := "" + if isDryRun { + dryRunPrefix = "πŸ§ͺ DRY RUN - " + } + + if record == nil { + message = fmt.Sprintf("%sπŸ†• DNS Record Created\n\n"+ + "Record: %s\n"+ + "New IP: %s\n"+ + "TTL: %d seconds", + dryRunPrefix, "test-record", newIP, config.TTL) + } else { + message = fmt.Sprintf("%sπŸ”„ IP Address Changed\n\n"+ + "Record: %s\n"+ + "Old IP: %s\n"+ + "New IP: %s\n"+ + "TTL: %d seconds", + dryRunPrefix, "test-record", oldIP, newIP, config.TTL) + } + + // Prepare Telegram API request + telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken) + + payload := map[string]interface{}{ + "chat_id": config.TelegramChatID, + "text": message, + "parse_mode": "HTML", + } + + jsonData, err := json.Marshal(payload) + if err != nil { + if config.Verbose { + fmt.Printf("Failed to marshal Telegram payload: %v\n", err) + } + return + } + + // Send notification + req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData)) + if err != nil { + if config.Verbose { + fmt.Printf("Failed to create Telegram request: %v\n", err) + } + return + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + if config.Verbose { + fmt.Printf("Failed to send Telegram notification: %v\n", err) + } + return + } + defer resp.Body.Close() + + if resp.StatusCode == 200 { + if config.Verbose { + fmt.Println("Telegram notification sent successfully") + } + } else { + if config.Verbose { + body, _ := io.ReadAll(resp.Body) + fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body)) + } + } +} + +func 
testTelegramNotification(config *Config) error { + if config.TelegramBotToken == "" || config.TelegramChatID == "" { + return fmt.Errorf("Telegram not configured. Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables") + } + + fmt.Println("Testing Telegram notification...") + + // Send a test message + message := "πŸ§ͺ Dynamic DNS Test\n\n" + + "This is a test notification from your CloudFlare Dynamic DNS tool.\n\n" + + "βœ… Telegram integration is working correctly!" + + telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken) + + payload := map[string]interface{}{ + "chat_id": config.TelegramChatID, + "text": message, + "parse_mode": "HTML", + } + + jsonData, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("failed to marshal payload: %v", err) + } + + req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData)) + if err != nil { + return fmt.Errorf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + return fmt.Errorf("failed to send request: %v", err) + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + + if resp.StatusCode == 200 { + fmt.Println("βœ… Test notification sent successfully!") + if config.Verbose { + fmt.Printf("Response: %s\n", string(body)) + } + return nil + } else { + return fmt.Errorf("failed to send notification (HTTP %d): %s", resp.StatusCode, string(body)) + } +} + +func sendBatchTelegramNotification(config *Config, notifications []NotificationInfo, isDryRun bool) { + // Skip if Telegram is not configured + if config.TelegramBotToken == "" || config.TelegramChatID == "" { + return + } + + if len(notifications) == 0 { + return + } + + var message string + dryRunPrefix := "" + if isDryRun { + dryRunPrefix = "πŸ§ͺ DRY RUN - " + } + + if len(notifications) == 1 { + // Single record notification + notif := notifications[0] + if notif.IsNew { + 
message = fmt.Sprintf("%sπŸ†• DNS Record Created\n\n"+ + "Record: %s\n"+ + "New IP: %s\n"+ + "TTL: %d seconds", + dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL) + } else if notif.OldIP == notif.NewIP { + message = fmt.Sprintf("%sπŸ”„ DNS Record Force Updated\n\n"+ + "Record: %s\n"+ + "IP: %s (unchanged)\n"+ + "TTL: %d seconds\n"+ + "Note: Forced update requested", + dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL) + } else { + message = fmt.Sprintf("%sπŸ”„ IP Address Changed\n\n"+ + "Record: %s\n"+ + "Old IP: %s\n"+ + "New IP: %s\n"+ + "TTL: %d seconds", + dryRunPrefix, notif.RecordName, notif.OldIP, notif.NewIP, config.TTL) + } + } else { + // Multiple records notification + var newCount, updatedCount int + for _, notif := range notifications { + if notif.IsNew { + newCount++ + } else { + updatedCount++ + } + } + + message = fmt.Sprintf("%sπŸ“‹ Multiple DNS Records Updated\n\n", dryRunPrefix) + if newCount > 0 { + message += fmt.Sprintf("πŸ†• Created: %d record(s)\n", newCount) + } + if updatedCount > 0 { + message += fmt.Sprintf("πŸ”„ Updated: %d record(s)\n", updatedCount) + } + message += fmt.Sprintf("\nNew IP: %s\nTTL: %d seconds\n\nRecords:", notifications[0].NewIP, config.TTL) + + for _, notif := range notifications { + if notif.IsNew { + message += fmt.Sprintf("\nβ€’ %s (new)", notif.RecordName) + } else if notif.OldIP == notif.NewIP { + message += fmt.Sprintf("\nβ€’ %s (forced)", notif.RecordName) + } else { + message += fmt.Sprintf("\nβ€’ %s (%s β†’ %s)", notif.RecordName, notif.OldIP, notif.NewIP) + } + } + } + + // Send the notification using the same logic as single notifications + telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken) + + payload := map[string]interface{}{ + "chat_id": config.TelegramChatID, + "text": message, + "parse_mode": "HTML", + } + + jsonData, err := json.Marshal(payload) + if err != nil { + if config.Verbose { + fmt.Printf("Failed to marshal Telegram payload: %v\n", 
err) + } + return + } + + req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData)) + if err != nil { + if config.Verbose { + fmt.Printf("Failed to create Telegram request: %v\n", err) + } + return + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := config.Client.Do(req) + if err != nil { + if config.Verbose { + fmt.Printf("Failed to send Telegram notification: %v\n", err) + } + return + } + defer resp.Body.Close() + + if resp.StatusCode == 200 { + if config.Verbose { + fmt.Println("Telegram notification sent successfully") + } + } else { + if config.Verbose { + body, _ := io.ReadAll(resp.Body) + fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body)) + } + } +} diff --git a/config/ansible/tasks/servers/dynamic-dns.yml b/config/ansible/tasks/servers/dynamic-dns.yml new file mode 100644 index 0000000..e8d55fc --- /dev/null +++ b/config/ansible/tasks/servers/dynamic-dns.yml @@ -0,0 +1,99 @@ +--- +- name: Dynamic DNS setup + block: + - name: Create environment file for dynamic DNS + ansible.builtin.template: + src: "{{ playbook_dir }}/templates/dynamic-dns.env.j2" + dest: "{{ ansible_user_dir }}/.local/bin/dynamic-dns.env" + mode: "0600" + + - name: Create dynamic DNS wrapper script + ansible.builtin.copy: + dest: "{{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh" + mode: "0755" + content: | + #!/bin/bash + + # Load environment variables + source {{ ansible_user_dir }}/.local/bin/dynamic-dns.env + + # Change to the directory containing the binary + cd {{ ansible_user_dir }}/.local/bin + + # Run dynamic DNS update (binary compiled by utils.yml) + ./dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns + + - name: Setup cron job for dynamic DNS updates (fallback) + ansible.builtin.cron: + name: "Dynamic DNS Update" + minute: "*/15" + job: "{{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh" + user: "{{ ansible_user }}" + state: present + 
ignore_errors: true + tags: [cron] + + - name: Create systemd user directory + ansible.builtin.file: + path: "{{ ansible_user_dir }}/.config/systemd/user" + state: directory + mode: "0755" + + - name: Create dynamic DNS systemd timer + ansible.builtin.copy: + dest: "{{ ansible_user_dir }}/.config/systemd/user/dynamic-dns.timer" + mode: "0644" + content: | + [Unit] + Description=Dynamic DNS Update Timer + Requires=dynamic-dns.service + + [Timer] + OnCalendar=*:0/15 + Persistent=true + + [Install] + WantedBy=timers.target + + - name: Create dynamic DNS systemd service + ansible.builtin.copy: + dest: "{{ ansible_user_dir }}/.config/systemd/user/dynamic-dns.service" + mode: "0644" + content: | + [Unit] + Description=Dynamic DNS Update + After=network-online.target + Wants=network-online.target + + [Service] + Type=oneshot + ExecStart={{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh + EnvironmentFile={{ ansible_user_dir }}/.local/bin/dynamic-dns.env + + [Install] + WantedBy=default.target + + - name: Reload systemd user daemon + ansible.builtin.systemd: + daemon_reload: true + scope: user + + - name: Enable and start dynamic DNS timer + ansible.builtin.systemd: + name: dynamic-dns.timer + enabled: true + state: started + scope: user + + - name: Display setup completion message + ansible.builtin.debug: + msg: | + Dynamic DNS setup complete! 
+ - Systemd timer: systemctl --user status dynamic-dns.timer + - Check logs: journalctl --user -u dynamic-dns.service -f + - Manual run: ~/.local/bin/dynamic-dns-update.sh + - Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl + + when: inventory_hostname == 'mennos-cachyos-desktop' + tags: + - dynamic-dns diff --git a/config/ansible/tasks/servers/juicefs.yml b/config/ansible/tasks/servers/juicefs.yml index ebae982..0bf5171 100644 --- a/config/ansible/tasks/servers/juicefs.yml +++ b/config/ansible/tasks/servers/juicefs.yml @@ -70,7 +70,7 @@ - name: Include JuiceFS Redis tasks ansible.builtin.include_tasks: services/redis/redis.yml - when: inventory_hostname == 'mennos-cloud-server' + when: inventory_hostname == 'mennos-cachyos-desktop' - name: Enable and start JuiceFS service ansible.builtin.systemd: diff --git a/config/ansible/tasks/servers/server.yml b/config/ansible/tasks/servers/server.yml index 873effb..2bf7458 100644 --- a/config/ansible/tasks/servers/server.yml +++ b/config/ansible/tasks/servers/server.yml @@ -18,6 +18,11 @@ tags: - juicefs + - name: Include Dynamic DNS tasks + ansible.builtin.include_tasks: dynamic-dns.yml + tags: + - dynamic-dns + - name: System performance optimizations ansible.posix.sysctl: name: "{{ item.name }}" @@ -46,10 +51,6 @@ vars: services: - - name: uptime-kuma - enabled: true - hosts: - - mennos-cloud-server - name: gitea enabled: true hosts: diff --git a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 index 10f6e42..c4bcf11 100644 --- a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 +++ b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 @@ -28,14 +28,7 @@ } {% endif %} -{% if inventory_hostname == 'mennos-cloud-server' %} -status.vleeuwen.me status.mvl.sh { - import country_block - reverse_proxy uptime-kuma:3001 - tls {{ caddy_email }} -} - -{% elif inventory_hostname == 'mennos-cachyos-desktop' %} +{% if inventory_hostname == 
'mennos-cachyos-desktop' %} git.mvl.sh { import country_block reverse_proxy gitea:3000 @@ -213,5 +206,4 @@ drive.mvl.sh drive.vleeuwen.me { tls {{ caddy_email }} } - {% endif %} diff --git a/config/ansible/tasks/servers/services/redis/docker-compose.yml.j2 b/config/ansible/tasks/servers/services/redis/docker-compose.yml.j2 index 9896517..f021055 100644 --- a/config/ansible/tasks/servers/services/redis/docker-compose.yml.j2 +++ b/config/ansible/tasks/servers/services/redis/docker-compose.yml.j2 @@ -5,7 +5,7 @@ services: ports: - "6379:6379" volumes: - - /mnt/services/redis-data:/data + - /mnt/services/redis:/data command: ["redis-server", "--appendonly", "yes", "--requirepass", "{{ REDIS_PASSWORD }}"] environment: - TZ=Europe/Amsterdam diff --git a/config/ansible/tasks/servers/services/uptime-kuma/docker-compose.yml.j2 b/config/ansible/tasks/servers/services/uptime-kuma/docker-compose.yml.j2 deleted file mode 100644 index 444030a..0000000 --- a/config/ansible/tasks/servers/services/uptime-kuma/docker-compose.yml.j2 +++ /dev/null @@ -1,22 +0,0 @@ -services: - uptime-kuma: - image: louislam/uptime-kuma:latest - restart: unless-stopped - volumes: - - {{ uptime_kuma_data_dir }}:/app/data - - /var/run/docker.sock:/var/run/docker.sock:ro - environment: - - PUID=1000 - - PGID=100 - - TZ=Europe/Amsterdam - ports: - - "3001:3001" - extra_hosts: - - "host.docker.internal:host-gateway" - networks: - - caddy_network - -networks: - caddy_network: - external: true - name: caddy_default diff --git a/config/ansible/tasks/servers/services/uptime-kuma/uptime-kuma.yml b/config/ansible/tasks/servers/services/uptime-kuma/uptime-kuma.yml deleted file mode 100644 index 8616068..0000000 --- a/config/ansible/tasks/servers/services/uptime-kuma/uptime-kuma.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Deploy Uptime Kuma service - block: - - name: Set Uptime Kuma directories - ansible.builtin.set_fact: - uptime_kuma_service_dir: "{{ ansible_env.HOME }}/services/uptime-kuma" - 
uptime_kuma_data_dir: "/mnt/object_storage/services/uptime-kuma" - - - name: Create Uptime Kuma directory - ansible.builtin.file: - path: "{{ uptime_kuma_service_dir }}" - state: directory - mode: "0755" - - - name: Deploy Uptime Kuma docker-compose.yml - ansible.builtin.template: - src: docker-compose.yml.j2 - dest: "{{ uptime_kuma_service_dir }}/docker-compose.yml" - mode: "0644" - register: uptime_kuma_compose - - - name: Stop Uptime Kuma service if config changed - ansible.builtin.command: docker compose -f "{{ uptime_kuma_service_dir }}/docker-compose.yml" down --remove-orphans - when: uptime_kuma_compose.changed - - - name: Start Uptime Kuma service - ansible.builtin.command: docker compose -f "{{ uptime_kuma_service_dir }}/docker-compose.yml" up -d - when: uptime_kuma_compose.changed or uptime_kuma_start | default(false) | bool - tags: - - services - - uptime_kuma diff --git a/config/ansible/tasks/workstations/flatpaks.yml b/config/ansible/tasks/workstations/flatpaks.yml index d4f3e6f..1e4950f 100644 --- a/config/ansible/tasks/workstations/flatpaks.yml +++ b/config/ansible/tasks/workstations/flatpaks.yml @@ -48,10 +48,7 @@ - tv.plex.PlexDesktop # Messaging - - org.telegram.desktop - - org.signal.Signal - com.rtosta.zapzap - - io.github.equicord.equibop # Utilities - com.ranfdev.DistroShelf diff --git a/config/ansible/templates/dynamic-dns.env.j2 b/config/ansible/templates/dynamic-dns.env.j2 new file mode 100644 index 0000000..0ba01a6 --- /dev/null +++ b/config/ansible/templates/dynamic-dns.env.j2 @@ -0,0 +1,12 @@ +# Dynamic DNS Environment Configuration +# This file contains sensitive credentials and should be kept secure +# Credentials are automatically retrieved from OnePassword + +# CloudFlare API Token (required) +# Retrieved from OnePassword: CloudFlare API Token +export CLOUDFLARE_API_TOKEN="{{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}" + +# Telegram Bot Credentials (for notifications when IP 
changes) +# Retrieved from OnePassword: Telegram DynDNS Bot +export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}" +export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}" diff --git a/config/ansible/templates/juicefs.service.j2 b/config/ansible/templates/juicefs.service.j2 index 8e7a083..d59a0a3 100644 --- a/config/ansible/templates/juicefs.service.j2 +++ b/config/ansible/templates/juicefs.service.j2 @@ -5,7 +5,7 @@ Before=docker.service [Service] Type=simple -ExecStart=/usr/local/bin/juicefs mount redis://:{{ redis_password }}@100.82.178.14:6379/0 /mnt/object_storage \ +ExecStart=/usr/local/bin/juicefs mount redis://:{{ redis_password }}@mennos-cachyos-desktop:6379/0 /mnt/object_storage \ --cache-dir=/var/jfsCache \ --buffer-size=4096 \ --prefetch=16 \ diff --git a/config/home-manager/flake.nix b/config/home-manager/flake.nix index 2e3df1c..65c46e1 100644 --- a/config/home-manager/flake.nix +++ b/config/home-manager/flake.nix @@ -89,20 +89,6 @@ }; }; - "mennos-cloud-server" = home-manager.lib.homeManagerConfiguration { - inherit pkgs; - modules = [ ./home.nix ]; - extraSpecialArgs = { - inherit - pkgs - pkgs-unstable - opnix - ; - isServer = true; - hostname = "mennos-cloud-server"; - }; - }; - "mennos-vm" = home-manager.lib.homeManagerConfiguration { inherit pkgs; modules = [ ./home.nix ]; diff --git a/config/home-manager/packages/common/hosts/mennos-cloud-server.nix b/config/home-manager/packages/common/hosts/mennos-cloud-server.nix deleted file mode 100644 index 46f1ad7..0000000 --- a/config/home-manager/packages/common/hosts/mennos-cloud-server.nix +++ /dev/null @@ -1,4 +0,0 @@ -{ pkgs-unstable, ... 
}: -{ - home.packages = with pkgs-unstable; [ ]; -} diff --git a/config/home-manager/packages/common/packages.nix b/config/home-manager/packages/common/packages.nix index 138dbc6..fc4e1bf 100644 --- a/config/home-manager/packages/common/packages.nix +++ b/config/home-manager/packages/common/packages.nix @@ -17,8 +17,6 @@ [ ./hosts/mennos-cachyos-laptop.nix ] else if hostname == "mennos-server" then [ ./hosts/mennos-server.nix ] - else if hostname == "mennos-cloud-server" then - [ ./hosts/mennos-cloud-server.nix ] else if hostname == "mennos-vm" then [ ./hosts/mennos-vm.nix ] else From c8444de0d531eafab4d8c786b28e6078433b96a5 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Wed, 23 Jul 2025 14:23:03 +0200 Subject: [PATCH 08/11] fix: move ~/services to ~/.services Signed-off-by: Menno van Leeuwen --- bin/actions/service.py | 2 +- .../servers/services/arr-stack/arr-stack.yml | 2 +- .../tasks/servers/services/beszel/beszel.yml | 2 +- .../tasks/servers/services/caddy/Caddyfile.j2 | 21 ++++- .../tasks/servers/services/caddy/caddy.yml | 94 +++++++++---------- .../services/downloaders/downloaders.yml | 2 +- .../tasks/servers/services/dozzle/dozzle.yml | 2 +- .../tasks/servers/services/echoip/echoip.yml | 8 +- .../servers/services/factorio/factorio.yml | 2 +- .../tasks/servers/services/gitea/gitea.yml | 2 +- .../tasks/servers/services/golink/golink.yml | 2 +- .../home-assistant/home-assistant.yml | 2 +- .../tasks/servers/services/immich/immich.yml | 2 +- .../servers/services/nextcloud/nextcloud.yml | 2 +- .../tasks/servers/services/plex/plex.yml | 2 +- .../services/privatebin/privatebin.yml | 2 +- .../tasks/servers/services/redis/redis.yml | 6 +- .../servers/services/service_cleanup.yml | 8 +- .../tasks/servers/services/stash/stash.yml | 6 +- .../servers/services/tautulli/tautulli.yml | 2 +- .../unifi-network-application.yml | 2 +- .../servers/services/wireguard/wireguard.yml | 2 +- 22 files changed, 98 insertions(+), 77 deletions(-) diff --git 
a/bin/actions/service.py b/bin/actions/service.py index 0946019..c57e820 100755 --- a/bin/actions/service.py +++ b/bin/actions/service.py @@ -10,7 +10,7 @@ sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin")) from helpers.functions import printfe, println, logo # Base directory for Docker services $HOME/services -SERVICES_DIR = os.path.join(os.path.expanduser("~"), "services") +SERVICES_DIR = os.path.join(os.path.expanduser("~"), ".services") # Protected services that should never be stopped PROTECTED_SERVICES = ["juicefs-redis"] diff --git a/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml b/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml index 57b6e97..b9da6d0 100644 --- a/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml +++ b/config/ansible/tasks/servers/services/arr-stack/arr-stack.yml @@ -3,7 +3,7 @@ block: - name: Set ArrStack directories ansible.builtin.set_fact: - arr_stack_service_dir: "{{ ansible_env.HOME }}/services/arr-stack" + arr_stack_service_dir: "{{ ansible_env.HOME }}/.services/arr-stack" arr_stack_data_dir: "/mnt/services/arr-stack" - name: Create ArrStack directory diff --git a/config/ansible/tasks/servers/services/beszel/beszel.yml b/config/ansible/tasks/servers/services/beszel/beszel.yml index 92ec9cc..72a8fdc 100644 --- a/config/ansible/tasks/servers/services/beszel/beszel.yml +++ b/config/ansible/tasks/servers/services/beszel/beszel.yml @@ -3,7 +3,7 @@ block: - name: Set Beszel directories ansible.builtin.set_fact: - beszel_service_dir: "{{ ansible_env.HOME }}/services/beszel" + beszel_service_dir: "{{ ansible_env.HOME }}/.services/beszel" beszel_data_dir: "/mnt/services/beszel" - name: Create Beszel directory diff --git a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 index c4bcf11..089031f 100644 --- a/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 +++ 
b/config/ansible/tasks/servers/services/caddy/Caddyfile.j2 @@ -94,10 +94,14 @@ home.mvl.sh { home.vleeuwen.me { import country_block - redir https://home.mvl.sh{uri} + reverse_proxy host.docker.internal:8123 { + header_up Host {upstream_hostport} + header_up X-Real-IP {http.request.remote.host} + } tls {{ caddy_email }} } + unifi.mvl.sh { reverse_proxy unifi-controller:8443 { transport http { @@ -170,6 +174,21 @@ plex.vleeuwen.me { tls {{ caddy_email }} } +tautulli.mvl.sh { + import country_block + reverse_proxy host.docker.internal:8181 { + header_up Host {upstream_hostport} + header_up X-Real-IP {http.request.remote.host} + } + tls {{ caddy_email }} +} + +tautulli.vleeuwen.me { + import country_block + redir https://tautulli.mvl.sh{uri} + tls {{ caddy_email }} +} + drive.mvl.sh drive.vleeuwen.me { import country_block diff --git a/config/ansible/tasks/servers/services/caddy/caddy.yml b/config/ansible/tasks/servers/services/caddy/caddy.yml index 40b7aeb..b1f9e89 100644 --- a/config/ansible/tasks/servers/services/caddy/caddy.yml +++ b/config/ansible/tasks/servers/services/caddy/caddy.yml @@ -1,59 +1,59 @@ --- - name: Deploy Caddy service block: - - name: Set Caddy directories - ansible.builtin.set_fact: - caddy_service_dir: "{{ ansible_env.HOME }}/services/caddy" - caddy_data_dir: "/mnt/services/caddy" - geoip_db_path: "/mnt/services/echoip" - caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}" + - name: Set Caddy directories + ansible.builtin.set_fact: + caddy_service_dir: "{{ ansible_env.HOME }}/.services/caddy" + caddy_data_dir: "/mnt/services/caddy" + geoip_db_path: "/mnt/services/echoip" + caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}" - - name: Create Caddy directory - ansible.builtin.file: - path: "{{ caddy_service_dir }}" - state: directory - mode: "0755" + - name: Create Caddy directory + ansible.builtin.file: + path: "{{ 
caddy_service_dir }}" + state: directory + mode: "0755" - - name: Setup country blocking - ansible.builtin.include_tasks: country-blocking.yml + - name: Setup country blocking + ansible.builtin.include_tasks: country-blocking.yml - - name: Copy Dockerfile for custom Caddy build - ansible.builtin.copy: - src: Dockerfile - dest: "{{ caddy_service_dir }}/Dockerfile" - mode: "0644" - register: caddy_dockerfile + - name: Copy Dockerfile for custom Caddy build + ansible.builtin.copy: + src: Dockerfile + dest: "{{ caddy_service_dir }}/Dockerfile" + mode: "0644" + register: caddy_dockerfile - - name: Create Caddy network - ansible.builtin.command: docker network create caddy_default - register: create_caddy_network - failed_when: - - create_caddy_network.rc != 0 - - "'already exists' not in create_caddy_network.stderr" - changed_when: create_caddy_network.rc == 0 + - name: Create Caddy network + ansible.builtin.command: docker network create caddy_default + register: create_caddy_network + failed_when: + - create_caddy_network.rc != 0 + - "'already exists' not in create_caddy_network.stderr" + changed_when: create_caddy_network.rc == 0 - - name: Deploy Caddy docker-compose.yml - ansible.builtin.template: - src: docker-compose.yml.j2 - dest: "{{ caddy_service_dir }}/docker-compose.yml" - mode: "0644" - register: caddy_compose + - name: Deploy Caddy docker-compose.yml + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ caddy_service_dir }}/docker-compose.yml" + mode: "0644" + register: caddy_compose - - name: Deploy Caddy Caddyfile - ansible.builtin.template: - src: Caddyfile.j2 - dest: "{{ caddy_service_dir }}/Caddyfile" - mode: "0644" - register: caddy_file + - name: Deploy Caddy Caddyfile + ansible.builtin.template: + src: Caddyfile.j2 + dest: "{{ caddy_service_dir }}/Caddyfile" + mode: "0644" + register: caddy_file - - name: Stop Caddy service - ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans 
- when: caddy_compose.changed or caddy_file.changed + - name: Stop Caddy service + ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans + when: caddy_compose.changed or caddy_file.changed - - name: Start Caddy service - ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d - when: caddy_compose.changed or caddy_file.changed + - name: Start Caddy service + ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d + when: caddy_compose.changed or caddy_file.changed tags: - - caddy - - services - - reverse-proxy + - caddy + - services + - reverse-proxy diff --git a/config/ansible/tasks/servers/services/downloaders/downloaders.yml b/config/ansible/tasks/servers/services/downloaders/downloaders.yml index d09cc4f..2030ea7 100644 --- a/config/ansible/tasks/servers/services/downloaders/downloaders.yml +++ b/config/ansible/tasks/servers/services/downloaders/downloaders.yml @@ -4,7 +4,7 @@ - name: Set Downloaders directories ansible.builtin.set_fact: local_data_dir: "/mnt/data" - downloaders_service_dir: "{{ ansible_env.HOME }}/services/downloaders" + downloaders_service_dir: "{{ ansible_env.HOME }}/.services/downloaders" downloaders_data_dir: "/mnt/services/downloaders" - name: Create Downloaders directory diff --git a/config/ansible/tasks/servers/services/dozzle/dozzle.yml b/config/ansible/tasks/servers/services/dozzle/dozzle.yml index a2ea2de..e50878e 100644 --- a/config/ansible/tasks/servers/services/dozzle/dozzle.yml +++ b/config/ansible/tasks/servers/services/dozzle/dozzle.yml @@ -3,7 +3,7 @@ block: - name: Set Dozzle directories ansible.builtin.set_fact: - dozzle_service_dir: "{{ ansible_env.HOME }}/services/dozzle" + dozzle_service_dir: "{{ ansible_env.HOME }}/.services/dozzle" dozzle_data_dir: "/mnt/services/dozzle" - name: Create Dozzle directory diff --git a/config/ansible/tasks/servers/services/echoip/echoip.yml 
b/config/ansible/tasks/servers/services/echoip/echoip.yml index 9b8bef9..58a5f5b 100644 --- a/config/ansible/tasks/servers/services/echoip/echoip.yml +++ b/config/ansible/tasks/servers/services/echoip/echoip.yml @@ -3,11 +3,13 @@ block: - name: Set EchoIP directories ansible.builtin.set_fact: - echoip_service_dir: "{{ ansible_env.HOME }}/services/echoip" + echoip_service_dir: "{{ ansible_env.HOME }}/.services/echoip" echoip_data_dir: "/mnt/services/echoip" - maxmind_account_id: "{{ lookup('community.general.onepassword', 'MaxMind', + maxmind_account_id: + "{{ lookup('community.general.onepassword', 'MaxMind', vault='Dotfiles', field='account_id') | regex_replace('\\s+', '') }}" - maxmind_license_key: "{{ lookup('community.general.onepassword', 'MaxMind', + maxmind_license_key: + "{{ lookup('community.general.onepassword', 'MaxMind', vault='Dotfiles', field='license_key') | regex_replace('\\s+', '') }}" # Requires: gather_facts: true in playbook diff --git a/config/ansible/tasks/servers/services/factorio/factorio.yml b/config/ansible/tasks/servers/services/factorio/factorio.yml index 2e0c582..2cb5e92 100644 --- a/config/ansible/tasks/servers/services/factorio/factorio.yml +++ b/config/ansible/tasks/servers/services/factorio/factorio.yml @@ -3,7 +3,7 @@ block: - name: Set Factorio directories ansible.builtin.set_fact: - factorio_service_dir: "{{ ansible_env.HOME }}/services/factorio" + factorio_service_dir: "{{ ansible_env.HOME }}/.services/factorio" factorio_data_dir: "/mnt/services/factorio" - name: Create Factorio directory diff --git a/config/ansible/tasks/servers/services/gitea/gitea.yml b/config/ansible/tasks/servers/services/gitea/gitea.yml index 43ef5a7..0465a8e 100644 --- a/config/ansible/tasks/servers/services/gitea/gitea.yml +++ b/config/ansible/tasks/servers/services/gitea/gitea.yml @@ -4,7 +4,7 @@ - name: Set Gitea directories ansible.builtin.set_fact: gitea_data_dir: "/mnt/services/gitea" - gitea_service_dir: "{{ ansible_env.HOME }}/services/gitea" + 
gitea_service_dir: "{{ ansible_env.HOME }}/.services/gitea" - name: Create Gitea directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/golink/golink.yml b/config/ansible/tasks/servers/services/golink/golink.yml index a8e8362..d8aba88 100644 --- a/config/ansible/tasks/servers/services/golink/golink.yml +++ b/config/ansible/tasks/servers/services/golink/golink.yml @@ -4,7 +4,7 @@ - name: Set GoLink directories ansible.builtin.set_fact: golink_data_dir: "/mnt/services/golink" - golink_service_dir: "{{ ansible_env.HOME }}/services/golink" + golink_service_dir: "{{ ansible_env.HOME }}/.services/golink" - name: Create GoLink directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/home-assistant/home-assistant.yml b/config/ansible/tasks/servers/services/home-assistant/home-assistant.yml index bf8a9e4..d800aa2 100644 --- a/config/ansible/tasks/servers/services/home-assistant/home-assistant.yml +++ b/config/ansible/tasks/servers/services/home-assistant/home-assistant.yml @@ -4,7 +4,7 @@ - name: Set Home Assistant directories ansible.builtin.set_fact: homeassistant_data_dir: "/mnt/services/homeassistant" - homeassistant_service_dir: "{{ ansible_env.HOME }}/services/homeassistant" + homeassistant_service_dir: "{{ ansible_env.HOME }}/.services/homeassistant" - name: Create Home Assistant directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/immich/immich.yml b/config/ansible/tasks/servers/services/immich/immich.yml index 50b63f0..f4dfc3f 100644 --- a/config/ansible/tasks/servers/services/immich/immich.yml +++ b/config/ansible/tasks/servers/services/immich/immich.yml @@ -5,7 +5,7 @@ ansible.builtin.set_fact: immich_data_dir: "/mnt/data/photos/immich-library" immich_database_dir: "/mnt/services/immich/postgres" - immich_service_dir: "{{ ansible_env.HOME }}/services/immich" + immich_service_dir: "{{ ansible_env.HOME }}/.services/immich" - name: Create Immich directories 
ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/nextcloud/nextcloud.yml b/config/ansible/tasks/servers/services/nextcloud/nextcloud.yml index 6f3b7b0..5308554 100644 --- a/config/ansible/tasks/servers/services/nextcloud/nextcloud.yml +++ b/config/ansible/tasks/servers/services/nextcloud/nextcloud.yml @@ -3,7 +3,7 @@ block: - name: Set Nextcloud directories ansible.builtin.set_fact: - nextcloud_service_dir: "{{ ansible_env.HOME }}/services/nextcloud" + nextcloud_service_dir: "{{ ansible_env.HOME }}/.services/nextcloud" nextcloud_data_dir: "/mnt/services/nextcloud" - name: Create Nextcloud directory diff --git a/config/ansible/tasks/servers/services/plex/plex.yml b/config/ansible/tasks/servers/services/plex/plex.yml index 4aa6306..4e17fd4 100644 --- a/config/ansible/tasks/servers/services/plex/plex.yml +++ b/config/ansible/tasks/servers/services/plex/plex.yml @@ -4,7 +4,7 @@ - name: Set Plex directories ansible.builtin.set_fact: plex_data_dir: "/mnt/services/plex" - plex_service_dir: "{{ ansible_env.HOME }}/services/plex" + plex_service_dir: "{{ ansible_env.HOME }}/.services/plex" - name: Create Plex directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/privatebin/privatebin.yml b/config/ansible/tasks/servers/services/privatebin/privatebin.yml index c32cd31..3ae5075 100644 --- a/config/ansible/tasks/servers/services/privatebin/privatebin.yml +++ b/config/ansible/tasks/servers/services/privatebin/privatebin.yml @@ -4,7 +4,7 @@ - name: Set PrivateBin directories ansible.builtin.set_fact: privatebin_data_dir: "/mnt/services/privatebin" - privatebin_service_dir: "{{ ansible_env.HOME }}/services/privatebin" + privatebin_service_dir: "{{ ansible_env.HOME }}/.services/privatebin" - name: Create PrivateBin directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/redis/redis.yml b/config/ansible/tasks/servers/services/redis/redis.yml index 6c8c55f..22b0f29 100644 --- 
a/config/ansible/tasks/servers/services/redis/redis.yml +++ b/config/ansible/tasks/servers/services/redis/redis.yml @@ -3,7 +3,7 @@ block: - name: Set Redis facts ansible.builtin.set_fact: - redis_service_dir: "{{ ansible_env.HOME }}/services/juicefs-redis" + redis_service_dir: "{{ ansible_env.HOME }}/.services/juicefs-redis" redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}" - name: Create Redis service directory @@ -76,5 +76,5 @@ changed_when: docker_restart.rc == 0 when: redis_compose.changed tags: - - services - - redis + - services + - redis diff --git a/config/ansible/tasks/servers/services/service_cleanup.yml b/config/ansible/tasks/servers/services/service_cleanup.yml index ff27d25..f2ae7fa 100644 --- a/config/ansible/tasks/servers/services/service_cleanup.yml +++ b/config/ansible/tasks/servers/services/service_cleanup.yml @@ -7,7 +7,7 @@ - name: Check service directories existence for disabled services ansible.builtin.stat: - path: "{{ ansible_env.HOME }}/services/{{ item.name }}" + path: "{{ ansible_env.HOME }}/.services/{{ item.name }}" register: service_dir_results loop: "{{ services_to_cleanup }}" loop_control: @@ -19,14 +19,14 @@ - name: Check if docker-compose file exists for services to cleanup ansible.builtin.stat: - path: "{{ ansible_env.HOME }}/services/{{ item.name }}/docker-compose.yml" + path: "{{ ansible_env.HOME }}/.services/{{ item.name }}/docker-compose.yml" register: compose_file_results loop: "{{ services_with_dirs }}" loop_control: label: "{{ item.name }}" - name: Stop disabled services with docker-compose files - ansible.builtin.command: docker compose -f "{{ ansible_env.HOME }}/services/{{ item.item.name }}/docker-compose.yml" down --remove-orphans + ansible.builtin.command: docker compose -f "{{ ansible_env.HOME }}/.services/{{ item.item.name }}/docker-compose.yml" down --remove-orphans loop: "{{ compose_file_results.results | selectattr('stat.exists', 'equalto', 
true) }}" loop_control: label: "{{ item.item.name }}" @@ -36,7 +36,7 @@ - name: Remove service directories for disabled services ansible.builtin.file: - path: "{{ ansible_env.HOME }}/services/{{ item.name }}" + path: "{{ ansible_env.HOME }}/.services/{{ item.name }}" state: absent loop: "{{ services_with_dirs }}" loop_control: diff --git a/config/ansible/tasks/servers/services/stash/stash.yml b/config/ansible/tasks/servers/services/stash/stash.yml index 02dff9f..f264304 100644 --- a/config/ansible/tasks/servers/services/stash/stash.yml +++ b/config/ansible/tasks/servers/services/stash/stash.yml @@ -3,9 +3,9 @@ block: - name: Set Stash directories ansible.builtin.set_fact: - stash_data_dir: '/mnt/data/stash' - stash_config_dir: '/mnt/services/stash' - stash_service_dir: "{{ ansible_env.HOME }}/services/stash" + stash_data_dir: "/mnt/data/stash" + stash_config_dir: "/mnt/services/stash" + stash_service_dir: "{{ ansible_env.HOME }}/.services/stash" - name: Create Stash directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/tautulli/tautulli.yml b/config/ansible/tasks/servers/services/tautulli/tautulli.yml index 9a6d681..a04c1ec 100644 --- a/config/ansible/tasks/servers/services/tautulli/tautulli.yml +++ b/config/ansible/tasks/servers/services/tautulli/tautulli.yml @@ -4,7 +4,7 @@ - name: Set Tautulli directories ansible.builtin.set_fact: tautulli_data_dir: "{{ '/mnt/services/tautulli' }}" - tautulli_service_dir: "{{ ansible_env.HOME }}/services/tautulli" + tautulli_service_dir: "{{ ansible_env.HOME }}/.services/tautulli" - name: Create Tautulli directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/unifi-network-application/unifi-network-application.yml b/config/ansible/tasks/servers/services/unifi-network-application/unifi-network-application.yml index 201d124..41c2c94 100644 --- a/config/ansible/tasks/servers/services/unifi-network-application/unifi-network-application.yml +++ 
b/config/ansible/tasks/servers/services/unifi-network-application/unifi-network-application.yml @@ -4,7 +4,7 @@ - name: Set Unifi Network App directories ansible.builtin.set_fact: unifi_network_application_data_dir: "/mnt/services/unifi_network_application" - unifi_network_application_service_dir: "{{ ansible_env.HOME }}/services/unifi_network_application" + unifi_network_application_service_dir: "{{ ansible_env.HOME }}/.services/unifi_network_application" - name: Create Unifi Network App directories ansible.builtin.file: diff --git a/config/ansible/tasks/servers/services/wireguard/wireguard.yml b/config/ansible/tasks/servers/services/wireguard/wireguard.yml index c817cc2..68b0eb8 100644 --- a/config/ansible/tasks/servers/services/wireguard/wireguard.yml +++ b/config/ansible/tasks/servers/services/wireguard/wireguard.yml @@ -3,7 +3,7 @@ block: - name: Set WireGuard directories ansible.builtin.set_fact: - wireguard_service_dir: "{{ ansible_env.HOME }}/services/wireguard" + wireguard_service_dir: "{{ ansible_env.HOME }}/.services/wireguard" wireguard_data_dir: "/mnt/services/wireguard" - name: Create WireGuard directory From dd1b961af04227ef9cec43393f833cec0bee99b8 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Wed, 23 Jul 2025 14:29:49 +0200 Subject: [PATCH 09/11] fix: set default ssh sock based on what is available instead of forcing 1password locally Signed-off-by: Menno van Leeuwen --- .bashrc | 59 +++++++++++++++++++++++++---------------- config/ssh/config | 1 - 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/.bashrc b/.bashrc index 5e146b0..0b53fc9 100644 --- a/.bashrc +++ b/.bashrc @@ -12,7 +12,7 @@ fi if [ -f /etc/os-release ]; then distro=$(awk -F= '/^NAME/{print $2}' /etc/os-release | tr -d '"') if [[ "$distro" == *"Pop!_OS"* ]]; then - export CGO_CFLAGS="-I/usr/include" + export CGO_CFLAGS="-I/usr/include" fi fi @@ -22,6 +22,11 @@ if [[ "$(uname -a)" == *"microsoft-standard-WSL2"* ]]; then alias winget='winget.exe' fi +# 
Set SSH_AUTH_SOCK to ~/.1password/agent.sock, but only if we don't already have a SSH_AUTH_SOCK +if [ -z "$SSH_AUTH_SOCK" ]; then + export SSH_AUTH_SOCK=~/.1password/agent.sock +fi + # Docker Compose Alias (Mostly for old shell scripts) alias docker-compose='docker compose' @@ -77,7 +82,7 @@ inuse() { local CYAN='\033[0;36m' local BOLD='\033[1m' local NC='\033[0m' # No Color - + # Input validation if [ $# -eq 0 ]; then echo -e "${RED}Usage:${NC} inuse " @@ -87,7 +92,7 @@ inuse() { echo -e "${YELLOW} inuse --list${NC}" return 1 fi - + # Handle --help option if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then echo -e "${CYAN}${BOLD}inuse - Check if a port is in use${NC}" @@ -114,24 +119,24 @@ inuse() { echo return 0 fi - + # Handle --list option if [ "$1" = "--list" ] || [ "$1" = "-l" ]; then if ! command -v docker >/dev/null 2>&1; then echo -e "${RED}Error:${NC} Docker is not available" return 1 fi - + echo -e "${CYAN}${BOLD}Docker Services with Listening Ports:${NC}" echo - + # Get all running containers local containers=$(docker ps --format "{{.Names}}" 2>/dev/null) if [ -z "$containers" ]; then echo -e "${YELLOW}No running Docker containers found${NC}" return 0 fi - + local found_services=false while IFS= read -r container; do # Get port mappings for this container @@ -140,9 +145,9 @@ inuse() { # Get container image name (clean it up) local image=$(docker inspect "$container" 2>/dev/null | grep -o '"Image": *"[^"]*"' | cut -d'"' -f4 | head -1) local clean_image=$(echo "$image" | sed 's/sha256:[a-f0-9]*/[image-hash]/' | sed 's/^.*\///') - + echo -e "${GREEN}πŸ“¦ ${BOLD}$container${NC} ${CYAN}($clean_image)${NC}" - + # Parse and display ports nicely echo "$ports" | while IFS= read -r port_line; do if [[ "$port_line" =~ ([0-9]+)/(tcp|udp).*0\.0\.0\.0:([0-9]+) ]]; then @@ -161,7 +166,7 @@ inuse() { found_services=true fi done <<< "$containers" - + # Also check for host networking containers local host_containers=$(docker ps --format "{{.Names}}" --filter "network=host" 
2>/dev/null) if [ -n "$host_containers" ]; then @@ -174,22 +179,22 @@ inuse() { echo found_services=true fi - + if [ "$found_services" = false ]; then echo -e "${YELLOW}No Docker services with exposed ports found${NC}" fi - + return 0 fi - + local port="$1" - + # Validate port number if ! [[ "$port" =~ ^[0-9]+$ ]] || [ "$port" -lt 1 ] || [ "$port" -gt 65535 ]; then echo -e "${RED}Error:${NC} Invalid port number. Must be between 1 and 65535." return 1 fi - + # Check if port is in use first local port_in_use=false if command -v ss >/dev/null 2>&1; then @@ -201,15 +206,15 @@ inuse() { port_in_use=true fi fi - + if [ "$port_in_use" = false ]; then echo -e "${RED}βœ— Port $port is FREE${NC}" return 1 fi - + # Port is in use, now find what's using it local found_process=false - + # Method 1: Try netstat first (most reliable for PID info) if command -v netstat >/dev/null 2>&1; then local netstat_result=$(netstat -tulpn 2>/dev/null | grep ":$port ") @@ -218,7 +223,7 @@ inuse() { local pid=$(echo "$line" | awk '{print $7}' | cut -d'/' -f1) local process_name=$(echo "$line" | awk '{print $7}' | cut -d'/' -f2) local protocol=$(echo "$line" | awk '{print $1}') - + if [[ "$pid" =~ ^[0-9]+$ ]] && [ -n "$process_name" ]; then # Check if it's a Docker container local docker_info="" @@ -246,14 +251,14 @@ inuse() { fi fi fi - + echo -e "${GREEN}βœ“ Port $port ($protocol) in use by ${BOLD}$process_name${NC} ${GREEN}as PID ${BOLD}$pid${NC}$docker_info" found_process=true fi done <<< "$netstat_result" fi fi - + # Method 2: Try ss if netstat didn't work if [ "$found_process" = false ] && command -v ss >/dev/null 2>&1; then local ss_result=$(ss -tulpn 2>/dev/null | grep ":$port ") @@ -261,7 +266,7 @@ inuse() { while IFS= read -r line; do local pid=$(echo "$line" | grep -o 'pid=[0-9]*' | cut -d'=' -f2) local protocol=$(echo "$line" | awk '{print $1}') - + if [[ "$pid" =~ ^[0-9]+$ ]]; then local process_name=$(ps -p "$pid" -o comm= 2>/dev/null) if [ -n "$process_name" ]; then @@ -287,7 
+292,7 @@ inuse() { fi fi fi - + echo -e "${GREEN}βœ“ Port $port ($protocol) in use by ${BOLD}$process_name${NC} ${GREEN}as PID ${BOLD}$pid${NC}$docker_info" found_process=true fi @@ -295,7 +300,7 @@ inuse() { done <<< "$ss_result" fi fi - + # Method 3: Try fuser as last resort if [ "$found_process" = false ] && command -v fuser >/dev/null 2>&1; then local fuser_pids=$(fuser "$port/tcp" 2>/dev/null) @@ -312,7 +317,7 @@ inuse() { done fi fi - + # Method 4: Check for Docker containers more accurately if [ "$found_process" = false ] && command -v docker >/dev/null 2>&1; then # First, try to find containers with published ports matching our port @@ -343,13 +348,13 @@ inuse() { fi fi fi - + # If we still haven't found the process, show a generic message if [ "$found_process" = false ]; then echo -e "${YELLOW}⚠ Port $port is in use but unable to identify the process${NC}" echo -e "${CYAN} This might be due to insufficient permissions or the process being in a different namespace${NC}" fi - + return 0 } diff --git a/config/ssh/config b/config/ssh/config index 71b5907..ebbf56b 100644 --- a/config/ssh/config +++ b/config/ssh/config @@ -1,5 +1,4 @@ Host * - IdentityAgent ~/.1password/agent.sock AddKeysToAgent yes ForwardAgent yes From d31d07e0a0170f6a7845438aa8773649fdbd54d1 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Wed, 23 Jul 2025 14:30:18 +0200 Subject: [PATCH 10/11] fix: clean & reformat gitconfig files Signed-off-by: Menno van Leeuwen --- config/git/gitconfig.linux | 34 +++++++++++++++--------------- config/git/gitconfig.macos | 6 +++--- config/git/gitconfig.mennos-server | 28 ------------------------ config/git/gitconfig.wsl | 34 +++++++++++++++--------------- 4 files changed, 37 insertions(+), 65 deletions(-) delete mode 100644 config/git/gitconfig.mennos-server diff --git a/config/git/gitconfig.linux b/config/git/gitconfig.linux index bf38992..0528522 100644 --- a/config/git/gitconfig.linux +++ b/config/git/gitconfig.linux @@ -1,43 +1,43 @@ [user] 
signingkey = ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM+sKpcREOUjwMMSzEWAso6830wbOi8kUxqpuXWw5gHr - email = menno@vleeuwen.me - name = Menno van Leeuwen + email = menno@vleeuwen.me + name = Menno van Leeuwen [gpg] format = ssh [gpg "ssh"] - program = /opt/1Password/op-ssh-sign + program = /opt/1Password/op-ssh-sign [commit] gpgsign = true [color] - ui = true + ui = true [push] - autoSetupRemote = true + autoSetupRemote = true [pull] - rebase = false + rebase = false [filter "lfs"] - process = git-lfs filter-process - required = true - clean = git-lfs clean -- %f - smudge = git-lfs smudge -- %f + process = git-lfs filter-process + required = true + clean = git-lfs clean -- %f + smudge = git-lfs smudge -- %f [init] - defaultBranch = main + defaultBranch = main [credential "https://github.com"] - helper = - helper = !/home/menno/.nix-profile/bin/gh auth git-credential + helper = + helper = !/home/menno/.nix-profile/bin/gh auth git-credential [credential "https://gist.github.com"] - helper = - helper = !/home/menno/.nix-profile/bin/gh auth git-credential + helper = + helper = !/home/menno/.nix-profile/bin/gh auth git-credential [safe] - directory = * + directory = * [alias] - pushall = "!f() { for var in $(git remote show); do echo \"pushing to $var\"; git push $var; done; }; f" + pushall = "!f() { for var in $(git remote show); do echo \"pushing to $var\"; git push $var; done; }; f" diff --git a/config/git/gitconfig.macos b/config/git/gitconfig.macos index 5481a8b..e8b66c7 100644 --- a/config/git/gitconfig.macos +++ b/config/git/gitconfig.macos @@ -8,9 +8,9 @@ [commit] gpgsign = true - + [safe] - directory = * + directory = * [alias] - pushall = "!f() { for var in $(git remote show); do echo \"pushing to $var\"; git push $var; done; }; f" + pushall = "!f() { for var in $(git remote show); do echo \"pushing to $var\"; git push $var; done; }; f" diff --git a/config/git/gitconfig.mennos-server b/config/git/gitconfig.mennos-server deleted file mode 100644 index 
2cec89d..0000000 --- a/config/git/gitconfig.mennos-server +++ /dev/null @@ -1,28 +0,0 @@ -[user] - signingkey = ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM+sKpcREOUjwMMSzEWAso6830wbOi8kUxqpuXWw5gHr - email = menno@vleeuwen.me - name = Menno van Leeuwen - -[color] - ui = true - -[push] - autoSetupRemote = true - -[pull] - rebase = false - -[filter "lfs"] - process = git-lfs filter-process - required = true - clean = git-lfs clean -- %f - smudge = git-lfs smudge -- %f - -[init] - defaultBranch = main - -[safe] - directory = * - -[alias] - pushall = "!f() { for var in $(git remote show); do echo \"pushing to $var\"; git push $var; done; }; f" diff --git a/config/git/gitconfig.wsl b/config/git/gitconfig.wsl index b32ff24..c95427e 100644 --- a/config/git/gitconfig.wsl +++ b/config/git/gitconfig.wsl @@ -1,7 +1,7 @@ [user] signingkey = ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM+sKpcREOUjwMMSzEWAso6830wbOi8kUxqpuXWw5gHr - email = menno@vleeuwen.me - name = Menno van Leeuwen + email = menno@vleeuwen.me + name = Menno van Leeuwen [gpg] format = ssh @@ -13,31 +13,31 @@ gpgsign = true [color] - ui = true + ui = true [push] - autoSetupRemote = true + autoSetupRemote = true [pull] - rebase = false + rebase = false [filter "lfs"] - process = git-lfs filter-process - required = true - clean = git-lfs clean -- %f - smudge = git-lfs smudge -- %f + process = git-lfs filter-process + required = true + clean = git-lfs clean -- %f + smudge = git-lfs smudge -- %f [init] - defaultBranch = main + defaultBranch = main [credential "https://github.com"] - helper = - helper = !/home/menno/.nix-profile/bin/gh auth git-credential + helper = + helper = !/home/menno/.nix-profile/bin/gh auth git-credential [credential "https://gist.github.com"] - helper = - helper = !/home/menno/.nix-profile/bin/gh auth git-credential + helper = + helper = !/home/menno/.nix-profile/bin/gh auth git-credential [safe] - directory = * - + directory = * + [alias] - pushall = "!f() { for var in $(git remote show); do echo 
\"pushing to $var\"; git push $var; done; }; f" + pushall = "!f() { for var in $(git remote show); do echo \"pushing to $var\"; git push $var; done; }; f" From 2b1c714375b6050c27514fc6e1ed7db51fb22210 Mon Sep 17 00:00:00 2001 From: Menno van Leeuwen Date: Wed, 23 Jul 2025 14:43:05 +0200 Subject: [PATCH 11/11] updated utils.yml to work with latest ansible Signed-off-by: Menno van Leeuwen --- config/ansible/tasks/global/utils.yml | 64 ++++++++++++++------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/config/ansible/tasks/global/utils.yml b/config/ansible/tasks/global/utils.yml index 004c946..9c4bcbd 100644 --- a/config/ansible/tasks/global/utils.yml +++ b/config/ansible/tasks/global/utils.yml @@ -1,35 +1,39 @@ --- -- name: Load DOTFILES_PATH environment variable - ansible.builtin.set_fact: - dotfiles_path: "{{ lookup('env', 'DOTFILES_PATH') }}" - become: false +- name: Process utils files + block: + - name: Load DOTFILES_PATH environment variable + ansible.builtin.set_fact: + dotfiles_path: "{{ lookup('env', 'DOTFILES_PATH') }}" + become: false -- name: Ensure ~/.local/bin exists - ansible.builtin.file: - path: "{{ ansible_env.HOME }}/.local/bin" - state: directory - mode: "0755" - become: false + - name: Ensure ~/.local/bin exists + ansible.builtin.file: + path: "{{ ansible_env.HOME }}/.local/bin" + state: directory + mode: "0755" + become: false -- name: Scan utils folder and create symlinks in ~/.local/bin - ansible.builtin.find: - paths: "{{ dotfiles_path }}/config/ansible/tasks/global/utils" - file_type: file - register: utils_files - become: false + - name: Scan utils folder and create symlinks in ~/.local/bin + ansible.builtin.find: + paths: "{{ dotfiles_path }}/config/ansible/tasks/global/utils" + file_type: file + register: utils_files + become: false -- name: Create symlinks for utils scripts - ansible.builtin.file: - src: "{{ item.path }}" - dest: "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename }}" - state: link - loop: 
"{{ utils_files.files }}" - when: not item.path | regex_search('\.go$') - become: false + - name: Create symlinks for utils scripts + ansible.builtin.file: + src: "{{ item.path }}" + dest: "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename }}" + state: link + loop: "{{ utils_files.files }}" + when: not item.path.endswith('.go') + become: false -- name: Compile Go files and place binaries in ~/.local/bin - ansible.builtin.command: - cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename | regex_replace('\.go$', '') }}" "{{ item.path }}" - loop: "{{ utils_files.files }}" - when: item.path | regex_search('\.go$') - become: false + - name: Compile Go files and place binaries in ~/.local/bin + ansible.builtin.command: + cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename | regex_replace('\.go$', '') }}" "{{ item.path }}" + loop: "{{ utils_files.files }}" + when: item.path.endswith('.go') + become: false + tags: + - utils