This commit is contained in:
2025-09-23 13:19:48 +00:00
parent a04a4abef6
commit dd3753fab4
170 changed files with 907 additions and 1715 deletions

25
ansible/README.md Normal file
View File

@@ -0,0 +1,25 @@
# Ansible Configuration
## 1Password Integration
This Ansible configuration includes a custom lookup plugin for fetching secrets from 1Password.
The 1Password CLI must be installed and authenticated on the machine running Ansible.
See [1Password Integration Readme](plugins/lookup/README.md)
### Prerequisites
1. Install 1Password CLI
2. Sign in to 1Password using `op signin`
3. If using a 1Password service account instead of an interactive sign-in, ensure it is configured with access to the vaults referenced by the playbook
### Finding Vault IDs
To find your vault ID:
```bash
op vault list
```
For more information, see the [1Password CLI documentation](https://developer.1password.com/docs/cli).

5
ansible/ansible.cfg Normal file
View File

@@ -0,0 +1,5 @@
[defaults]
; Paths below are resolved relative to this ansible.cfg's directory.
inventory = inventory
roles_path = roles
; `collections_path` (singular) — the plural `collections_paths` has been
; deprecated since ansible-core 2.10 and is slated for removal.
collections_path = collections
; Don't litter the tree with *.retry files on failed runs.
retry_files_enabled = False

View File

@@ -0,0 +1,31 @@
---
# Group defaults applied to every host. The two UI flags below are
# presumably overridden to true by the workstation group vars — confirm
# against the group_vars layout.
flatpaks: false
install_ui_apps: false
# Countries that are allowed to access the server Caddy reverse proxy
allowed_countries_codes:
  - US # United States
  - CA # Canada
  - GB # United Kingdom
  - DE # Germany
  - FR # France
  - ES # Spain
  - IT # Italy
  - NL # Netherlands
  - AU # Australia
  - NZ # New Zealand
  - JP # Japan
  - KR # South Korea
  - SK # Slovakia
  - FI # Finland
  - DK # Denmark
  - SG # Singapore
  - AT # Austria
  - CH # Switzerland
# IP ranges for blocked countries (generated automatically)
# This will be populated by the country blocking script
blocked_countries: []
# Enable/disable country blocking globally
enable_country_blocking: true

View File

@@ -0,0 +1,3 @@
---
# Workstation overrides: enable Flatpak support and install UI applications.
flatpaks: true
install_ui_apps: true

30
ansible/handlers/main.yml Normal file
View File

@@ -0,0 +1,30 @@
---
# Handlers shared by all task files. Handler names are matched verbatim by
# `notify:` directives elsewhere, so none of them may be renamed.

- name: Systemctl daemon-reload
  become: true
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart SSH service
  become: true
  ansible.builtin.service:
    # Bug fix: Arch packages the daemon as "sshd", Debian-family as "ssh";
    # the hard-coded "ssh" made this handler fail on Arch-based hosts.
    name: "{{ 'sshd' if ansible_pkg_mgr == 'pacman' else 'ssh' }}"
    state: restarted
    enabled: true

# Functional duplicate of "Systemctl daemon-reload"; kept because tasks
# outside this view may notify this exact name. Prefer the handler above
# for new tasks.
- name: reload systemd
  become: true
  ansible.builtin.systemd:
    daemon_reload: true

- name: restart borg-local-sync
  become: true
  ansible.builtin.systemd:
    name: borg-local-sync.service
    # Bug fix: the handler promises a restart but previously only set
    # `enabled`, so notifying it never restarted the unit.
    state: restarted
    enabled: true

- name: restart borg-local-sync-timer
  become: true
  ansible.builtin.systemd:
    name: borg-local-sync.timer
    state: restarted
    enabled: true

8
ansible/inventory.ini Normal file
View File

@@ -0,0 +1,8 @@
# All hosts use a local connection: this repo is applied on the machine
# itself rather than pushed over SSH.
[workstations]
mennos-laptop ansible_connection=local
mennos-desktop ansible_connection=local
[servers]
mennos-vps ansible_connection=local
mennos-vm ansible_connection=local
# NOTE(review): mennos-desktop is in both groups, so it receives both the
# workstation and the server task sets — confirm this is intentional.
mennos-desktop ansible_connection=local

19
ansible/playbook.yml Normal file
View File

@@ -0,0 +1,19 @@
---
# Entry-point playbook: runs the global tasks on every host, then layers
# workstation- or server-specific tasks based on inventory group membership.
- name: Configure all hosts
  hosts: all
  gather_facts: true
  handlers:
    - name: Import handler tasks
      ansible.builtin.import_tasks: handlers/main.yml
  tasks:
    - name: Include global tasks
      ansible.builtin.import_tasks: tasks/global/global.yml

    - name: Include workstation tasks
      ansible.builtin.import_tasks: tasks/workstations/workstation.yml
      # Keyed on the [workstations] inventory group instead of a hard-coded
      # host list so inventory changes don't silently skip hosts.
      when: "'workstations' in group_names"

    - name: Include server tasks
      ansible.builtin.import_tasks: tasks/servers/server.yml
      # Bug fix: the previous hard-coded list named hosts absent from the
      # inventory (mennos-server, mennos-hobbypc) and omitted mennos-vps,
      # so server tasks never ran on the VPS.
      when: "'servers' in group_names"

4
ansible/requirements.yml Normal file
View File

@@ -0,0 +1,4 @@
---
# Ansible Galaxy requirements. Install with:
#   ansible-galaxy collection install -r requirements.yml
# Collections section
collections:
  - community.general

View File

@@ -0,0 +1,53 @@
---
# Install Docker CE: via pacman on Arch-based systems, via the official
# get.docker.com convenience script everywhere else, then enable the
# service and grant the login user access to the docker socket.

- name: Check if Docker CE is installed
  ansible.builtin.command: docker --version
  register: docker_check
  changed_when: false
  failed_when: false

# Arch-based distributions (CachyOS, Arch Linux, etc.)
- name: Install Docker on Arch-based systems
  community.general.pacman:
    name:
      - docker
      - docker-compose
      - docker-buildx
    state: present
  become: true
  when: docker_check.rc != 0 and ansible_pkg_mgr == 'pacman'

# Non-Arch distributions
- name: Download Docker installation script
  ansible.builtin.get_url:
    url: https://get.docker.com
    dest: /tmp/get-docker.sh
    mode: "0755"
  when: docker_check.rc != 0 and ansible_pkg_mgr != 'pacman'

- name: Install Docker CE on non-Arch systems
  # Run the script directly: the previous `bash -c 'set -o pipefail && …'`
  # wrapper was a no-op because no pipe is involved. `creates` keeps the
  # task idempotent once docker is installed.
  ansible.builtin.command:
    cmd: sh /tmp/get-docker.sh
    creates: /usr/bin/docker
  when: docker_check.rc != 0 and ansible_pkg_mgr != 'pacman'

- name: Add user to docker group
  # Lets {{ ansible_user }} use the docker socket without sudo (takes
  # effect on next login).
  ansible.builtin.user:
    name: "{{ ansible_user }}"
    groups: docker
    append: true
  become: true
  when: docker_check.rc != 0

- name: Enable and start docker service
  ansible.builtin.systemd:
    name: docker
    state: started
    enabled: true
  become: true
  # Bug fix: the old trailing task notified "Reload systemd", a handler
  # name that does not exist (handler matching is exact and
  # case-sensitive), which aborts the play. Notify the real handler, and
  # only when this task actually changes something.
  notify: Systemctl daemon-reload

View File

@@ -0,0 +1,137 @@
---
# Global tasks applied to every host. Ordering matters: symlinks first so
# later tasks can rely on them, then facts, then the per-feature task files.
- name: Include global symlinks tasks
  ansible.builtin.import_tasks: tasks/global/symlinks.yml
- name: Gather package facts
  ansible.builtin.package_facts:
    manager: auto
  become: true
- name: Debug ansible_facts for troubleshooting
  ansible.builtin.debug:
    msg: |
      OS Family: {{ ansible_facts['os_family'] }}
      Distribution: {{ ansible_facts['distribution'] }}
      Package Manager: {{ ansible_pkg_mgr }}
      Kernel: {{ ansible_kernel }}
  tags: debug
# Daemon-style features are skipped on WSL2, where the kernel string
# contains "microsoft-standard-WSL2" and native service management differs.
- name: Include Tailscale tasks
  ansible.builtin.import_tasks: tasks/global/tailscale.yml
  become: true
  when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include Docker tasks
  ansible.builtin.import_tasks: tasks/global/docker.yml
  become: true
  when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include Ollama tasks
  ansible.builtin.import_tasks: tasks/global/ollama.yml
  become: true
  when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include OpenSSH Server tasks
  ansible.builtin.import_tasks: tasks/global/openssh-server.yml
  become: true
  when: "'microsoft-standard-WSL2' not in ansible_kernel"
# Package names differ per distro family, hence the pacman/non-pacman split.
- name: Ensure common packages are installed on Arch-based systems
  ansible.builtin.package:
    name:
      - git
      - vim
      - curl
      - wget
      - httpie
      - python
      - python-pip
      - python-pipx
      - python-pylint
      - go
    state: present
  become: true
  when: ansible_pkg_mgr == 'pacman'
- name: Ensure common packages are installed on non-Arch systems
  ansible.builtin.package:
    name:
      - git
      - vim
      - curl
      - wget
      - httpie
      - python3
      - python3-pip
      - python3-venv
      - pylint
      - black
      - pipx
      - nala
      - golang
    state: present
  become: true
  when: ansible_pkg_mgr != 'pacman'
- name: Configure performance optimizations
  ansible.builtin.sysctl:
    name: "{{ item.name }}"
    value: "{{ item.value }}"
    state: present
    reload: true
  become: true
  loop:
    # NOTE(review): very large value; presumably for mmap-heavy workloads
    # (games/databases) — confirm the intended consumer.
    - { name: "vm.max_map_count", value: "16777216" }
# --- PBinCLI via pipx ---
- name: Ensure pbincli is installed with pipx
  ansible.builtin.command: pipx install pbincli
  args:
    # `creates` makes the install idempotent (skipped once the binary exists).
    creates: ~/.local/bin/pbincli
  environment:
    PIPX_DEFAULT_PYTHON: /usr/bin/python3
  become: false
- name: Ensure ~/.config/pbincli directory exists
  ansible.builtin.file:
    path: "{{ ansible_env.HOME }}/.config/pbincli"
    state: directory
    mode: "0755"
- name: Configure pbincli to use custom server
  ansible.builtin.copy:
    dest: "{{ ansible_env.HOME }}/.config/pbincli/pbincli.conf"
    content: |
      server=https://bin.mvl.sh
    mode: "0644"
- name: Include WSL2 tasks
  ansible.builtin.import_tasks: tasks/global/wsl.yml
  when: "'microsoft-standard-WSL2' in ansible_kernel"
- name: Include Utils tasks
  ansible.builtin.import_tasks: tasks/global/utils.yml
  become: true
  tags: utils
# touch-style creation guarded by a stat so repeated runs don't report a
# change for an already-present ~/.hushlogin (suppresses login MOTD).
- name: Ensure ~/.hushlogin exists
  ansible.builtin.stat:
    path: ~/.hushlogin
  register: hushlogin_stat
- name: Create ~/.hushlogin if it does not exist
  ansible.builtin.file:
    path: ~/.hushlogin
    state: touch
    mode: "0644"
  when: not hushlogin_stat.stat.exists
# Ensure pwfeedback is enabled in sudoers for better password UX
- name: Ensure pwfeedback is present in Defaults env_reset line in /etc/sudoers
  ansible.builtin.replace:
    path: /etc/sudoers
    # Negative lookahead keeps this idempotent: only matches lines that do
    # not already mention pwfeedback.
    regexp: '^Defaults\s+env_reset(?!.*pwfeedback)'
    replace: 'Defaults env_reset,pwfeedback'
    # visudo validation prevents writing a syntactically broken sudoers.
    validate: 'visudo -cf %s'
  become: true
  tags: sudoers

View File

@@ -0,0 +1,27 @@
---
# Install Ollama via the official install script and ensure its systemd
# service is enabled and running.

- name: Check if Ollama is installed
  ansible.builtin.command: ollama --version
  register: ollama_check
  changed_when: false
  failed_when: false

- name: Download Ollama install script
  ansible.builtin.get_url:
    url: https://ollama.com/install.sh
    dest: /tmp/install_ollama.sh
    mode: "0755"
  when: ollama_check.rc != 0

- name: Install Ollama
  # Run the script directly: the previous `bash -c 'set -o pipefail && …'`
  # wrapper was a no-op since no pipe is involved. `creates` keeps the
  # task idempotent after a successful install.
  ansible.builtin.command:
    cmd: sh /tmp/install_ollama.sh
    creates: /usr/local/bin/ollama
  when: ollama_check.rc != 0

- name: Ensure Ollama service is enabled and running
  # Renamed from "Check if Ollama is running": this task starts and
  # enables the unit, it does not merely check it.
  ansible.builtin.systemd:
    name: ollama
    state: started
    enabled: true
  become: true
  register: ollama_service

View File

@@ -0,0 +1,36 @@
---
# Install and configure the OpenSSH server. Package and service names
# differ between Arch (openssh / sshd) and Debian-family (openssh-server /
# ssh), so both are derived from the package manager in one place instead
# of four near-duplicate tasks.

- name: Ensure the OpenSSH server package is installed
  ansible.builtin.package:
    name: "{{ 'openssh' if ansible_pkg_mgr == 'pacman' else 'openssh-server' }}"
    state: present

- name: Ensure the SSH service is enabled and running
  ansible.builtin.service:
    name: "{{ 'sshd' if ansible_pkg_mgr == 'pacman' else 'ssh' }}"
    state: started
    enabled: true

- name: Ensure SSH server configuration is proper
  ansible.builtin.template:
    src: templates/sshd_config.j2
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: "0644"
    # Refuse to install a config file that sshd cannot parse.
    validate: "/usr/sbin/sshd -t -f %s"
  notify: Restart SSH service

View File

@@ -0,0 +1,32 @@
---
# Install Tailscale via the official install script, ensure the daemon is
# up, and remind the user to authenticate when `tailscale status` fails.

- name: Check if Tailscale is installed
  ansible.builtin.command: which tailscale
  register: tailscale_check
  changed_when: false
  failed_when: false

- name: Install Tailscale using curl script
  # Bug fix: without pipefail a failed curl download would silently run
  # `sh` on an empty stream and the task would still "succeed".
  ansible.builtin.shell: set -o pipefail && curl -fsSL https://tailscale.com/install.sh | sh
  args:
    executable: /bin/bash
    creates: /usr/bin/tailscale
  when: tailscale_check.rc != 0
  become: true

- name: Check if Tailscale is running
  # rc != 0 here means the node is not authenticated yet; used below.
  ansible.builtin.command: tailscale status
  register: tailscale_status
  changed_when: false
  failed_when: false

- name: Enable and start Tailscale service
  ansible.builtin.systemd:
    name: tailscaled
    state: started
    enabled: true
    daemon_reload: true
  become: true

- name: Notify user to authenticate Tailscale
  ansible.builtin.debug:
    msg: "Please authenticate Tailscale by running: sudo tailscale up --operator=$USER"
  when: tailscale_status.rc != 0

View File

@@ -0,0 +1,62 @@
---
# Installs the dotfiles "utils" as commands in ~/.local/bin:
#   - plain script files are symlinked as-is,
#   - standalone .go files are compiled to a binary of the same name,
#   - directories containing a go.mod are built as whole Go projects.
# The tasks below are coupled through registered variables
# (utils_files -> symlink/compile loops, utils_dirs -> go_mod_check).
- name: Process utils files
  block:
    - name: Load DOTFILES_PATH environment variable
      ansible.builtin.set_fact:
        dotfiles_path: "{{ lookup('env', 'DOTFILES_PATH') }}"
      become: false
    - name: Ensure ~/.local/bin exists
      ansible.builtin.file:
        path: "{{ ansible_env.HOME }}/.local/bin"
        state: directory
        mode: "0755"
      become: false
    # Non-recursive: only top-level files in utils/ are symlinked/compiled.
    - name: Scan utils folder for files
      ansible.builtin.find:
        paths: "{{ dotfiles_path }}/ansible/tasks/global/utils"
        file_type: file
      register: utils_files
      become: false
    - name: Scan utils folder for Go projects (directories with go.mod)
      ansible.builtin.find:
        paths: "{{ dotfiles_path }}/ansible/tasks/global/utils"
        file_type: directory
        recurse: true
      register: utils_dirs
      become: false
    # stat each candidate directory; go_mod_check.results lines up 1:1 with
    # utils_dirs.files via item.item below.
    - name: Filter directories that contain go.mod files
      ansible.builtin.stat:
        path: "{{ item.path }}/go.mod"
      loop: "{{ utils_dirs.files }}"
      register: go_mod_check
      become: false
    - name: Create symlinks for utils scripts
      ansible.builtin.file:
        src: "{{ item.path }}"
        dest: "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename }}"
        state: link
      loop: "{{ utils_files.files }}"
      when: not item.path.endswith('.go')
      become: false
    # NOTE(review): these build tasks have no changed_when/creates, so they
    # rebuild and report "changed" on every run — confirm that's acceptable.
    - name: Compile standalone Go files and place binaries in ~/.local/bin
      ansible.builtin.command:
        cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename | regex_replace('\.go$', '') }}" "{{ item.path }}"
      loop: "{{ utils_files.files }}"
      when: item.path.endswith('.go')
      become: false
    - name: Compile Go projects and place binaries in ~/.local/bin
      ansible.builtin.command:
        cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.item.path | basename }}" .
        chdir: "{{ item.item.path }}"
      loop: "{{ go_mod_check.results }}"
      when: item.stat.exists
      become: false
  tags:
    - utils

View File

@@ -0,0 +1,124 @@
# Dynamic DNS OnePassword Setup
This document explains how to set up the required OnePassword entries for the Dynamic DNS automation.
## Overview
The Dynamic DNS task automatically retrieves credentials from OnePassword using the Ansible OnePassword lookup plugin. This eliminates the need for vault files and provides better security.
## Required OnePassword Entries
### 1. CloudFlare API Token
**Location:** `CloudFlare API Token` in `Dotfiles` vault, field `password`
**Setup Steps:**
1. Go to [CloudFlare API Tokens](https://dash.cloudflare.com/profile/api-tokens)
2. Click "Create Token"
3. Use the "Edit zone DNS" template
4. Configure permissions:
- Zone: DNS: Edit
- Zone Resources: Include all zones (or specific zones for your domains)
5. Add IP address filtering if desired (optional but recommended)
6. Click "Continue to summary" and "Create Token"
7. Copy the token and save it in OnePassword:
- Title: `CloudFlare API Token`
- Vault: `Dotfiles`
- Field: `password` (this should be the main password field)
### 2. Telegram Bot Credentials
**Location:** `Telegram DynDNS Bot` in `Dotfiles` vault, fields `password` and `chat_id`
**Setup Steps:**
#### Create Telegram Bot:
1. Message [@BotFather](https://t.me/BotFather) on Telegram
2. Send `/start` then `/newbot`
3. Follow the prompts to create your bot
4. Save the bot token (format: `123456789:ABCdefGHijklMNopQRstUVwxyz`)
#### Get Chat ID:
1. Send any message to your new bot
2. Visit: `https://api.telegram.org/bot<YOUR_BOT_TOKEN>/getUpdates`
3. Look for `"chat":{"id":YOUR_CHAT_ID}` in the response
4. Save the chat ID (format: `987654321` or `-987654321` for groups)
#### Save in OnePassword:
- Title: `Telegram DynDNS Bot`
- Vault: `Dotfiles`
- Fields:
- `password`: Your bot token (123456789:ABCdefGHijklMNopQRstUVwxyz)
- `chat_id`: Your chat ID (987654321)
## Verification
You can test that the OnePassword lookups work by running:
```bash
# Test CloudFlare token lookup
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}"
# Test Telegram bot token
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}"
# Test Telegram chat ID
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}"
```
## Security Notes
- Credentials are never stored in version control
- Environment file (`~/.local/bin/dynamic-dns.env`) has 600 permissions
- OnePassword CLI must be authenticated before running Ansible
- Make sure to run `op signin` before executing the playbook
## Troubleshooting
### OnePassword CLI Not Authenticated
```bash
op signin
```
### Missing Fields in OnePassword
Ensure the exact field names match:
- CloudFlare: field must be named `password`
- Telegram: fields must be named `password` and `chat_id`
### Invalid CloudFlare Token
- Check token has `Zone:DNS:Edit` permissions
- Verify token is active in CloudFlare dashboard
- Test with: `curl -H "Authorization: Bearer YOUR_TOKEN" https://api.cloudflare.com/client/v4/user/tokens/verify`
### Telegram Not Working
- Ensure you've sent at least one message to your bot
- Verify chat ID format (numbers only, may start with -)
- Test with: `go run dynamic-dns-cf.go --test-telegram`
## Usage
Once set up, the dynamic DNS will automatically:
- Update DNS records every 15 minutes
- Send Telegram notifications when IP changes
- Log all activity to system journal (`journalctl -t dynamic-dns`)
## Domains Configured
The automation updates these domains:
- `vleeuwen.me`
- `mvl.sh`
- `mennovanleeuwen.nl`
To modify the domain list, edit the wrapper script at:
`~/.local/bin/dynamic-dns-update.sh`

View File

@@ -0,0 +1,903 @@
package main
import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"
)
// CloudFlare API structures
type CloudFlareResponse struct {
Success bool `json:"success"`
Errors []CloudFlareError `json:"errors"`
Result json.RawMessage `json:"result"`
Messages []CloudFlareMessage `json:"messages"`
}
type CloudFlareError struct {
Code int `json:"code"`
Message string `json:"message"`
}
type CloudFlareMessage struct {
Code int `json:"code"`
Message string `json:"message"`
}
type DNSRecord struct {
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
Content string `json:"content"`
TTL int `json:"ttl"`
ZoneID string `json:"zone_id"`
}
type Zone struct {
ID string `json:"id"`
Name string `json:"name"`
}
type TokenVerification struct {
ID string `json:"id"`
Status string `json:"status"`
}
type NotificationInfo struct {
RecordName string
OldIP string
NewIP string
IsNew bool
}
// Configuration
type Config struct {
APIToken string
RecordNames []string
IPSources []string
DryRun bool
Verbose bool
Force bool
TTL int
TelegramBotToken string
TelegramChatID string
Client *http.Client
}
// Default IP sources
var defaultIPSources = []string{
"https://ifconfig.co/ip",
"https://ip.seeip.org",
"https://ipv4.icanhazip.com",
"https://api.ipify.org",
}
func main() {
config := &Config{
Client: &http.Client{Timeout: 10 * time.Second},
}
// Command line flags
var ipSourcesFlag string
var recordsFlag string
var listZones bool
var testTelegram bool
flag.StringVar(&recordsFlag, "record", "", "DNS A record name(s) to update - comma-separated for multiple (required)")
flag.StringVar(&ipSourcesFlag, "ip-sources", "", "Comma-separated list of IP detection services (optional)")
flag.BoolVar(&config.DryRun, "dry-run", false, "Show what would be done without making changes")
flag.BoolVar(&config.Verbose, "verbose", false, "Enable verbose logging")
flag.BoolVar(&listZones, "list-zones", false, "List all accessible zones and exit")
flag.BoolVar(&config.Force, "force", false, "Force update even if IP hasn't changed")
flag.BoolVar(&testTelegram, "test-telegram", false, "Send a test Telegram notification and exit")
flag.IntVar(&config.TTL, "ttl", 300, "TTL for DNS record in seconds")
// Custom usage function
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "CloudFlare Dynamic DNS Tool\n\n")
fmt.Fprintf(os.Stderr, "Updates CloudFlare DNS A records with your current public IP address.\n")
fmt.Fprintf(os.Stderr, "Supports multiple records, dry-run mode, and Telegram notifications.\n\n")
fmt.Fprintf(os.Stderr, "USAGE:\n")
fmt.Fprintf(os.Stderr, " %s [OPTIONS]\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "REQUIRED ENVIRONMENT VARIABLES:\n")
fmt.Fprintf(os.Stderr, " CLOUDFLARE_API_TOKEN CloudFlare API token with Zone:DNS:Edit permissions\n")
fmt.Fprintf(os.Stderr, " Get from: https://dash.cloudflare.com/profile/api-tokens\n\n")
fmt.Fprintf(os.Stderr, "OPTIONAL ENVIRONMENT VARIABLES:\n")
fmt.Fprintf(os.Stderr, " TELEGRAM_BOT_TOKEN Telegram bot token for notifications\n")
fmt.Fprintf(os.Stderr, " TELEGRAM_CHAT_ID Telegram chat ID to send notifications to\n\n")
fmt.Fprintf(os.Stderr, "OPTIONS:\n")
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "\nEXAMPLES:\n")
fmt.Fprintf(os.Stderr, " # Update single record\n")
fmt.Fprintf(os.Stderr, " %s -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Update multiple records\n")
fmt.Fprintf(os.Stderr, " %s -record \"home.example.com,api.example.com,vpn.mydomain.net\"\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Dry run with verbose output\n")
fmt.Fprintf(os.Stderr, " %s -dry-run -verbose -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Force update even if IP hasn't changed\n")
fmt.Fprintf(os.Stderr, " %s -force -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Custom TTL and IP sources\n")
fmt.Fprintf(os.Stderr, " %s -record home.example.com -ttl 600 -ip-sources \"https://ifconfig.co/ip,https://api.ipify.org\"\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # List accessible CloudFlare zones\n")
fmt.Fprintf(os.Stderr, " %s -list-zones\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Test Telegram notifications\n")
fmt.Fprintf(os.Stderr, " %s -test-telegram\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "SETUP:\n")
fmt.Fprintf(os.Stderr, " 1. Create CloudFlare API token:\n")
fmt.Fprintf(os.Stderr, " - Go to https://dash.cloudflare.com/profile/api-tokens\n")
fmt.Fprintf(os.Stderr, " - Use 'Edit zone DNS' template\n")
fmt.Fprintf(os.Stderr, " - Select your zones\n")
fmt.Fprintf(os.Stderr, " - Copy token and set CLOUDFLARE_API_TOKEN environment variable\n\n")
fmt.Fprintf(os.Stderr, " 2. Optional: Setup Telegram notifications:\n")
fmt.Fprintf(os.Stderr, " - Message @BotFather on Telegram to create a bot\n")
fmt.Fprintf(os.Stderr, " - Get your chat ID by messaging your bot, then visit:\n")
fmt.Fprintf(os.Stderr, " https://api.telegram.org/bot<BOT_TOKEN>/getUpdates\n")
fmt.Fprintf(os.Stderr, " - Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables\n\n")
fmt.Fprintf(os.Stderr, "NOTES:\n")
fmt.Fprintf(os.Stderr, " - Records can be in different CloudFlare zones\n")
fmt.Fprintf(os.Stderr, " - Only updates when IP actually changes (unless -force is used)\n")
fmt.Fprintf(os.Stderr, " - Supports both root domains and subdomains\n")
fmt.Fprintf(os.Stderr, " - Telegram notifications sent only when IP changes\n")
fmt.Fprintf(os.Stderr, " - Use -dry-run to test without making changes\n\n")
}
flag.Parse()
// Validate required arguments (unless listing zones or testing telegram)
if recordsFlag == "" && !listZones && !testTelegram {
fmt.Fprintf(os.Stderr, "Error: -record flag is required\n")
flag.Usage()
os.Exit(1)
}
// Parse record names
if recordsFlag != "" {
config.RecordNames = strings.Split(recordsFlag, ",")
// Trim whitespace from each record name
for i, record := range config.RecordNames {
config.RecordNames[i] = strings.TrimSpace(record)
}
}
// Get API token from environment
config.APIToken = os.Getenv("CLOUDFLARE_API_TOKEN")
if config.APIToken == "" {
fmt.Fprintf(os.Stderr, "Error: CLOUDFLARE_API_TOKEN environment variable is required\n")
fmt.Fprintf(os.Stderr, "Get your API token from: https://dash.cloudflare.com/profile/api-tokens\n")
fmt.Fprintf(os.Stderr, "Create a token with 'Zone:DNS:Edit' permissions for your zone\n")
os.Exit(1)
}
// Get optional Telegram credentials
config.TelegramBotToken = os.Getenv("TELEGRAM_BOT_TOKEN")
config.TelegramChatID = os.Getenv("TELEGRAM_CHAT_ID")
if config.Verbose && config.TelegramBotToken != "" && config.TelegramChatID != "" {
fmt.Println("Telegram notifications enabled")
}
// Parse IP sources
if ipSourcesFlag != "" {
config.IPSources = strings.Split(ipSourcesFlag, ",")
} else {
config.IPSources = defaultIPSources
}
if config.Verbose {
fmt.Printf("Config: Records=%v, TTL=%d, DryRun=%v, Force=%v, IPSources=%v\n",
config.RecordNames, config.TTL, config.DryRun, config.Force, config.IPSources)
}
// If testing telegram, do that and exit (skip API token validation)
if testTelegram {
if err := testTelegramNotification(config); err != nil {
fmt.Fprintf(os.Stderr, "Error testing Telegram: %v\n", err)
os.Exit(1)
}
return
}
// Validate API token
if err := validateToken(config); err != nil {
fmt.Fprintf(os.Stderr, "Error validating API token: %v\n", err)
os.Exit(1)
}
if config.Verbose {
fmt.Println("API token validated successfully")
}
// If listing zones, do that and exit
if listZones {
if err := listAllZones(config); err != nil {
fmt.Fprintf(os.Stderr, "Error listing zones: %v\n", err)
os.Exit(1)
}
return
}
// Get current public IP
currentIP, err := getCurrentIP(config)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting current IP: %v\n", err)
os.Exit(1)
}
if config.Verbose {
fmt.Printf("Current public IP: %s\n", currentIP)
fmt.Printf("Processing %d record(s)\n", len(config.RecordNames))
}
// Process each record
var totalUpdates int
var allNotifications []NotificationInfo
for _, recordName := range config.RecordNames {
if config.Verbose {
fmt.Printf("\n--- Processing record: %s ---\n", recordName)
}
// Find the zone for the record
zoneName, zoneID, err := findZoneForRecord(config, recordName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding zone for %s: %v\n", recordName, err)
continue
}
if config.Verbose {
fmt.Printf("Found zone: %s (ID: %s)\n", zoneName, zoneID)
}
// Find existing DNS record
record, err := findDNSRecordByName(config, zoneID, recordName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding DNS record %s: %v\n", recordName, err)
continue
}
// Compare IPs
if record != nil {
if record.Content == currentIP && !config.Force {
fmt.Printf("DNS record %s already points to %s - no update needed\n", recordName, currentIP)
continue
}
if config.Verbose {
if record.Content == currentIP {
fmt.Printf("DNS record %s already points to %s, but forcing update\n",
recordName, currentIP)
} else {
fmt.Printf("DNS record %s currently points to %s, needs update to %s\n",
recordName, record.Content, currentIP)
}
}
} else {
if config.Verbose {
fmt.Printf("DNS record %s does not exist, will create it\n", recordName)
}
}
// Update or create record
if config.DryRun {
if record != nil {
if record.Content == currentIP && config.Force {
fmt.Printf("DRY RUN: Would force update DNS record %s (already %s)\n",
recordName, currentIP)
} else {
fmt.Printf("DRY RUN: Would update DNS record %s from %s to %s\n",
recordName, record.Content, currentIP)
}
} else {
fmt.Printf("DRY RUN: Would create DNS record %s with IP %s\n",
recordName, currentIP)
}
// Collect notification info for dry-run
if record == nil || record.Content != currentIP || config.Force {
var oldIPForNotification string
if record != nil {
oldIPForNotification = record.Content
}
allNotifications = append(allNotifications, NotificationInfo{
RecordName: recordName,
OldIP: oldIPForNotification,
NewIP: currentIP,
IsNew: record == nil,
})
}
continue
}
var wasUpdated bool
var oldIP string
if record != nil {
oldIP = record.Content
err = updateDNSRecordByName(config, zoneID, record.ID, recordName, currentIP)
if err != nil {
fmt.Fprintf(os.Stderr, "Error updating DNS record %s: %v\n", recordName, err)
continue
}
fmt.Printf("Successfully updated DNS record %s to %s\n", recordName, currentIP)
wasUpdated = true
} else {
err = createDNSRecordByName(config, zoneID, recordName, currentIP)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating DNS record %s: %v\n", recordName, err)
continue
}
fmt.Printf("Successfully created DNS record %s with IP %s\n", recordName, currentIP)
wasUpdated = true
}
// Collect notification info for actual updates
if wasUpdated && (record == nil || oldIP != currentIP || config.Force) {
allNotifications = append(allNotifications, NotificationInfo{
RecordName: recordName,
OldIP: oldIP,
NewIP: currentIP,
IsNew: record == nil,
})
totalUpdates++
}
}
// Send batch notification if there were any changes
if len(allNotifications) > 0 {
sendBatchTelegramNotification(config, allNotifications, config.DryRun)
}
if !config.DryRun && config.Verbose {
fmt.Printf("\nProcessed %d record(s), %d update(s) made\n", len(config.RecordNames), totalUpdates)
}
}
// validateToken calls CloudFlare's token-verification endpoint and returns
// nil only when the API reports success and the token status is "active".
func validateToken(config *Config) error {
	req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/user/tokens/verify", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var cfResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
		return err
	}
	if !cfResp.Success {
		return fmt.Errorf("token validation failed: %v", cfResp.Errors)
	}
	// Result is raw JSON; decode only the fields needed for the status check.
	var tokenInfo TokenVerification
	if err := json.Unmarshal(cfResp.Result, &tokenInfo); err != nil {
		return err
	}
	if tokenInfo.Status != "active" {
		return fmt.Errorf("token is not active, status: %s", tokenInfo.Status)
	}
	return nil
}
// getCurrentIP queries each configured IP source in order and returns the
// first response body that parses as a valid IP address. It returns an
// error only when every source fails; the last failure is included.
func getCurrentIP(config *Config) (string, error) {
	var lastError error
	for _, source := range config.IPSources {
		if config.Verbose {
			fmt.Printf("Trying IP source: %s\n", source)
		}
		resp, err := config.Client.Get(source)
		if err != nil {
			lastError = err
			if config.Verbose {
				fmt.Printf("Failed to get IP from %s: %v\n", source, err)
			}
			continue
		}
		body, err := io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			lastError = err
			continue
		}
		if resp.StatusCode != 200 {
			lastError = fmt.Errorf("HTTP %d from %s", resp.StatusCode, source)
			continue
		}
		ip := strings.TrimSpace(string(body))
		// Bug fix: previously any non-empty body was accepted, so a captive
		// portal or HTML error page could be written into DNS. Require a
		// syntactically valid IP before trusting the source.
		if net.ParseIP(ip) == nil {
			lastError = fmt.Errorf("invalid IP %q from %s", ip, source)
			continue
		}
		return ip, nil
	}
	return "", fmt.Errorf("failed to get IP from any source, last error: %v", lastError)
}
// findZoneForRecord finds the CloudFlare zone containing recordName by
// stripping leading labels one at a time (e.g. "sub.example.com" ->
// "example.com" -> "com") and querying the zones API for each candidate.
// Returns (zoneName, zoneID, nil) on the first match.
//
// NOTE(review): request/decode errors inside the loop are silently
// swallowed (treated as "no match for this candidate"); a transient HTTP
// failure therefore surfaces as "no zone found" — consider logging.
func findZoneForRecord(config *Config, recordName string) (string, string, error) {
	// Extract domain from record name (e.g., "sub.example.com" -> try "example.com", "com")
	parts := strings.Split(recordName, ".")
	if config.Verbose {
		fmt.Printf("Finding zone for record: %s\n", recordName)
	}
	for i := 0; i < len(parts); i++ {
		zoneName := strings.Join(parts[i:], ".")
		req, err := http.NewRequest("GET",
			fmt.Sprintf("https://api.cloudflare.com/client/v4/zones?name=%s", zoneName), nil)
		if err != nil {
			continue
		}
		req.Header.Set("Authorization", "Bearer "+config.APIToken)
		req.Header.Set("Content-Type", "application/json")
		resp, err := config.Client.Do(req)
		if err != nil {
			continue
		}
		var cfResp CloudFlareResponse
		err = json.NewDecoder(resp.Body).Decode(&cfResp)
		resp.Body.Close()
		if err != nil || !cfResp.Success {
			continue
		}
		var zones []Zone
		if err := json.Unmarshal(cfResp.Result, &zones); err != nil {
			continue
		}
		// The API filter is exact-name, so the first hit is the zone.
		if len(zones) > 0 {
			return zones[0].Name, zones[0].ID, nil
		}
	}
	return "", "", fmt.Errorf("no zone found for record %s", recordName)
}
// findDNSRecordByName looks up the A record named recordName in the given
// zone. It returns (nil, nil) when the record does not exist, and an error
// for transport or API failures.
func findDNSRecordByName(config *Config, zoneID string, recordName string) (*DNSRecord, error) {
	// Bug fix: query-escape the record name so reserved URL characters
	// cannot corrupt the query string (it was interpolated verbatim).
	reqURL := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records?type=A&name=%s",
		zoneID, url.QueryEscape(recordName))
	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var cfResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
		return nil, err
	}
	if !cfResp.Success {
		return nil, fmt.Errorf("API error: %v", cfResp.Errors)
	}
	var records []DNSRecord
	if err := json.Unmarshal(cfResp.Result, &records); err != nil {
		return nil, err
	}
	if len(records) == 0 {
		return nil, nil // Record doesn't exist
	}
	return &records[0], nil
}
// updateDNSRecordByName overwrites the existing A record identified by
// recordID in the given zone, pointing recordName at ip with the
// configured TTL. Returns an error on transport or API failure.
func updateDNSRecordByName(config *Config, zoneID, recordID, recordName, ip string) error {
	payload, err := json.Marshal(map[string]interface{}{
		"type":    "A",
		"name":    recordName,
		"content": ip,
		"ttl":     config.TTL,
	})
	if err != nil {
		return err
	}

	endpoint := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records/%s", zoneID, recordID)
	req, err := http.NewRequest("PUT", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// CloudFlare wraps every response; an HTTP 200 can still carry a
	// failure flag, so check Success rather than the status code.
	var apiResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return err
	}
	if !apiResp.Success {
		return fmt.Errorf("API error: %v", apiResp.Errors)
	}
	return nil
}
// createDNSRecordByName creates a new A record in the given zone pointing
// recordName at ip with the configured TTL. Returns an error on transport
// or API failure.
func createDNSRecordByName(config *Config, zoneID, recordName, ip string) error {
	data := map[string]interface{}{
		"type":    "A",
		"name":    recordName,
		"content": ip,
		"ttl":     config.TTL,
	}
	jsonData, err := json.Marshal(data)
	if err != nil {
		return err
	}
	url := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records", zoneID)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// CloudFlare signals failure in the JSON envelope even on HTTP 200,
	// so the Success flag is checked rather than the status code.
	var cfResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
		return err
	}
	if !cfResp.Success {
		return fmt.Errorf("API error: %v", cfResp.Errors)
	}
	return nil
}
// listAllZones fetches every zone visible to the configured API token and
// prints each zone's name and ID to stdout. Returns an error when the
// request fails, the response is not valid JSON, or CloudFlare reports
// Success == false.
func listAllZones(config *Config) error {
	req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/zones", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var cfResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
		return err
	}
	if !cfResp.Success {
		return fmt.Errorf("API error: %v", cfResp.Errors)
	}
	// Result is a raw JSON payload; decode it into the zone list.
	var zones []Zone
	if err := json.Unmarshal(cfResp.Result, &zones); err != nil {
		return err
	}
	fmt.Printf("Found %d accessible zones:\n", len(zones))
	for _, zone := range zones {
		fmt.Printf(" - %s (ID: %s)\n", zone.Name, zone.ID)
	}
	// Zero zones is not an error — just hint at the likely token problem.
	if len(zones) == 0 {
		fmt.Println("No zones found. Make sure your API token has Zone:Read permissions.")
	}
	return nil
}
// sendTelegramNotification posts a DNS-change notification to the
// configured Telegram chat. record == nil is treated as "record created";
// otherwise the message describes an IP change from oldIP to newIP.
// isDryRun prefixes the message. Failures are only printed when
// config.Verbose is set; the function never returns an error to callers.
//
// NOTE(review): both message branches hard-code "test-record" as the
// record name instead of using the actual record — this looks like
// leftover test scaffolding; confirm and pass the real name through.
func sendTelegramNotification(config *Config, record *DNSRecord, oldIP, newIP string, isDryRun bool) {
	// Skip if Telegram is not configured
	if config.TelegramBotToken == "" || config.TelegramChatID == "" {
		return
	}
	var message string
	dryRunPrefix := ""
	if isDryRun {
		dryRunPrefix = "🧪 DRY RUN - "
	}
	if record == nil {
		// nil record => the record was just created.
		message = fmt.Sprintf("%s🆕 DNS Record Created\n\n"+
			"Record: %s\n"+
			"New IP: %s\n"+
			"TTL: %d seconds",
			dryRunPrefix, "test-record", newIP, config.TTL)
	} else {
		message = fmt.Sprintf("%s🔄 IP Address Changed\n\n"+
			"Record: %s\n"+
			"Old IP: %s\n"+
			"New IP: %s\n"+
			"TTL: %d seconds",
			dryRunPrefix, "test-record", oldIP, newIP, config.TTL)
	}
	// Prepare Telegram API request
	telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
	payload := map[string]interface{}{
		"chat_id":    config.TelegramChatID,
		"text":       message,
		"parse_mode": "HTML",
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to marshal Telegram payload: %v\n", err)
		}
		return
	}
	// Send notification
	req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to create Telegram request: %v\n", err)
		}
		return
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to send Telegram notification: %v\n", err)
		}
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		if config.Verbose {
			fmt.Println("Telegram notification sent successfully")
		}
	} else {
		// Non-200: surface the response body for debugging (verbose only).
		if config.Verbose {
			body, _ := io.ReadAll(resp.Body)
			fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body))
		}
	}
}
// testTelegramNotification sends a fixed test message to the configured
// Telegram chat so the user can verify bot token and chat ID. Returns an
// error when Telegram is unconfigured, the request fails, or the API
// responds with a non-200 status; on success it prints a confirmation
// (plus the raw response when config.Verbose is set).
func testTelegramNotification(config *Config) error {
	if config.TelegramBotToken == "" || config.TelegramChatID == "" {
		return fmt.Errorf("Telegram not configured. Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables")
	}
	fmt.Println("Testing Telegram notification...")
	// Send a test message
	text := "🧪 Dynamic DNS Test\n\n" +
		"This is a test notification from your CloudFlare Dynamic DNS tool.\n\n" +
		"✅ Telegram integration is working correctly!"
	endpoint := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
	reqBody, err := json.Marshal(map[string]interface{}{
		"chat_id":    config.TelegramChatID,
		"text":       text,
		"parse_mode": "HTML",
	})
	if err != nil {
		return fmt.Errorf("failed to marshal payload: %v", err)
	}
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %v", err)
	}
	defer resp.Body.Close()
	respBody, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != 200 {
		return fmt.Errorf("failed to send notification (HTTP %d): %s", resp.StatusCode, string(respBody))
	}
	fmt.Println("✅ Test notification sent successfully!")
	if config.Verbose {
		fmt.Printf("Response: %s\n", string(respBody))
	}
	return nil
}
// sendBatchTelegramNotification posts a single Telegram message summarizing
// one or more DNS record changes. For a single NotificationInfo the message
// distinguishes created / forced-unchanged / changed records; for several it
// prints counts plus a per-record list. isDryRun prefixes the message.
// Failures are only printed when config.Verbose is set; the function never
// returns an error to callers.
func sendBatchTelegramNotification(config *Config, notifications []NotificationInfo, isDryRun bool) {
	// Skip if Telegram is not configured
	if config.TelegramBotToken == "" || config.TelegramChatID == "" {
		return
	}
	// Nothing changed — nothing to report.
	if len(notifications) == 0 {
		return
	}
	var message string
	dryRunPrefix := ""
	if isDryRun {
		dryRunPrefix = "🧪 DRY RUN - "
	}
	if len(notifications) == 1 {
		// Single record notification
		notif := notifications[0]
		if notif.IsNew {
			message = fmt.Sprintf("%s🆕 DNS Record Created\n\n"+
				"Record: %s\n"+
				"New IP: %s\n"+
				"TTL: %d seconds",
				dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL)
		} else if notif.OldIP == notif.NewIP {
			// Same IP on both sides means the caller forced an update.
			message = fmt.Sprintf("%s🔄 DNS Record Force Updated\n\n"+
				"Record: %s\n"+
				"IP: %s (unchanged)\n"+
				"TTL: %d seconds\n"+
				"Note: Forced update requested",
				dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL)
		} else {
			message = fmt.Sprintf("%s🔄 IP Address Changed\n\n"+
				"Record: %s\n"+
				"Old IP: %s\n"+
				"New IP: %s\n"+
				"TTL: %d seconds",
				dryRunPrefix, notif.RecordName, notif.OldIP, notif.NewIP, config.TTL)
		}
	} else {
		// Multiple records notification
		var newCount, updatedCount int
		for _, notif := range notifications {
			if notif.IsNew {
				newCount++
			} else {
				updatedCount++
			}
		}
		message = fmt.Sprintf("%s📋 Multiple DNS Records Updated\n\n", dryRunPrefix)
		if newCount > 0 {
			message += fmt.Sprintf("🆕 Created: %d record(s)\n", newCount)
		}
		if updatedCount > 0 {
			message += fmt.Sprintf("🔄 Updated: %d record(s)\n", updatedCount)
		}
		// All records in a batch share the same new IP; use the first entry's.
		message += fmt.Sprintf("\nNew IP: %s\nTTL: %d seconds\n\nRecords:", notifications[0].NewIP, config.TTL)
		for _, notif := range notifications {
			if notif.IsNew {
				message += fmt.Sprintf("\n• %s (new)", notif.RecordName)
			} else if notif.OldIP == notif.NewIP {
				message += fmt.Sprintf("\n• %s (forced)", notif.RecordName)
			} else {
				message += fmt.Sprintf("\n• %s (%s → %s)", notif.RecordName, notif.OldIP, notif.NewIP)
			}
		}
	}
	// Send the notification using the same logic as single notifications
	telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
	payload := map[string]interface{}{
		"chat_id":    config.TelegramChatID,
		"text":       message,
		"parse_mode": "HTML",
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to marshal Telegram payload: %v\n", err)
		}
		return
	}
	req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to create Telegram request: %v\n", err)
		}
		return
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to send Telegram notification: %v\n", err)
		}
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		if config.Verbose {
			fmt.Println("Telegram notification sent successfully")
		}
	} else {
		// Non-200: surface the response body for debugging (verbose only).
		if config.Verbose {
			body, _ := io.ReadAll(resp.Body)
			fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body))
		}
	}
}

View File

@@ -0,0 +1,328 @@
package main
import (
"encoding/csv"
"flag"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
"time"
)
// Trip is one parsed row of the trip-export CSV.
type Trip struct {
	StartTime    time.Time // departure timestamp (parsed from Dutch date text)
	EndTime      time.Time // arrival timestamp
	StartAddr    string    // departure address (CSV column 3)
	EndAddr      string    // arrival address (CSV column 4)
	KMStart      float64   // odometer reading at departure
	KMEnd        float64   // odometer reading at arrival
	Distance     float64   // trip distance in km
	License      string    // vehicle license plate (CSV column 8)
	BusinessCost float64   // cost value from CSV column 11 — presumably business expense; confirm against export spec
	Type         string    // trimmed CSV column 12 — presumably trip category (business/private); confirm
}
// MonthStats aggregates driving statistics for one calendar month.
type MonthStats struct {
	TotalKM         float64       // sum of all trip distances (km)
	Trips           int           // number of trips
	Longest         float64       // longest single-trip distance (km)
	Shortest        float64       // shortest single-trip distance (km)
	TotalDuration   time.Duration // sum of trip durations
	LongestGap      time.Duration // largest idle gap between consecutive trips
	OdoAnomalies    int           // consecutive trips whose odometer readings don't line up
	AvgSpeed        float64       // mean of per-trip average speeds (km/h)
	AvgTripDuration time.Duration // TotalDuration / Trips
	FuelCost        float64       // estimated fuel cost (EUR); 0 unless fuel flags given
}
// main parses a Dutch-language trip-export CSV named on the command line,
// groups trips by month, computes per-month statistics, and prints a
// summary table plus the most frequent start/end addresses.
//
// Flags:
//
//	-fuelprice   fuel price per liter (EUR); enables fuel-cost estimation
//	-efficiency  fuel efficiency in km per liter
//	-lper100km   alternative efficiency input, liters per 100 km
func main() {
	fuelPrice := flag.Float64("fuelprice", 0, "Fuel price per liter (EUR)")
	fuelEfficiency := flag.Float64("efficiency", 0, "Fuel efficiency (km per liter)")
	lPer100km := flag.Float64("lper100km", 0, "Fuel consumption (liters per 100km)")
	flag.Parse()
	if len(flag.Args()) < 1 {
		fmt.Println("Usage: go run main.go -fuelprice <price> [-efficiency <km/l> | -lper100km <l/100km>] <filename.csv>")
		flag.PrintDefaults()
		return
	}
	// Convert l/100km to km/l if provided
	finalEfficiency := *fuelEfficiency
	if *lPer100km > 0 {
		finalEfficiency = 100.0 / *lPer100km
	}
	file, err := os.Open(flag.Arg(0))
	if err != nil {
		panic(err)
	}
	defer file.Close()
	reader := csv.NewReader(file)
	reader.Comma = ','
	// Allow rows with varying field counts: the default reader rejects the
	// whole file on the first ragged row, which made the per-row
	// len(record) < 13 guard below unreachable.
	reader.FieldsPerRecord = -1
	records, err := reader.ReadAll()
	if err != nil {
		panic(err)
	}
	// Guard: records[1:] would panic on a completely empty file.
	if len(records) == 0 {
		fmt.Println("No data found in CSV file")
		return
	}
	dutchMonths := map[string]string{
		"januari": "January", "februari": "February", "maart": "March",
		"april": "April", "mei": "May", "juni": "June", "juli": "July",
		"augustus": "August", "september": "September", "oktober": "October",
		"november": "November", "december": "December",
	}
	tripsByMonth := make(map[string][]Trip)
	startAddrCount := make(map[string]int)
	endAddrCount := make(map[string]int)
	fuelEnabled := *fuelPrice > 0 && finalEfficiency > 0
	// Parse CSV (skip header row; silently drop rows that are short or have
	// unparsable timestamps)
	for _, record := range records[1:] {
		if len(record) < 13 {
			continue
		}
		// Parse start time
		startTime, err := parseDutchTime(record[1], dutchMonths)
		if err != nil {
			continue
		}
		// Parse end time
		endTime, err := parseDutchTime(record[2], dutchMonths)
		if err != nil {
			continue
		}
		// Parse distance data (commas stripped as thousands separators)
		kmStart, _ := strconv.ParseFloat(strings.ReplaceAll(record[5], ",", ""), 64)
		kmEnd, _ := strconv.ParseFloat(strings.ReplaceAll(record[6], ",", ""), 64)
		distance, _ := strconv.ParseFloat(strings.ReplaceAll(record[7], ",", ""), 64)
		trip := Trip{
			StartTime:    startTime,
			EndTime:      endTime,
			StartAddr:    record[3],
			EndAddr:      record[4],
			KMStart:      kmStart,
			KMEnd:        kmEnd,
			Distance:     distance,
			License:      record[8],
			BusinessCost: parseFloat(record[11]),
			Type:         strings.TrimSpace(record[12]),
		}
		monthKey := fmt.Sprintf("%d-%02d", startTime.Year(), startTime.Month())
		tripsByMonth[monthKey] = append(tripsByMonth[monthKey], trip)
		startAddrCount[trip.StartAddr]++
		endAddrCount[trip.EndAddr]++
	}
	// Calculate stats
	months := sortedKeys(tripsByMonth)
	statsByMonth := calculateStats(tripsByMonth, fuelEnabled, *fuelPrice, finalEfficiency)
	// Print results
	printMainTable(statsByMonth, months, fuelEnabled, tripsByMonth)
	printTopAddresses(startAddrCount, endAddrCount)
}
func parseDutchTime(datetime string, monthMap map[string]string) (time.Time, error) {
parts := strings.Split(datetime, " ")
if len(parts) < 4 {
return time.Time{}, fmt.Errorf("invalid time format")
}
engMonth, ok := monthMap[strings.ToLower(parts[1])]
if !ok {
return time.Time{}, fmt.Errorf("unknown month")
}
timeStr := fmt.Sprintf("%s %s %s %s", parts[0], engMonth, parts[2], parts[3])
return time.Parse("2 January 2006 15:04", timeStr)
}
// calculateStats computes a MonthStats for each month's trips. Trips are
// sorted chronologically first so that gap and odometer checks compare
// genuinely consecutive trips. AvgSpeed is the mean of per-trip average
// speeds (not total km / total time). When fuelEnabled, FuelCost is
// (total km / km-per-liter) * price-per-liter.
// Note: sort.Slice mutates the slices held by the input map.
func calculateStats(tripsByMonth map[string][]Trip, fuelEnabled bool, fuelPrice, fuelEfficiency float64) map[string]MonthStats {
	stats := make(map[string]MonthStats)
	for month, trips := range tripsByMonth {
		var s MonthStats
		var prevEnd time.Time
		var longestGap time.Duration
		sumSpeed := 0.0
		speedCount := 0
		// Chronological order is required for gap/odometer comparisons below.
		sort.Slice(trips, func(i, j int) bool {
			return trips[i].StartTime.Before(trips[j].StartTime)
		})
		for i, t := range trips {
			s.TotalKM += t.Distance
			s.Trips++
			duration := t.EndTime.Sub(t.StartTime)
			s.TotalDuration += duration
			// Only trips with positive duration contribute to average speed.
			if duration.Hours() > 0 {
				sumSpeed += t.Distance / duration.Hours()
				speedCount++
			}
			if t.Distance > s.Longest {
				s.Longest = t.Distance
			}
			// Shortest == 0 means "unset"; seed it with the first candidate.
			if t.Distance < s.Shortest || s.Shortest == 0 {
				s.Shortest = t.Distance
			}
			if i > 0 {
				// Idle time between the previous trip's end and this start.
				gap := t.StartTime.Sub(prevEnd)
				if gap > longestGap {
					longestGap = gap
				}
				// Odometer continuity: previous end reading should equal
				// this trip's start reading (within float tolerance).
				if math.Abs(trips[i-1].KMEnd-t.KMStart) > 0.01 {
					s.OdoAnomalies++
				}
			}
			prevEnd = t.EndTime
		}
		s.LongestGap = longestGap
		if speedCount > 0 {
			s.AvgSpeed = sumSpeed / float64(speedCount)
		} else {
			s.AvgSpeed = 0
		}
		if s.Trips > 0 {
			s.AvgTripDuration = time.Duration(int64(s.TotalDuration) / int64(s.Trips))
		}
		if fuelEnabled {
			// liters = km / (km per liter); cost = liters * price.
			s.FuelCost = (s.TotalKM / fuelEfficiency) * fuelPrice
		}
		stats[month] = s
	}
	return stats
}
// printMainTable renders the per-month statistics table to stdout in the
// order given by months. The fuel-cost column is appended only when
// fuelEnabled. tripsByMonth is re-scanned to find the durations of the
// longest/shortest trips (by distance), which MonthStats does not carry.
func printMainTable(stats map[string]MonthStats, months []string, fuelEnabled bool, tripsByMonth map[string][]Trip) {
	fmt.Println("\n=== Monthly Driving Overview ===")
	headers := []string{"Month", "Total", "Trips", "AvgKM", "Longest", "Shortest",
		"DriveTime", "AvgTripDur", "OdoErr", "AvgSpeed"}
	// One fixed-width %-Ns verb per header column.
	format := "%-10s | %-16s | %-7s | %-14s | %-24s | %-26s | %-18s | %-18s | %-10s | %-18s"
	if fuelEnabled {
		headers = append(headers, "Fuel Cost (EUR)")
		format += " | %-18s"
	}
	fmt.Printf(format+"\n", toInterfaceSlice(headers)...) // print header
	fmt.Println(strings.Repeat("-", 180))
	for _, month := range months {
		s := stats[month]
		trips := tripsByMonth[month]
		// Find longest and shortest trip durations (paired with the
		// longest/shortest distances; i == 0 seeds both trackers).
		var longestDur, shortestDur time.Duration
		var longestDist, shortestDist float64
		if len(trips) > 0 {
			for i, t := range trips {
				dur := t.EndTime.Sub(t.StartTime)
				if t.Distance > longestDist || i == 0 {
					longestDist = t.Distance
					longestDur = dur
				}
				if t.Distance < shortestDist || i == 0 {
					shortestDist = t.Distance
					shortestDur = dur
				}
			}
		}
		row := []interface{}{
			month,
			fmt.Sprintf("%.2f Km", s.TotalKM),
			fmt.Sprintf("%d", s.Trips),
			fmt.Sprintf("%.2f Km", safeDiv(s.TotalKM, float64(s.Trips))),
			fmt.Sprintf("%.2f Km (%s)", longestDist, fmtDuration(longestDur)),
			fmt.Sprintf("%.2f Km (%s)", shortestDist, fmtDuration(shortestDur)),
			fmtDuration(s.TotalDuration),
			fmtDuration(s.AvgTripDuration),
			fmt.Sprintf("%d", s.OdoAnomalies),
			fmt.Sprintf("%.2f Km/h", s.AvgSpeed),
		}
		if fuelEnabled {
			row = append(row, fmt.Sprintf("%.2f EUR", s.FuelCost))
		}
		fmt.Printf(format+"\n", row...)
	}
}
// toInterfaceSlice copies a []string into a []interface{} so it can be
// passed as the variadic arguments of fmt.Printf.
func toInterfaceSlice(strs []string) []interface{} {
	out := make([]interface{}, 0, len(strs))
	for _, s := range strs {
		out = append(out, s)
	}
	return out
}
// printTopAddresses prints the three most frequent start addresses and the
// three most frequent end addresses, each under its own heading.
func printTopAddresses(start, end map[string]int) {
	sections := []struct {
		header string
		counts map[string]int
	}{
		{"Top 3 Start Addresses:", start},
		{"\nTop 3 End Addresses:", end},
	}
	fmt.Println("\n=== Frequent Locations ===")
	for _, sec := range sections {
		fmt.Println(sec.header)
		printTopN(sec.counts, 3)
	}
}
// Helper functions: safeDiv, fmtDuration, printTopN, sortedKeys, parseFloat.
// safeDiv returns a/b, or 0 when b is zero (avoiding Inf/NaN results).
func safeDiv(a, b float64) float64 {
	if b != 0 {
		return a / b
	}
	return 0
}
func fmtDuration(d time.Duration) string {
h := int(d.Hours())
m := int(d.Minutes()) % 60
return fmt.Sprintf("%02dh%02dm", h, m)
}
// printTopN prints the n highest-count entries of counter to stdout, one
// per line as "rank. key (count)". Ordering among equal counts is
// unspecified (map iteration order feeds the sort).
func printTopN(counter map[string]int, n int) {
	type entry struct {
		key   string
		count int
	}
	entries := make([]entry, 0, len(counter))
	for k, c := range counter {
		entries = append(entries, entry{k, c})
	}
	sort.Slice(entries, func(a, b int) bool { return entries[a].count > entries[b].count })
	limit := n
	if len(entries) < limit {
		limit = len(entries)
	}
	for rank := 0; rank < limit; rank++ {
		fmt.Printf("%d. %s (%d)\n", rank+1, entries[rank].key, entries[rank].count)
	}
}
// sortedKeys returns the keys of m in ascending lexicographic order,
// giving a deterministic month iteration order for printing.
func sortedKeys(m map[string][]Trip) []string {
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	sort.Strings(out)
	return out
}
// parseFloat parses s as a float64 after stripping commas (treated as
// thousands separators, e.g. "1,234.5" -> 1234.5); parse failures yield 0.
// NOTE(review): this assumes commas are grouping separators rather than
// Dutch decimal commas — confirm against the CSV export format.
func parseFloat(s string) float64 {
	cleaned := strings.ReplaceAll(s, ",", "")
	value, _ := strconv.ParseFloat(cleaned, 64)
	return value
}

View File

@@ -0,0 +1,365 @@
package main
import (
"fmt"
"math"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// colors maps human-readable color names to ANSI SGR escape sequences;
// the "reset" entry restores the terminal's default attributes.
var colors = map[string]string{
	"black":  "\033[0;30m",
	"red":    "\033[0;31m",
	"green":  "\033[0;32m",
	"yellow": "\033[0;33m",
	"blue":   "\033[0;34m",
	"purple": "\033[0;35m",
	"cyan":   "\033[0;36m",
	"white":  "\033[0;37m",
	"grey":   "\033[0;90m",
	"reset":  "\033[0m",
}
// DistroIcon pairs a distro logo glyph with the ANSI color used to print it.
type DistroIcon struct {
	Icon  string // glyph codepoint for the distro logo (Nerd Font private-use area)
	Color string // ANSI escape sequence, usually taken from the colors map
}
// DotfilesStatus summarizes the git state of the dotfiles repository.
type DotfilesStatus struct {
	IsDirty    bool   // true when `git status --porcelain` printed anything
	Untracked  int    // count of "??" porcelain entries
	Modified   int    // count of " M"/"MM" porcelain entries
	Staged     int    // count of "M "/"A " porcelain entries
	CommitHash string // short HEAD hash; empty if it couldn't be read
	Unpushed   int    // commits ahead of upstream; 0 if none or unknown
}
// main prints the login banner: logo, host/user summary, and dotfiles status.
func main() {
	welcome()
}
func rainbowColor(text string, freq float64, offset float64) string {
var result strings.Builder
for i, char := range text {
if strings.TrimSpace(string(char)) != "" { // Only color non-whitespace characters
// Calculate RGB values using sine waves with phase shifts
r := int(127*math.Sin(freq*float64(i)+offset+0) + 128)
g := int(127*math.Sin(freq*float64(i)+offset+2*math.Pi/3) + 128)
b := int(127*math.Sin(freq*float64(i)+offset+4*math.Pi/3) + 128)
// Apply the RGB color to the character
result.WriteString(fmt.Sprintf("\033[38;2;%d;%d;%dm%c\033[0m", r, g, b, char))
} else {
result.WriteRune(char)
}
}
return result.String()
}
// printLogo prints the ASCII-art banner line by line, rainbow-coloring
// every non-empty line and following with one trailing blank line.
func printLogo() {
	logo := ` __ ___ _ ____ __ _____ __
 / |/ /__ ____ ____ ____ ( )_____ / __ \____ / /_/ __(_) /__ _____
 / /|_/ / _ \/ __ \/ __ \/ __ \|// ___/ / / / / __ \/ __/ /_/ / / _ \/ ___/
 / / / / __/ / / / / / / /_/ / (__ ) / /_/ / /_/ / /_/ __/ / / __(__ )
/_/ /_/\___/_/ /_/_/ /_/\____/ /____/ /_____/\____/\__/_/ /_/_/\___/____/`
	lines := strings.Split(logo, "\n")
	for _, line := range lines {
		if strings.TrimSpace(line) != "" {
			// Fixed frequency/offset keep the gradient stable across runs.
			fmt.Println(rainbowColor(line, 0.1, 0))
		} else {
			fmt.Println()
		}
	}
	fmt.Println()
}
// getLastSSHLogin returns a colored one-line description of the current
// user's last SSH login, or "" when it cannot be determined. It shells out
// to lastlog (falling back to lastlog2) and parses the second output line.
//
// NOTE(review): the parse assumes the classic lastlog column layout —
// parts[1] = terminal (must contain "ssh"), parts[2] = source IP, and the
// remainder the timestamp. Confirm this holds for the target distro's
// lastlog/lastlog2 output.
func getLastSSHLogin() string {
	user := os.Getenv("USER")
	if user == "" {
		user = os.Getenv("USERNAME")
	}
	if user == "" {
		return ""
	}
	// Try lastlog first
	cmd := exec.Command("lastlog", "-u", user)
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Try lastlog2
		cmd = exec.Command("lastlog2", user)
		output, err = cmd.CombinedOutput()
		if err != nil {
			return ""
		}
	}
	// First line is the header; the second line holds the login record.
	lines := strings.Split(strings.TrimSpace(string(output)), "\n")
	if len(lines) >= 2 {
		parts := strings.Fields(lines[1])
		if len(parts) >= 7 && strings.Contains(parts[1], "ssh") {
			ip := parts[2]
			timeStr := strings.Join(parts[3:], " ")
			return fmt.Sprintf("%sLast SSH login%s%s %s%s from%s %s",
				colors["cyan"], colors["reset"], colors["yellow"], timeStr, colors["cyan"], colors["yellow"], ip)
		}
	}
	return ""
}
// checkDotfilesStatus inspects the dotfiles git repository (path from
// $DOTFILES_PATH, defaulting to ~/.dotfiles) and returns its status, or
// nil when the directory is not a git checkout. All git failures are
// soft: missing pieces simply stay at their zero values.
func checkDotfilesStatus() *DotfilesStatus {
	dotfilesPath := os.Getenv("DOTFILES_PATH")
	if dotfilesPath == "" {
		homeDir, _ := os.UserHomeDir()
		dotfilesPath = filepath.Join(homeDir, ".dotfiles")
	}
	// No .git directory => not a repository we can report on.
	gitPath := filepath.Join(dotfilesPath, ".git")
	if _, err := os.Stat(gitPath); os.IsNotExist(err) {
		return nil
	}
	status := &DotfilesStatus{}
	// Check git status (porcelain format: two status chars per line)
	cmd := exec.Command("git", "status", "--porcelain")
	cmd.Dir = dotfilesPath
	output, err := cmd.Output()
	if err == nil && strings.TrimSpace(string(output)) != "" {
		status.IsDirty = true
		lines := strings.Split(strings.TrimSpace(string(output)), "\n")
		for _, line := range lines {
			if strings.HasPrefix(line, "??") {
				status.Untracked++
			}
			// NOTE(review): "MM" (staged+modified) is only counted as
			// modified here, not staged — confirm that's intentional.
			if strings.HasPrefix(line, " M") || strings.HasPrefix(line, "MM") {
				status.Modified++
			}
			if strings.HasPrefix(line, "M ") || strings.HasPrefix(line, "A ") {
				status.Staged++
			}
		}
	}
	// Get commit hash
	cmd = exec.Command("git", "rev-parse", "--short", "HEAD")
	cmd.Dir = dotfilesPath
	output, err = cmd.Output()
	if err == nil {
		status.CommitHash = strings.TrimSpace(string(output))
	}
	// Count unpushed commits (@{u}.. lists commits ahead of upstream; the
	// command errors when no upstream is set, leaving Unpushed at 0)
	cmd = exec.Command("git", "log", "--oneline", "@{u}..")
	cmd.Dir = dotfilesPath
	output, err = cmd.Output()
	if err == nil {
		lines := strings.Split(strings.TrimSpace(string(output)), "\n")
		if len(lines) > 0 && lines[0] != "" {
			status.Unpushed = len(lines)
		}
	}
	return status
}
// getCondensedStatus builds a one-line summary of attention-worthy state:
// files in the user's trash plus dotfiles git status. It returns the
// joined status line (empty when there is nothing to report) and,
// separately, the colored commit-hash tag — the hash is only returned
// standalone when the repo is clean; when dirty it is folded into the
// status line and the second return value is "".
func getCondensedStatus() (string, string) {
	var statusParts []string
	var hashInfo string
	// Check trash status (count of entries in the XDG trash directory)
	homeDir, _ := os.UserHomeDir()
	trashPath := filepath.Join(homeDir, ".local", "share", "Trash", "files")
	if entries, err := os.ReadDir(trashPath); err == nil {
		count := len(entries)
		if count > 0 {
			statusParts = append(statusParts, fmt.Sprintf("[!] %d file(s) in trash", count))
		}
	}
	// Check dotfiles status
	dotfilesStatus := checkDotfilesStatus()
	if dotfilesStatus != nil {
		if dotfilesStatus.IsDirty {
			statusParts = append(statusParts, fmt.Sprintf("%sdotfiles is dirty%s", colors["yellow"], colors["reset"]))
			statusParts = append(statusParts, fmt.Sprintf("%s[%d] untracked%s", colors["red"], dotfilesStatus.Untracked, colors["reset"]))
			statusParts = append(statusParts, fmt.Sprintf("%s[%d] modified%s", colors["yellow"], dotfilesStatus.Modified, colors["reset"]))
			statusParts = append(statusParts, fmt.Sprintf("%s[%d] staged%s", colors["green"], dotfilesStatus.Staged, colors["reset"]))
		}
		if dotfilesStatus.CommitHash != "" {
			hashInfo = fmt.Sprintf("%s[%s%s%s]%s", colors["white"], colors["blue"], dotfilesStatus.CommitHash, colors["white"], colors["reset"])
			// Dirty repo: show the hash inline with the other status parts.
			if dotfilesStatus.IsDirty {
				statusParts = append(statusParts, hashInfo)
				hashInfo = ""
			}
		}
		if dotfilesStatus.Unpushed > 0 {
			statusParts = append(statusParts, fmt.Sprintf("%s[!] You have %d commit(s) to push%s", colors["yellow"], dotfilesStatus.Unpushed, colors["reset"]))
		}
	} else {
		statusParts = append(statusParts, "Unable to check dotfiles status")
	}
	statusLine := ""
	if len(statusParts) > 0 {
		statusLine = strings.Join(statusParts, " - ")
	}
	return statusLine, hashInfo
}
// runDotfilesCommand executes the external `dotfiles` CLI with the given
// arguments and returns its trimmed stdout, or an error when the command
// cannot run or exits non-zero.
func runDotfilesCommand(args ...string) (string, error) {
	out, err := exec.Command("dotfiles", args...).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
// getDistroIcon resolves the current distro name (via the dotfiles CLI,
// falling back to runtime.GOOS) to a logo glyph and its display color.
// Exact map hits are tried first, then substring matches; two empty
// strings mean no icon is known for this platform.
func getDistroIcon() (string, string) {
	distroIcons := map[string]DistroIcon{
		"windows": {"\uf17a", colors["blue"]},   // blue
		"linux":   {"\uf17c", colors["yellow"]}, // yellow
		"ubuntu":  {"\uf31b", "\033[38;5;208m"}, // orange (ANSI 208)
		"debian":  {"\uf306", colors["red"]},    // red
		"arch":    {"\uf303", colors["cyan"]},   // cyan
		"fedora":  {"\uf30a", colors["blue"]},   // blue
		"alpine":  {"\uf300", colors["cyan"]},   // cyan
		"macos":   {"\uf179", colors["white"]},  // white
		"darwin":  {"\uf179", colors["white"]},  // white
		"osx":     {"\uf179", colors["white"]},  // white
	}
	name, err := runDotfilesCommand("variables", "get", "Platform.Distro", "--format", "raw")
	if err != nil {
		name = runtime.GOOS
	}
	name = strings.ToLower(name)
	if entry, ok := distroIcons[name]; ok {
		return entry.Icon, entry.Color
	}
	// Fall back to a substring match (e.g. "ubuntu 22.04" -> "ubuntu").
	for key, entry := range distroIcons {
		if strings.Contains(name, key) {
			return entry.Icon, entry.Color
		}
	}
	return "", ""
}
func detectShell() string {
// Check for PowerShell profile
if os.Getenv("PROFILE") != "" || os.Getenv("PW_SH_PROFILE") != "" || os.Getenv("PSModulePath") != "" {
return "powershell"
}
if shell := os.Getenv("SHELL"); shell != "" {
return filepath.Base(shell)
}
if comspec := os.Getenv("COMSPEC"); comspec != "" {
if strings.HasSuffix(strings.ToLower(comspec), "cmd.exe") {
if os.Getenv("PROFILE") != "" {
return "Powershell"
}
return "CMD"
}
return filepath.Base(comspec)
}
return "unknown"
}
// welcome prints the full login banner: the rainbow logo, a compact
// user@host line with distro icon, shell/arch, colored package-manager
// list and (when the repo is clean) the dotfiles commit hash, followed by
// the last SSH login and a condensed status line when either has content.
func welcome() {
	printLogo()
	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown-host"
	}
	// Get distro icon
	distroIcon, iconColor := getDistroIcon()
	// Get username (POSIX USER first, Windows USERNAME as fallback)
	username := os.Getenv("USER")
	if username == "" {
		username = os.Getenv("USERNAME")
	}
	if username == "" {
		username = "user"
	}
	// Get SSH login info
	sshLogin := getLastSSHLogin()
	// Get shell and arch
	shell := detectShell()
	arch := runtime.GOARCH
	// Capitalize shell and arch for display
	// NOTE(review): strings.Title is deprecated since Go 1.18; consider
	// golang.org/x/text/cases or manual capitalization.
	shellDisp := strings.Title(shell)
	archDisp := strings.ToUpper(arch)
	// Get package managers from the dotfiles CLI (best effort; empty on error)
	pkgMgrs, err := runDotfilesCommand("variables", "get", "Platform.AvailablePackageManagers", "--format", "raw")
	if err != nil {
		pkgMgrs = ""
	}
	// Compact single line: user@hostname with icon, shell, arch
	fmt.Printf("%s%s%s@%s%s", colors["green"], username, colors["cyan"], colors["yellow"], hostname)
	if distroIcon != "" {
		fmt.Printf(" %s%s", iconColor, distroIcon)
	}
	fmt.Printf("%s running %s%s%s/%s%s", colors["cyan"], colors["blue"], shellDisp, colors["cyan"], colors["purple"], archDisp)
	if pkgMgrs != "" {
		// Parse and color package managers — presumably a "[apt, brew]"
		// style list from the dotfiles CLI; strip brackets/commas, then
		// cycle through a fixed color palette per manager.
		pkgMgrs = strings.Trim(pkgMgrs, "[]")
		pmList := strings.Fields(strings.ReplaceAll(pkgMgrs, ",", ""))
		pmColors := []string{colors["yellow"], colors["green"], colors["cyan"], colors["red"], colors["blue"]}
		var coloredPMs []string
		for i, pm := range pmList {
			color := pmColors[i%len(pmColors)]
			coloredPMs = append(coloredPMs, fmt.Sprintf("%s%s", color, pm))
		}
		fmt.Printf("%s [%s%s]", colors["cyan"], strings.Join(coloredPMs, colors["cyan"]+"/"), colors["reset"])
	} else {
		fmt.Printf("%s", colors["reset"])
	}
	// Get status info
	condensedStatus, hashInfo := getCondensedStatus()
	// Add hash to same line if dotfiles is clean (hashInfo is "" when dirty)
	if hashInfo != "" {
		fmt.Printf(" %s", hashInfo)
	}
	fmt.Println()
	// Display last SSH login info if available
	if sshLogin != "" {
		fmt.Printf("%s%s\n", sshLogin, colors["reset"])
	}
	// Display condensed status line only if there are issues
	if condensedStatus != "" {
		fmt.Printf("%s%s%s\n", colors["yellow"], condensedStatus, colors["reset"])
	}
}

View File

@@ -0,0 +1,748 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
)
// ANSI SGR escape sequences for colorized terminal output.
const (
	Red    = "\033[0;31m"
	Green  = "\033[0;32m"
	Yellow = "\033[1;33m" // bold yellow
	Blue   = "\033[0;34m"
	Cyan   = "\033[0;36m"
	Bold   = "\033[1m"
	NC     = "\033[0m" // No Color — resets all attributes
)
// ProcessInfo describes the process found listening on a port.
type ProcessInfo struct {
	PID         int    // process id
	ProcessName string // executable/command name as reported by the probe tool
	Protocol    string // protocol column from the tool (e.g. "tcp"/"udp"), may be empty
	DockerInfo  string // extra Docker annotation, empty for non-container processes
}
// DockerContainer describes a running Docker container and its port mappings.
type DockerContainer struct {
	Name    string        // container name
	Image   string        // image reference the container runs
	Ports   []PortMapping // published port mappings (empty for host networking)
	Network string        // network mode
}
// PortMapping is one host→container published port binding.
type PortMapping struct {
	ContainerPort int    // port inside the container
	HostPort      int    // port exposed on the host
	Protocol      string // "tcp" or "udp"
	IPv6          bool   // true when the binding is on an IPv6 address
}
// main dispatches on the first CLI argument: --help/-h prints the help
// text, --list/-l lists Docker services, and anything else is validated
// as a port number (1-65535) and checked for use. Exits 1 on missing or
// invalid arguments.
func main() {
	if len(os.Args) < 2 {
		showUsage()
		os.Exit(1)
	}
	switch arg := os.Args[1]; arg {
	case "--help", "-h":
		showHelp()
	case "--list", "-l":
		listDockerServices()
	default:
		port, err := strconv.Atoi(arg)
		if err != nil || port < 1 || port > 65535 {
			fmt.Printf("%sError:%s Invalid port number. Must be between 1 and 65535.\n", Red, NC)
			os.Exit(1)
		}
		checkPort(port)
	}
}
// showUsage prints the short usage synopsis to stdout.
func showUsage() {
	usage := []string{
		fmt.Sprintf("%sUsage:%s inuse <port_number>", Red, NC),
		fmt.Sprintf("%s inuse --list%s", Yellow, NC),
		fmt.Sprintf("%s inuse --help%s", Yellow, NC),
		fmt.Sprintf("%sExample:%s inuse 80", Yellow, NC),
		fmt.Sprintf("%s inuse --list%s", Yellow, NC),
	}
	for _, line := range usage {
		fmt.Println(line)
	}
}
// showHelp prints the full help text (usage, examples, description, and
// output legend) to stdout.
func showHelp() {
	fmt.Printf("%s%sinuse - Check if a port is in use%s\n\n", Cyan, Bold, NC)
	fmt.Printf("%sUSAGE:%s\n", Bold, NC)
	fmt.Printf(" inuse <port_number> Check if a specific port is in use\n")
	fmt.Printf(" inuse --list, -l List all Docker services with listening ports\n")
	fmt.Printf(" inuse --help, -h Show this help message\n\n")
	fmt.Printf("%sEXAMPLES:%s\n", Bold, NC)
	fmt.Printf(" %sinuse 80%s Check if port 80 is in use\n", Green, NC)
	fmt.Printf(" %sinuse 3000%s Check if port 3000 is in use\n", Green, NC)
	fmt.Printf(" %sinuse --list%s Show all Docker services with ports\n\n", Green, NC)
	fmt.Printf("%sDESCRIPTION:%s\n", Bold, NC)
	fmt.Printf(" The inuse function checks if a specific port is in use and identifies\n")
	fmt.Printf(" the process using it. It can detect regular processes, Docker containers\n")
	fmt.Printf(" with published ports, and containers using host networking.\n\n")
	fmt.Printf("%sOUTPUT:%s\n", Bold, NC)
	fmt.Printf(" %s✓%s Port is in use - shows process name, PID, and Docker info if applicable\n", Green, NC)
	fmt.Printf(" %s✗%s Port is free\n", Red, NC)
	fmt.Printf(" %s⚠%s Port is in use but process cannot be identified\n", Yellow, NC)
}
// listDockerServices prints every running Docker container that publishes
// ports (with its host→container mappings), followed by containers using
// host networking. Exits 1 when Docker itself is unavailable.
func listDockerServices() {
	if !isDockerAvailable() {
		fmt.Printf("%sError:%s Docker is not available\n", Red, NC)
		os.Exit(1)
	}
	fmt.Printf("%s%sDocker Services with Listening Ports:%s\n\n", Cyan, Bold, NC)
	containers := getRunningContainers()
	if len(containers) == 0 {
		fmt.Printf("%sNo running Docker containers found%s\n", Yellow, NC)
		return
	}
	foundServices := false
	// Containers with published ports.
	for _, container := range containers {
		if len(container.Ports) > 0 {
			cleanImage := cleanImageName(container.Image)
			fmt.Printf("%s📦 %s%s%s %s(%s)%s\n", Green, Bold, container.Name, NC, Cyan, cleanImage, NC)
			for _, port := range container.Ports {
				ipv6Marker := ""
				if port.IPv6 {
					ipv6Marker = " [IPv6]"
				}
				fmt.Printf("%s ├─ Port %s%d%s%s → %d (%s)%s%s\n",
					Cyan, Bold, port.HostPort, NC, Cyan, port.ContainerPort, port.Protocol, ipv6Marker, NC)
			}
			fmt.Println()
			foundServices = true
		}
	}
	// Check for host networking containers (they expose no port mappings
	// but share the host's ports directly)
	hostContainers := getHostNetworkingContainers()
	if len(hostContainers) > 0 {
		fmt.Printf("%s%sHost Networking Containers:%s\n", Yellow, Bold, NC)
		for _, container := range hostContainers {
			cleanImage := cleanImageName(container.Image)
			fmt.Printf("%s🌐 %s%s%s %s(%s)%s %s- uses host networking%s\n",
				Yellow, Bold, container.Name, NC, Cyan, cleanImage, NC, Yellow, NC)
		}
		fmt.Println()
		foundServices = true
	}
	if !foundServices {
		fmt.Printf("%sNo Docker services with exposed ports found%s\n", Yellow, NC)
	}
}
// checkPort reports what is using the given port. It exits 1 when the
// port is free; otherwise it tries, in order: OS-level process probes,
// Docker containers with published ports, and host-networking heuristics,
// printing the first identification that succeeds (or a warning with
// possible explanations when none do).
func checkPort(port int) {
	// Check if port is in use first
	if !isPortInUse(port) {
		fmt.Printf("%s✗ Port %d is FREE%s\n", Red, port, NC)
		os.Exit(1)
	}
	// Port is in use, now find what's using it
	process := findProcessUsingPort(port)
	if process != nil {
		dockerInfo := ""
		if process.DockerInfo != "" {
			dockerInfo = " " + process.DockerInfo
		}
		fmt.Printf("%s✓ Port %d (%s) in use by %s%s%s %sas PID %s%d%s%s\n",
			Green, port, process.Protocol, Bold, process.ProcessName, NC, Green, Bold, process.PID, NC, dockerInfo)
		return
	}
	// Check if it's a Docker container with a published port
	containerInfo := findDockerContainerUsingPort(port)
	if containerInfo != "" {
		fmt.Printf("%s✓ Port %d in use by Docker container %s\n", Green, port, containerInfo)
		return
	}
	// If we still haven't found the process, check for host networking containers more thoroughly
	hostNetworkProcess := findHostNetworkingProcess(port)
	if hostNetworkProcess != "" {
		fmt.Printf("%s✓ Port %d likely in use by %s\n", Green, port, hostNetworkProcess)
		return
	}
	// If we still haven't found the process, explain the likely causes.
	fmt.Printf("%s⚠ Port %d is in use but unable to identify the process%s\n", Yellow, port, NC)
	if isDockerAvailable() {
		hostContainers := getHostNetworkingContainers()
		if len(hostContainers) > 0 {
			fmt.Printf("%s Note: Found Docker containers using host networking:%s\n", Cyan, NC)
			for _, container := range hostContainers {
				cleanImage := cleanImageName(container.Image)
				fmt.Printf("%s - %s (%s)%s\n", Cyan, container.Name, cleanImage, NC)
			}
			fmt.Printf("%s These containers share the host's network, so one of them might be using this port%s\n", Cyan, NC)
		} else {
			fmt.Printf("%s This might be due to insufficient permissions or the process being in a different namespace%s\n", Cyan, NC)
		}
	} else {
		fmt.Printf("%s This might be due to insufficient permissions or the process being in a different namespace%s\n", Cyan, NC)
	}
}
// isPortInUse reports whether any line of `ss -tulpn` output (or
// `netstat -tulpn` as a fallback) contains ":<port> ". Returns false when
// neither tool is available or both invocations fail.
//
// NOTE(review): the plain substring match can false-positive when the
// number appears in another column (e.g. a peer address) and can miss a
// listener whose ":<port>" ends the line with no trailing space —
// consider a per-line check against the local-address column.
func isPortInUse(port int) bool {
	// Try ss first
	if isCommandAvailable("ss") {
		cmd := exec.Command("ss", "-tulpn")
		output, err := cmd.Output()
		if err == nil {
			portPattern := fmt.Sprintf(":%d ", port)
			return strings.Contains(string(output), portPattern)
		}
	}
	// Try netstat as fallback
	if isCommandAvailable("netstat") {
		cmd := exec.Command("netstat", "-tulpn")
		output, err := cmd.Output()
		if err == nil {
			portPattern := fmt.Sprintf(":%d ", port)
			return strings.Contains(string(output), portPattern)
		}
	}
	return false
}
// findProcessUsingPort probes netstat, ss, lsof, then fuser — in that
// order — and returns the first process found on the port, or nil when
// every probe comes up empty.
func findProcessUsingPort(port int) *ProcessInfo {
	probes := []func(int) *ProcessInfo{tryNetstat, trySS, tryLsof, tryFuser}
	for _, probe := range probes {
		if info := probe(port); info != nil {
			return info
		}
	}
	return nil
}
// tryNetstat scans `netstat -tulpn` (retrying under sudo on failure) for a
// line containing ":<port> " and parses its PID/Program column
// ("<pid>/<name>"). Returns nil when netstat is unavailable, fails, or no
// matching line parses.
func tryNetstat(port int) *ProcessInfo {
	if !isCommandAvailable("netstat") {
		return nil
	}
	cmd := exec.Command("netstat", "-tulpn")
	output, err := cmd.Output()
	if err != nil {
		// Try with sudo if available (unprivileged netstat hides other
		// users' PIDs)
		if isCommandAvailable("sudo") {
			cmd = exec.Command("sudo", "netstat", "-tulpn")
			output, err = cmd.Output()
			if err != nil {
				return nil
			}
		} else {
			return nil
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	portPattern := fmt.Sprintf(":%d ", port)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, portPattern) {
			fields := strings.Fields(line)
			// Field 6 is netstat's "PID/Program name" column.
			if len(fields) >= 7 {
				pidProcess := fields[6]
				parts := strings.Split(pidProcess, "/")
				if len(parts) >= 2 {
					if pid, err := strconv.Atoi(parts[0]); err == nil {
						processName := parts[1]
						protocol := fields[0]
						dockerInfo := getDockerInfo(pid, processName, port)
						return &ProcessInfo{
							PID:         pid,
							ProcessName: processName,
							Protocol:    protocol,
							DockerInfo:  dockerInfo,
						}
					}
				}
			}
		}
	}
	return nil
}
// trySS scans `ss -tulpn` (retrying under sudo on failure) for a line
// containing ":<port> ", extracts the PID via the "pid=<n>" token, and
// resolves the process name through getProcessName. Returns nil when ss
// is unavailable, fails, or no matching line yields a named process.
func trySS(port int) *ProcessInfo {
	if !isCommandAvailable("ss") {
		return nil
	}
	cmd := exec.Command("ss", "-tulpn")
	output, err := cmd.Output()
	if err != nil {
		// Try with sudo if available (unprivileged ss hides other users'
		// PIDs)
		if isCommandAvailable("sudo") {
			cmd = exec.Command("sudo", "ss", "-tulpn")
			output, err = cmd.Output()
			if err != nil {
				return nil
			}
		} else {
			return nil
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	portPattern := fmt.Sprintf(":%d ", port)
	// ss embeds the owner as users:(("name",pid=123,fd=4)).
	pidRegex := regexp.MustCompile(`pid=(\d+)`)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, portPattern) {
			matches := pidRegex.FindStringSubmatch(line)
			if len(matches) >= 2 {
				if pid, err := strconv.Atoi(matches[1]); err == nil {
					processName := getProcessName(pid)
					if processName != "" {
						fields := strings.Fields(line)
						protocol := ""
						if len(fields) > 0 {
							protocol = fields[0]
						}
						dockerInfo := getDockerInfo(pid, processName, port)
						return &ProcessInfo{
							PID:         pid,
							ProcessName: processName,
							Protocol:    protocol,
							DockerInfo:  dockerInfo,
						}
					}
				}
			}
		}
	}
	return nil
}
// tryLsof identifies the LISTENing process on `port` via
// `lsof -i :<port> -n -P` (-n/-P: no DNS or service-name resolution).
// Returns nil when lsof is unavailable or reports no listener.
func tryLsof(port int) *ProcessInfo {
	if !isCommandAvailable("lsof") {
		return nil
	}
	target := fmt.Sprintf(":%d", port)
	output, err := exec.Command("lsof", "-i", target, "-n", "-P").Output()
	if err != nil {
		// lsof exits non-zero both on errors and when nothing matches; retry
		// via sudo in case the socket belongs to another user. The "-n" flag
		// keeps sudo non-interactive so this call can never hang on a
		// password prompt (plain `sudo` could block forever on a TTY).
		if !isCommandAvailable("sudo") {
			return nil
		}
		if output, err = exec.Command("sudo", "-n", "lsof", "-i", target, "-n", "-P").Output(); err != nil {
			return nil
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.Contains(line, "LISTEN") {
			continue
		}
		// lsof columns: COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME.
		fields := strings.Fields(line)
		if len(fields) < 2 {
			continue
		}
		pid, convErr := strconv.Atoi(fields[1])
		if convErr != nil {
			continue
		}
		return &ProcessInfo{
			PID:         pid,
			ProcessName: fields[0],
			Protocol:    "tcp", // only LISTEN (TCP) rows are considered above
			DockerInfo:  getDockerInfo(pid, fields[0], port),
		}
	}
	return nil
}
// tryFuser identifies the listener on `port` using `fuser <port>/tcp`.
// fuser prints matching PIDs on stdout; the first PID whose command name
// can be resolved wins. Returns nil when fuser is missing, fails, or no
// PID can be resolved to a name.
func tryFuser(port int) *ProcessInfo {
	if !isCommandAvailable("fuser") {
		return nil
	}
	out, err := exec.Command("fuser", fmt.Sprintf("%d/tcp", port)).Output()
	if err != nil {
		return nil
	}
	for _, field := range strings.Fields(string(out)) {
		pid, convErr := strconv.Atoi(strings.TrimSpace(field))
		if convErr != nil {
			continue
		}
		name := getProcessName(pid)
		if name == "" {
			continue
		}
		// fuser gives no protocol details beyond our "/tcp" query and no
		// Docker context, so those fields are fixed.
		return &ProcessInfo{
			PID:         pid,
			ProcessName: name,
			Protocol:    "tcp",
			DockerInfo:  "",
		}
	}
	return nil
}
// getProcessName returns the command name (ps "comm" column) for a PID,
// or "" when the lookup fails (e.g. the process no longer exists).
func getProcessName(pid int) string {
	// "comm=" suppresses the header so stdout is just the bare name.
	raw, psErr := exec.Command("ps", "-p", strconv.Itoa(pid), "-o", "comm=").Output()
	if psErr != nil {
		return ""
	}
	return strings.TrimSpace(string(raw))
}
// getDockerInfo builds a colourised annotation describing the Docker
// context of the process listening on `port` (proxy, containerised, or
// host-network container), or "" when Docker is not installed or the
// process is unrelated to any container.
func getDockerInfo(pid int, processName string, port int) string {
	if !isDockerAvailable() {
		return ""
	}
	// Check if it's docker-proxy (handle truncated names like "docker-pr":
	// the kernel truncates comm values, so the full name may not appear).
	if processName == "docker-proxy" || strings.HasPrefix(processName, "docker-pr") {
		containerName := getContainerByPublishedPort(port)
		if containerName != "" {
			image := getContainerImage(containerName)
			cleanImage := cleanImageName(image)
			return fmt.Sprintf("%s(Docker: %s, image: %s)%s", Cyan, containerName, cleanImage, NC)
		}
		// Proxy found but no owning container could be resolved.
		return fmt.Sprintf("%s(Docker proxy)%s", Cyan, NC)
	}
	// Check if process is in a Docker container using cgroup membership.
	containerInfo := getContainerByPID(pid)
	if containerInfo != "" {
		return fmt.Sprintf("%s(Docker: %s)%s", Cyan, containerInfo, NC)
	}
	// Check if this process might be in a host networking container
	// (no cgroup "docker" hit, but a host-network container runs it).
	hostContainer := checkHostNetworkingContainer(pid, processName)
	if hostContainer != "" {
		return fmt.Sprintf("%s(Docker host network: %s)%s", Cyan, hostContainer, NC)
	}
	return ""
}
// getContainerByPID maps a host PID to a Docker container by scanning
// /proc/<pid>/cgroup for a 64-hex-character container ID. Returns the
// container name when resolvable, the 12-character short ID as a fallback,
// or "" when the PID is not containerised (or /proc is unreadable).
// NOTE(review): matching a "docker" substring fits cgroup-v1 paths; pure
// cgroup-v2 systemd scopes may not contain "docker" — confirm on v2 hosts.
func getContainerByPID(pid int) string {
	cgroupPath := fmt.Sprintf("/proc/%d/cgroup", pid)
	file, err := os.Open(cgroupPath)
	if err != nil {
		return ""
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	// Full Docker container IDs are 64 lowercase hex characters.
	containerIDRegex := regexp.MustCompile(`[a-f0-9]{64}`)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "docker") {
			matches := containerIDRegex.FindStringSubmatch(line)
			if len(matches) > 0 {
				containerID := matches[0]
				containerName := getContainerNameByID(containerID)
				if containerName != "" {
					return containerName
				}
				// Name lookup failed (container removed?); show the short ID.
				return containerID[:12]
			}
		}
	}
	return ""
}
// findDockerContainerUsingPort returns a formatted, colourised label for
// the container publishing `port` ("<name> (published port, image: <img>)"),
// or "" when Docker is missing or no container publishes that port.
// NOTE(review): overlaps with getContainerByPublishedPort + getDockerInfo;
// confirm this variant is still referenced before extending it.
func findDockerContainerUsingPort(port int) string {
	if !isDockerAvailable() {
		return ""
	}
	// Check for containers with published ports matching `port`.
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", fmt.Sprintf("publish=%d", port))
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	containerName := strings.TrimSpace(string(output))
	if containerName != "" {
		image := getContainerImage(containerName)
		cleanImage := cleanImageName(image)
		return fmt.Sprintf("%s%s%s %s(published port, image: %s)%s", Bold, containerName, NC, Cyan, cleanImage, NC)
	}
	return ""
}
// isDockerAvailable reports whether the `docker` CLI can be found on PATH.
func isDockerAvailable() bool {
	_, lookErr := exec.LookPath("docker")
	return lookErr == nil
}
// isCommandAvailable reports whether the named executable can be resolved
// via the PATH environment variable.
func isCommandAvailable(command string) bool {
	if _, lookErr := exec.LookPath(command); lookErr != nil {
		return false
	}
	return true
}
// getRunningContainers returns one DockerContainer entry (name, image,
// published ports) per currently running container, or nil when Docker is
// unavailable or `docker ps` fails.
func getRunningContainers() []DockerContainer {
	if !isDockerAvailable() {
		return nil
	}
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}")
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	var containers []DockerContainer
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		containerName := strings.TrimSpace(scanner.Text())
		if containerName != "" {
			// Each entry costs extra `docker inspect` / `docker port` calls.
			container := DockerContainer{
				Name:  containerName,
				Image: getContainerImage(containerName),
				Ports: getContainerPorts(containerName),
			}
			containers = append(containers, container)
		}
	}
	return containers
}
// getHostNetworkingContainers returns the running containers attached to
// the host network (docker ps --filter network=host). Such containers bind
// ports directly on the host, so they have no published-port mappings;
// only Name and Image are populated, with Network fixed to "host".
// Returns nil when Docker is unavailable or the listing fails.
func getHostNetworkingContainers() []DockerContainer {
	if !isDockerAvailable() {
		return nil
	}
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", "network=host")
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	var containers []DockerContainer
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		containerName := strings.TrimSpace(scanner.Text())
		if containerName != "" {
			container := DockerContainer{
				Name:    containerName,
				Image:   getContainerImage(containerName),
				Network: "host",
			}
			containers = append(containers, container)
		}
	}
	return containers
}
// getContainerImage returns the image reference recorded in a container's
// Config.Image field via `docker inspect`, or "" on any failure
// (docker error, malformed JSON, or missing/odd-shaped fields).
func getContainerImage(containerName string) string {
	output, err := exec.Command("docker", "inspect", containerName).Output()
	if err != nil {
		return ""
	}
	var inspectData []map[string]interface{}
	if err := json.Unmarshal(output, &inspectData); err != nil || len(inspectData) == 0 {
		return ""
	}
	// Guard each type assertion: the original chained assertion
	// inspectData[0]["Config"].(map[string]interface{}) panicked when
	// "Config" was absent or null in the inspect output.
	config, ok := inspectData[0]["Config"].(map[string]interface{})
	if !ok {
		return ""
	}
	if image, ok := config["Image"].(string); ok {
		return image
	}
	return ""
}
// getContainerPorts returns the port mappings published by a container,
// parsed from `docker port <name>` lines such as
// "80/tcp -> 0.0.0.0:8080" or "80/tcp -> [::]:8080".
// Returns nil when the command fails; an empty slice when nothing matches.
func getContainerPorts(containerName string) []PortMapping {
	output, err := exec.Command("docker", "port", containerName).Output()
	if err != nil {
		return nil
	}
	// Accept any bind address, not just 0.0.0.0 / [::]: ports published to a
	// specific interface (e.g. 127.0.0.1:8080) were previously missed.
	ipv4Re := regexp.MustCompile(`(\d+)/(tcp|udp) -> (?:\d{1,3}\.){3}\d{1,3}:(\d+)`)
	ipv6Re := regexp.MustCompile(`(\d+)/(tcp|udp) -> \[[0-9a-fA-F:]+\]:(\d+)`)
	var ports []PortMapping
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		line := scanner.Text()
		// IPv4 binding (capture groups: container port, protocol, host port).
		if m := ipv4Re.FindStringSubmatch(line); len(m) == 4 {
			containerPort, _ := strconv.Atoi(m[1])
			hostPort, _ := strconv.Atoi(m[3])
			ports = append(ports, PortMapping{
				ContainerPort: containerPort,
				HostPort:      hostPort,
				Protocol:      m[2],
				IPv6:          false,
			})
		}
		// IPv6 binding (bracketed address form).
		if m := ipv6Re.FindStringSubmatch(line); len(m) == 4 {
			containerPort, _ := strconv.Atoi(m[1])
			hostPort, _ := strconv.Atoi(m[3])
			ports = append(ports, PortMapping{
				ContainerPort: containerPort,
				HostPort:      hostPort,
				Protocol:      m[2],
				IPv6:          true,
			})
		}
	}
	return ports
}
// getContainerByPublishedPort returns the name of the running container
// that publishes `port`, or "" when none does (or docker fails).
// NOTE(review): if several containers publish the same port, docker prints
// one name per line and this returns the newline-joined list — confirm
// callers only expect a single name.
func getContainerByPublishedPort(port int) string {
	filter := fmt.Sprintf("publish=%d", port)
	out, err := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", filter).Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(out))
}
// getContainerNameByID resolves a container's human-readable name from its
// (full or short) ID via `docker inspect`; "" on any failure.
func getContainerNameByID(containerID string) string {
	raw, err := exec.Command("docker", "inspect", containerID).Output()
	if err != nil {
		return ""
	}
	var details []map[string]interface{}
	if json.Unmarshal(raw, &details) != nil || len(details) == 0 {
		return ""
	}
	name, ok := details[0]["Name"].(string)
	if !ok {
		return ""
	}
	// docker inspect reports names with a leading slash ("/web").
	return strings.TrimPrefix(name, "/")
}
// cleanImageName normalises a Docker image reference for display: any raw
// sha256 digest is replaced with "[image-hash]" and registry/namespace
// prefixes are dropped, keeping only the final path segment.
func cleanImageName(image string) string {
	withoutDigest := regexp.MustCompile(`sha256:[a-f0-9]*`).ReplaceAllString(image, "[image-hash]")
	// strings.Split never returns an empty slice, so the last segment is safe.
	segments := strings.Split(withoutDigest, "/")
	return segments[len(segments)-1]
}
// findHostNetworkingProcess returns a formatted, colourised label for a
// host-network container that appears to be listening on `port`, or ""
// when none is found. Detection shells into each host-network container
// (see isContainerUsingPort), so this is comparatively expensive.
func findHostNetworkingProcess(port int) string {
	if !isDockerAvailable() {
		return ""
	}
	// Get all host networking containers (they bind ports on the host
	// directly, so docker's publish filter cannot find them).
	hostContainers := getHostNetworkingContainers()
	for _, container := range hostContainers {
		// Check if this container might be using the port
		if isContainerUsingPort(container.Name, port) {
			cleanImage := cleanImageName(container.Image)
			return fmt.Sprintf("%s%s%s %s(Docker host network: %s)%s", Bold, container.Name, NC, Cyan, cleanImage, NC)
		}
	}
	return ""
}
// isContainerUsingPort reports whether the named container appears to be
// listening on `port`, by running netstat (or ss as a fallback) inside the
// container. Any exec failure — including the container lacking both
// tools — is treated as "not using the port".
func isContainerUsingPort(containerName string, port int) bool {
	shellCmd := fmt.Sprintf("netstat -tlnp 2>/dev/null | grep ':%d ' || ss -tlnp 2>/dev/null | grep ':%d '", port, port)
	out, err := exec.Command("docker", "exec", containerName, "sh", "-c", shellCmd).Output()
	return err == nil && len(out) > 0
}
// checkHostNetworkingContainer tries to attribute a host process to a
// host-network container by searching for the process inside each such
// container. Returns "name (image)" on a match, "" otherwise.
// NOTE(review): the shell pipeline greps by process name and then for
// "pid OR name" again, so any container running a same-named process
// matches regardless of PID; container PID namespaces also differ from the
// host unless --pid=host is used — treat results as a heuristic, and
// confirm before relying on the PID part of the match.
func checkHostNetworkingContainer(pid int, processName string) string {
	if !isDockerAvailable() {
		return ""
	}
	// Get all host networking containers and check if any match this process
	hostContainers := getHostNetworkingContainers()
	for _, container := range hostContainers {
		// Try to find this process inside the container (exit status 0 from
		// the grep chain is taken as a match).
		cmd := exec.Command("docker", "exec", container.Name, "sh", "-c",
			fmt.Sprintf("ps -o pid,comm | grep '%s' | grep -q '%d\\|%s'", processName, pid, processName))
		err := cmd.Run()
		if err == nil {
			cleanImage := cleanImageName(container.Image)
			return fmt.Sprintf("%s (%s)", container.Name, cleanImage)
		}
	}
	return ""
}

249
ansible/tasks/global/utils/ipaddr Executable file
View File

@@ -0,0 +1,249 @@
#!/usr/bin/env python3
import os
import subprocess
import argparse
import requests
def get_physical_interfaces():
    """Return the names of physical network interfaces.

    An interface under /sys/class/net/ is considered physical when its
    ``device`` entry is a symlink to the backing hardware device.

    Returns:
        list[str]: Physical interface names, in directory-listing order.
    """
    base = '/sys/class/net/'
    return [
        name for name in os.listdir(base)
        if os.path.islink(os.path.join(base, name, 'device'))
    ]
def get_virtual_interfaces():
    """Return the names of virtual network interfaces.

    An interface is considered virtual when its /sys/class/net/<name>/device
    entry is *not* a symlink to hardware. The loopback interface ``lo`` is
    always excluded.

    Returns:
        list[str]: Virtual interface names, in directory-listing order.
    """
    base = '/sys/class/net/'
    return [
        name for name in os.listdir(base)
        if name != 'lo' and not os.path.islink(os.path.join(base, name, 'device'))
    ]
def get_up_interfaces(interfaces):
    """Filter interface names down to those reported UP (or UNKNOWN).

    UNKNOWN is treated as up because tunnel-style devices (tun/wireguard)
    commonly report it while carrying traffic. Interfaces for which
    ``ip link show`` fails are silently skipped.

    Args:
        interfaces (list[str]): Interface names to probe.

    Returns:
        list[str]: The subset that is up, preserving input order.
    """
    up = []
    for name in interfaces:
        try:
            probe = subprocess.run(['ip', 'link', 'show', name],
                                   capture_output=True, text=True, check=True)
        except Exception:
            continue
        if "state UP" in probe.stdout or "state UNKNOWN" in probe.stdout:
            up.append(name)
    return up
def get_interface_state(interface):
    """Return ``(state, mac)`` for an interface via ``ip link show``.

    State is "UP", "DOWN" or "UNKNOWN"; a kernel UNKNOWN state is reported
    as "UP" (typical for tun/wireguard devices). MAC is "N/A" when it
    cannot be parsed. Any failure yields ("UNKNOWN", "N/A").
    """
    try:
        result = subprocess.run(['ip', 'link', 'show', interface],
                                capture_output=True, text=True, check=True)
    except Exception:
        return "UNKNOWN", "N/A"
    lines = result.stdout.splitlines()
    state, mac = "UNKNOWN", "N/A"
    if lines:
        first = lines[0]
        if "state UP" in first:
            state = "UP"
        elif "state DOWN" in first:
            state = "DOWN"
        elif "state UNKNOWN" in first:
            state = "UP"  # Treat UNKNOWN as UP
    if len(lines) > 1:
        # Second line looks like "link/ether aa:bb:cc:dd:ee:ff ...".
        fields = lines[1].strip().split()
        mac = fields[1] if len(fields) > 1 else "N/A"
    return state, mac
def get_external_ips():
    """Fetch the machine's external IPv4 and IPv6 addresses.

    Iterates over several "what is my IP" services. When a service answers
    with an IPv6 address, an IPv4 address is additionally requested from the
    same service via ``curl -4``. Stops as soon as both are known.

    Returns:
        tuple[str, str]: ``(ipv4, ipv6)``; either is "Unavailable" when it
        could not be determined.
    """
    services = ["https://ip.mvl.sh", "https://ifconfig.co", "https://api.ipify.org", "https://myexternalip.com/raw", "https://ifconfig.io", "https://ifconfig.me"]
    headers = {"User-Agent": "curl"}
    ipv4, ipv6 = "Unavailable", "Unavailable"
    for service in services:
        try:
            # NOTE(review): 0.2s is an aggressive timeout; slow links will
            # report "Unavailable" even when the services are reachable.
            response = requests.get(service, headers=headers, timeout=0.2)
            if response.status_code != 200:
                continue
            ip = response.text.strip()
            if ":" in ip:  # IPv6 answer; ask the same service for IPv4.
                ipv6 = ip
                # check=False: a non-zero curl exit must not raise here.
                # The original used check=True, which made the returncode
                # test below dead code and let CalledProcessError escape
                # the except clause entirely.
                probe = subprocess.run(
                    ["curl", "-4", "--silent", service],
                    capture_output=True,
                    text=True,
                    timeout=0.2,
                    check=False
                )
                if probe.returncode == 0 and probe.stdout.strip():
                    ipv4 = probe.stdout.strip()
            else:  # IPv4 address
                ipv4 = ip
            if ipv4 != "Unavailable" and ipv6 != "Unavailable":
                break
        except (requests.RequestException, subprocess.SubprocessError, OSError):
            # OSError also covers curl being absent (FileNotFoundError),
            # which the original except clause did not catch.
            continue
    return ipv4, ipv6
def display_interface_details(show_physical=False, show_virtual=False, show_all=False, show_external_ip=False, show_ipv6=False):
    """
    Display details of network interfaces based on the specified flags.
    Args:
        show_physical (bool): Show physical interfaces (UP by default unless combined with show_all).
        show_virtual (bool): Show virtual interfaces (UP by default unless combined with show_all).
        show_all (bool): Include all interfaces (UP, DOWN, UNKNOWN).
        show_external_ip (bool): Fetch and display the external IP address.
        show_ipv6 (bool): Include IPv6 addresses in the output.
    Notes:
        - By default, only IPv4 addresses are shown unless `-6` is specified.
        - IPv6 addresses are displayed in a separate column if `-6` is specified.
        - Output is a fixed-width table printed to stdout; per-interface
          errors are printed inline rather than raised.
    """
    if show_external_ip:
        ipv4, ipv6 = get_external_ips()
        print(f"External IPv4: {ipv4}")
        print(f"External IPv6: {ipv6}")
        print("-" * 70)
    interfaces = []
    if show_all:
        # NOTE(review): with -a alone, virtual interfaces are omitted unless
        # -v is also given (asymmetric with the non-all branch) — confirm
        # this asymmetry is intended.
        if show_physical or not show_virtual:  # Default to physical if no `-v`
            interfaces.extend(get_physical_interfaces())
        if show_virtual:
            interfaces.extend(get_virtual_interfaces())
    else:
        if show_physical or not show_virtual:  # Default to physical if no `-v`
            interfaces.extend(get_up_interfaces(get_physical_interfaces()))
        if show_virtual or not show_physical:  # Default to virtual if no `-p`
            interfaces.extend(get_up_interfaces(get_virtual_interfaces()))
    interfaces.sort()
    # Define column widths based on expected maximum content length
    col_widths = {
        'interface': 15,
        'ipv4': 18,
        'ipv6': 40 if show_ipv6 else 0,  # Hide IPv6 column if not showing IPv6
        'subnet': 10,
        'state': 10,
        'mac': 18
    }
    # Print header with proper formatting
    header = f"{'Interface':<{col_widths['interface']}} {'IPv4 Address':<{col_widths['ipv4']}}"
    if show_ipv6:
        header += f" {'IPv6 Address':<{col_widths['ipv6']}}"
    header += f" {'Subnet':<{col_widths['subnet']}} {'State':<{col_widths['state']}} {'MAC Address':<{col_widths['mac']}}"
    print(header)
    print("-" * (col_widths['interface'] + col_widths['ipv4'] + (col_widths['ipv6'] if show_ipv6 else 0) + col_widths['subnet'] + col_widths['state'] + col_widths['mac']))
    for interface in interfaces:
        try:
            # `ip -br addr show` prints "<iface> <state> <addr/mask> ..." rows.
            result = subprocess.run(['ip', '-br', 'addr', 'show', interface],
                                    capture_output=True, text=True, check=True)
            state, mac = get_interface_state(interface)
            if result.returncode == 0:
                lines = result.stdout.strip().splitlines()
                ipv4 = "N/A"
                ipv6 = "N/A"
                subnet = ""
                for line in lines:
                    parts = line.split()
                    if len(parts) >= 3:
                        ip_with_mask = parts[2]
                        # Check if the address is IPv4 or IPv6
                        if ":" in ip_with_mask:  # IPv6
                            ipv6 = ip_with_mask.split('/')[0]
                        else:  # IPv4
                            ipv4 = ip_with_mask.split('/')[0]
                            # The Subnet column reflects the IPv4 prefix only.
                            subnet = ip_with_mask.split('/')[1] if '/' in ip_with_mask else ""
                row = f"{interface:<{col_widths['interface']}} {ipv4:<{col_widths['ipv4']}}"
                if show_ipv6:
                    row += f" {ipv6:<{col_widths['ipv6']}}"
                row += f" {subnet:<{col_widths['subnet']}} {state:<{col_widths['state']}} {mac:<{col_widths['mac']}}"
                print(row)
        except Exception as e:
            print(f"Error fetching details for {interface}: {e}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Display network interface information')
    parser.add_argument('-p', action='store_true', help='Show physical interfaces (UP by default)')
    parser.add_argument('-v', action='store_true', help='Show virtual interfaces (UP by default)')
    parser.add_argument('-a', action='store_true', help='Include all interfaces (UP, DOWN, UNKNOWN)')
    parser.add_argument('-e', action='store_true', help='Fetch and display the external IP address')
    parser.add_argument('--ipv6', '-6', action='store_true', help='Include IPv6 addresses in the output')
    args = parser.parse_args()
    # Default to showing both UP physical and virtual interfaces if no flags are specified
    # (the `not (...)` term is True only when no selection flag at all was given).
    display_interface_details(show_physical=args.p or not (args.p or args.v or args.a or args.e),
                              show_virtual=args.v or not (args.p or args.v or args.a or args.e),
                              show_all=args.a,
                              show_external_ip=args.e,
                              show_ipv6=args.ipv6)

298
ansible/tasks/global/utils/llm Executable file
View File

@@ -0,0 +1,298 @@
#!/bin/bash
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Configuration
KOBOLD_PATH="/mnt/data/ai/llm/koboldcpp-linux-x64"
KOBOLD_MODEL="/mnt/data/ai/llm/Mistral-Small-24B-Instruct-2501-Q4_K_M.gguf" # Default model
SILLYTAVERN_SCREEN="sillytavern"
KOBOLD_SCREEN="koboldcpp"
# Function to check if a screen session exists.
# Usage: check_screen <name>. Exits 0 when `screen -ls` lists a session
# named <name>: the pattern matches the ".<name>" part of the
# "<pid>.<name>" session id followed by whitespace, avoiding prefix
# collisions (e.g. "koboldcpp" vs "koboldcpp2").
check_screen() {
    screen -ls | grep -q "\.${1}\s"
}
# Function to list available models.
# Prints a numbered listing of all *.gguf files so select_model can map a
# chosen number back to a path via `sed -n "<n>p"`.
list_models() {
    echo -e "${BLUE}Available models:${NC}"
    ls -1 /mnt/data/ai/llm/*.gguf | nl -w2 -s'. '
}
# Function to select a model.
# Interactively picks a model from /mnt/data/ai/llm/*.gguf. Empty input
# keeps the default in KOBOLD_MODEL; a valid number replaces KOBOLD_MODEL
# with the corresponding file; anything else keeps the default.
select_model() {
    list_models
    echo
    read -p "Select model number (or press Enter for default): " model_num
    if [[ -z "$model_num" ]]; then
        echo -e "${YELLOW}Using default model: $(basename "$KOBOLD_MODEL")${NC}"
    else
        # `sed -n "<n>p"` prints only line n of the listing, i.e. choice n.
        selected_model=$(ls -1 /mnt/data/ai/llm/*.gguf | sed -n "${model_num}p")
        if [[ -n "$selected_model" ]]; then
            KOBOLD_MODEL="$selected_model"
            echo -e "${GREEN}Selected model: $(basename "$KOBOLD_MODEL")${NC}"
        else
            echo -e "${RED}Invalid selection. Using default model.${NC}"
        fi
    fi
}
# Function to start SillyTavern.
# Launches SillyTavern in a detached screen session and verifies, after a
# short grace period, that the session is still alive.
start_sillytavern() {
    echo -e "${YELLOW}Starting SillyTavern in screen session '${SILLYTAVERN_SCREEN}'...${NC}"
    screen -dmS "$SILLYTAVERN_SCREEN" bash -c "sillytavern --listen 0.0.0.0"
    # Give the process a moment to either settle or crash before probing.
    sleep 2
    if check_screen "$SILLYTAVERN_SCREEN"; then
        echo -e "${GREEN}✓ SillyTavern started successfully!${NC}"
        echo -e "${BLUE} Access at: http://0.0.0.0:8000${NC}"
    else
        echo -e "${RED}✗ Failed to start SillyTavern${NC}"
    fi
}
# Function to start KoboldCPP.
# Prompts for a model (select_model mutates KOBOLD_MODEL), then launches
# koboldcpp in a detached screen session and verifies it survived startup.
start_koboldcpp() {
    select_model
    echo -e "${YELLOW}Starting KoboldCPP in screen session '${KOBOLD_SCREEN}'...${NC}"
    screen -dmS "$KOBOLD_SCREEN" bash -c "cd /mnt/data/ai/llm && ./koboldcpp-linux-x64 --model '$KOBOLD_MODEL' --host 0.0.0.0 --port 5001 --contextsize 8192 --gpulayers 999"
    # Give the process a moment to either settle or crash before probing.
    sleep 2
    if check_screen "$KOBOLD_SCREEN"; then
        echo -e "${GREEN}✓ KoboldCPP started successfully!${NC}"
        echo -e "${BLUE} Model: $(basename "$KOBOLD_MODEL")${NC}"
        echo -e "${BLUE} Access at: http://0.0.0.0:5001${NC}"
    else
        echo -e "${RED}✗ Failed to start KoboldCPP${NC}"
    fi
}
# Function to stop a service.
# Args: $1 = human-readable service name (for messages), $2 = screen
# session name. Sends `quit` to the session and confirms it disappeared.
stop_service() {
    local service=$1
    local screen_name=$2
    echo -e "${YELLOW}Stopping ${service}...${NC}"
    screen -S "$screen_name" -X quit
    # Brief pause so screen can tear the session down before we re-check.
    sleep 1
    if ! check_screen "$screen_name"; then
        echo -e "${GREEN}✓ ${service} stopped successfully${NC}"
    else
        echo -e "${RED}✗ Failed to stop ${service}${NC}"
    fi
}
# Function to show service status.
# Prints an overview of the SillyTavern and KoboldCPP screen sessions with
# their access URLs. Purely informational: writes to stdout only.
# (The original declared st_running/kc_running locals here that were
# assigned but never read — dead code, removed.)
show_status() {
    echo -e "${CYAN}╔═══════════════════════════════════════╗${NC}"
    echo -e "${CYAN}║ Service Status Overview ║${NC}"
    echo -e "${CYAN}╚═══════════════════════════════════════╝${NC}"
    echo
    # Check SillyTavern
    if check_screen "$SILLYTAVERN_SCREEN"; then
        echo -e " ${GREEN}●${NC} SillyTavern: ${GREEN}Running${NC} (screen: ${SILLYTAVERN_SCREEN})"
        echo -e " ${BLUE}→ http://0.0.0.0:8000${NC}"
    else
        echo -e " ${RED}●${NC} SillyTavern: ${RED}Not running${NC}"
    fi
    echo
    # Check KoboldCPP
    if check_screen "$KOBOLD_SCREEN"; then
        echo -e " ${GREEN}●${NC} KoboldCPP: ${GREEN}Running${NC} (screen: ${KOBOLD_SCREEN})"
        echo -e " ${BLUE}→ http://0.0.0.0:5001${NC}"
    else
        echo -e " ${RED}●${NC} KoboldCPP: ${RED}Not running${NC}"
    fi
    echo
}
# Function to handle service management.
# Presents a context-sensitive menu depending on which of the two screen
# sessions (SillyTavern / KoboldCPP) are running, then performs the chosen
# attach/start/restart/stop action. Attaching blocks until the user
# detaches (Ctrl+A, D).
manage_services() {
    # `cmd && echo true || echo false` captures the check as a string flag.
    local st_running=$(check_screen "$SILLYTAVERN_SCREEN" && echo "true" || echo "false")
    local kc_running=$(check_screen "$KOBOLD_SCREEN" && echo "true" || echo "false")
    # If both services are running
    if [[ "$st_running" == "true" ]] && [[ "$kc_running" == "true" ]]; then
        echo -e "${GREEN}Both services are running.${NC}"
        echo
        echo "1) Attach to SillyTavern"
        echo "2) Attach to KoboldCPP"
        echo "3) Restart SillyTavern"
        echo "4) Restart KoboldCPP"
        echo "5) Stop all services"
        echo "6) Exit"
        read -p "Your choice (1-6): " choice
        case $choice in
            1)
                echo -e "${BLUE}Attaching to SillyTavern... (Use Ctrl+A then D to detach)${NC}"
                sleep 1
                screen -r "$SILLYTAVERN_SCREEN"
                ;;
            2)
                echo -e "${BLUE}Attaching to KoboldCPP... (Use Ctrl+A then D to detach)${NC}"
                sleep 1
                screen -r "$KOBOLD_SCREEN"
                ;;
            3)
                # Restart = stop then start again.
                stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
                echo
                start_sillytavern
                ;;
            4)
                stop_service "KoboldCPP" "$KOBOLD_SCREEN"
                echo
                start_koboldcpp
                ;;
            5)
                stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
                stop_service "KoboldCPP" "$KOBOLD_SCREEN"
                ;;
            6)
                exit 0
                ;;
            *)
                echo -e "${RED}Invalid choice${NC}"
                ;;
        esac
    # If only SillyTavern is running
    elif [[ "$st_running" == "true" ]]; then
        echo -e "${YELLOW}Only SillyTavern is running.${NC}"
        echo
        echo "1) Attach to SillyTavern"
        echo "2) Start KoboldCPP"
        echo "3) Restart SillyTavern"
        echo "4) Stop SillyTavern"
        echo "5) Exit"
        read -p "Your choice (1-5): " choice
        case $choice in
            1)
                echo -e "${BLUE}Attaching to SillyTavern... (Use Ctrl+A then D to detach)${NC}"
                sleep 1
                screen -r "$SILLYTAVERN_SCREEN"
                ;;
            2)
                start_koboldcpp
                ;;
            3)
                stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
                echo
                start_sillytavern
                ;;
            4)
                stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
                ;;
            5)
                exit 0
                ;;
            *)
                echo -e "${RED}Invalid choice${NC}"
                ;;
        esac
    # If only KoboldCPP is running
    elif [[ "$kc_running" == "true" ]]; then
        echo -e "${YELLOW}Only KoboldCPP is running.${NC}"
        echo
        echo "1) Attach to KoboldCPP"
        echo "2) Start SillyTavern"
        echo "3) Restart KoboldCPP"
        echo "4) Stop KoboldCPP"
        echo "5) Exit"
        read -p "Your choice (1-5): " choice
        case $choice in
            1)
                echo -e "${BLUE}Attaching to KoboldCPP... (Use Ctrl+A then D to detach)${NC}"
                sleep 1
                screen -r "$KOBOLD_SCREEN"
                ;;
            2)
                start_sillytavern
                ;;
            3)
                stop_service "KoboldCPP" "$KOBOLD_SCREEN"
                echo
                start_koboldcpp
                ;;
            4)
                stop_service "KoboldCPP" "$KOBOLD_SCREEN"
                ;;
            5)
                exit 0
                ;;
            *)
                echo -e "${RED}Invalid choice${NC}"
                ;;
        esac
    # If no services are running
    else
        echo -e "${YELLOW}No services are running.${NC}"
        echo
        echo "1) Start both services"
        echo "2) Start SillyTavern only"
        echo "3) Start KoboldCPP only"
        echo "4) Exit"
        read -p "Your choice (1-4): " choice
        case $choice in
            1)
                start_sillytavern
                echo
                start_koboldcpp
                ;;
            2)
                start_sillytavern
                ;;
            3)
                start_koboldcpp
                ;;
            4)
                exit 0
                ;;
            *)
                echo -e "${RED}Invalid choice${NC}"
                ;;
        esac
    fi
}
# Main script: print the banner, show current session status, then hand
# control to the interactive menu; finish with a screen cheat-sheet.
echo -e "${BLUE}╔═══════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ LLM Services Manager ║${NC}"
echo -e "${BLUE}╚═══════════════════════════════════════╝${NC}"
echo
# Show status
show_status
# Show separator and manage services
echo -e "${CYAN}═══════════════════════════════════════${NC}"
manage_services
echo
echo -e "${BLUE}Quick reference:${NC}"
echo "• List sessions: screen -ls"
echo "• Attach: screen -r <name>"
echo "• Detach: Ctrl+A then D"

View File

@@ -0,0 +1,119 @@
# SSH Utility - Smart SSH Connection Manager
A transparent SSH wrapper that automatically chooses between local and remote connections based on network connectivity.
## What it does
This utility acts as a drop-in replacement for the `ssh` command that intelligently routes connections:
- When you type `ssh desktop`, it automatically checks if your local network is available
- If local: connects via `desktop-local` (faster local connection)
- If remote: connects via `desktop` (Tailscale/VPN connection)
- All other SSH usage passes through unchanged (`ssh --help`, `ssh user@host`, etc.)
## Installation
The utility is automatically compiled and installed to `~/.local/bin/ssh` via Ansible when you run your dotfiles setup.
## Configuration
1. Copy the example config:
```bash
mkdir -p ~/.config/ssh-util
cp ~/.dotfiles/config/ssh-util/config.yaml ~/.config/ssh-util/
```
2. Edit `~/.config/ssh-util/config.yaml` to match your setup:
```yaml
smart_aliases:
desktop:
primary: "desktop-local" # SSH config entry for local connection
fallback: "desktop" # SSH config entry for remote connection
check_host: "192.168.86.22" # IP to ping for connectivity test
timeout: "2s" # Ping timeout
```
3. Ensure your `~/.ssh/config` contains the referenced host entries:
```
Host desktop
HostName mennos-desktop
User menno
Port 400
ForwardAgent yes
AddKeysToAgent yes
Host desktop-local
HostName 192.168.86.22
User menno
Port 400
ForwardAgent yes
AddKeysToAgent yes
```
## Usage
Once configured, simply use SSH as normal:
```bash
# Smart connection - automatically chooses local vs remote
ssh desktop
# All other SSH usage works exactly the same
ssh --help
ssh --version
ssh user@example.com
ssh -L 8080:localhost:80 server
```
## How it works
1. When you run `ssh <alias>`, the utility checks if `<alias>` is defined in the smart_aliases config
2. If yes, it pings the `check_host` IP address
3. If ping succeeds: executes `ssh <primary>` instead
4. If ping fails: executes `ssh <fallback>` instead
5. If not a smart alias: passes through to real SSH unchanged
## Troubleshooting
### SSH utility not found
Make sure `~/.local/bin` is in your PATH:
```bash
echo $PATH | grep -o ~/.local/bin
```
### Config not loading
Check the config file exists and has correct syntax:
```bash
ls -la ~/.config/ssh-util/config.yaml
cat ~/.config/ssh-util/config.yaml
```
### Connectivity test failing
Test manually:
```bash
ping -c 1 -W 2 192.168.86.22
```
### Falls back to real SSH
If there are any errors loading config or parsing, the utility safely falls back to executing the real SSH binary at `/usr/bin/ssh`.
## Adding more aliases
To add more smart aliases, just extend the config:
```yaml
smart_aliases:
desktop:
primary: "desktop-local"
fallback: "desktop"
check_host: "192.168.86.22"
timeout: "2s"
server:
primary: "server-local"
fallback: "server-remote"
check_host: "192.168.1.100"
timeout: "1s"
```
Remember to create the corresponding entries in your `~/.ssh/config`.

View File

@@ -0,0 +1,90 @@
# SSH Utility Configuration
# This file defines smart aliases that automatically choose between local and remote connections
# Logging configuration
logging:
enabled: true
# Levels: debug, info, warn, error
level: "info"
# Formats: console, json
format: "console"
smart_aliases:
desktop:
primary: "desktop-local"
fallback: "desktop"
check_host: "192.168.1.254"
timeout: "2s"
laptop:
primary: "laptop-local"
fallback: "laptop"
check_host: "192.168.1.253"
timeout: "2s"
# Background SSH Tunnel Definitions
tunnels:
# Example: Desktop database tunnel
desktop-database:
type: local
local_port: 5432
remote_host: database
remote_port: 5432
ssh_host: desktop # Uses smart alias logic (desktop-local/desktop)
# Example: Development API tunnel
dev-api:
type: local
local_port: 8080
remote_host: api
remote_port: 80
ssh_host: dev-server
# Example: SOCKS proxy tunnel
socks-proxy:
type: dynamic
local_port: 1080
ssh_host: bastion
# Modem web interface tunnel
modem-web:
type: local
local_port: 8443
remote_host: 192.168.1.1
remote_port: 443
ssh_host: desktop
# Tunnel Management Commands:
# ssh --tunnel --open desktop-database (or ssh -TO desktop-database)
# ssh --tunnel --close desktop-database (or ssh -TC desktop-database)
# ssh --tunnel --list (or ssh -TL)
#
# Ad-hoc tunnels (not in config):
# ssh -TO temp-api --local 8080:api:80 --via server
# Logging options:
# - enabled: true/false - whether to show any logs
# - level: debug (verbose), info (normal), warn (warnings only), error (errors only)
# - format: console (human readable), json (structured)
# Logs are written to stderr so they don't interfere with SSH output
# How it works:
# 1. When you run: ssh desktop
# 2. The utility pings the alias's check_host (192.168.1.254 in the config above) with a 2s timeout
# 3. If ping succeeds: runs "ssh desktop-local" instead
# 4. If ping fails: runs "ssh desktop" instead
# 5. All other SSH usage (flags, user@host, etc.) passes through unchanged
# Your SSH config should contain the actual host definitions:
# Host desktop
# HostName mennos-desktop
# User menno
# Port 400
# ForwardAgent yes
# AddKeysToAgent yes
#
# Host desktop-local
# HostName 192.168.86.22
# User menno
# Port 400
# ForwardAgent yes
# AddKeysToAgent yes

View File

@@ -0,0 +1,20 @@
module ssh-util
go 1.21
require (
github.com/jedib0t/go-pretty/v6 v6.4.9
github.com/rs/zerolog v1.31.0
github.com/spf13/cobra v1.8.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/sys v0.12.0 // indirect
)

View File

@@ -0,0 +1,46 @@
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jedib0t/go-pretty/v6 v6.4.9 h1:vZ6bjGg2eBSrJn365qlxGcaWu09Id+LHtrfDWlB2Usc=
github.com/jedib0t/go-pretty/v6 v6.4.9/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.4 h1:wZRexSlwd7ZXfKINDLsO4r7WBt3gTKONc6K/VesHvHM=
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,51 @@
---
# Bridges the Windows 1Password SSH agent into WSL2: installs socat, then
# drops a script that relays $SSH_AUTH_SOCK to the Windows named pipe via
# npiperelay.exe. The script is meant to be sourced from the user's shell rc.
- name: WSL2 1Password SSH Agent Bridge
  block:
    - name: Ensure required packages are installed for 1Password sock bridge
      ansible.builtin.package:
        name:
          # 1Password (WSL2 required package for sock bridge)
          - socat
        state: present
      become: true

    - name: Ensure .1password directory exists in home
      ansible.builtin.file:
        path: "{{ ansible_env.HOME }}/.1password"
        state: directory
        mode: '0700'  # holds the agent socket — keep private to the user
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"

    # NOTE(review): the script has no shebang, consistent with being sourced
    # (it must export SSH_AUTH_SOCK into the calling shell) — confirm callers.
    - name: Create .agent-bridge.sh in home directory
      ansible.builtin.copy:
        dest: "{{ ansible_env.HOME }}/.agent-bridge.sh"
        mode: '0755'
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        content: |
          # Code extracted from https://stuartleeks.com/posts/wsl-ssh-key-forward-to-windows/
          # (IMPORTANT) Create the folder on your root for the `agent.sock` (How mentioned by @rfay and @Lochnair in the comments)
          mkdir -p ~/.1password

          # Configure ssh forwarding
          export SSH_AUTH_SOCK=$HOME/.1password/agent.sock
          # need `ps -ww` to get non-truncated command for matching
          # use square brackets to generate a regex match for the process we want but that doesn't match the grep command running it!
          ALREADY_RUNNING=$(ps -auxww | grep -q "[n]piperelay.exe -ei -s //./pipe/openssh-ssh-agent"; echo $?)
          if [[ $ALREADY_RUNNING != "0" ]]; then
              if [[ -S $SSH_AUTH_SOCK ]]; then
                  # not expecting the socket to exist as the forwarding command isn't running (http://www.tldp.org/LDP/abs/html/fto.html)
                  echo "removing previous socket..."
                  rm $SSH_AUTH_SOCK
              fi
              echo "Starting SSH-Agent relay..."
              # setsid to force new session to keep running
              # set socat to listen on $SSH_AUTH_SOCK and forward to npiperelay which then forwards to openssh-ssh-agent on windows
              (setsid socat UNIX-LISTEN:$SSH_AUTH_SOCK,fork EXEC:"npiperelay.exe -ei -s //./pipe/openssh-ssh-agent",nofork &) >/dev/null 2>&1
          fi
  tags:
    - wsl
    - wsl2

View File

@@ -0,0 +1,93 @@
---
# Installs BorgBackup, initialises an encrypted repository on the JuiceFS
# mount, and schedules daily backups via a systemd service + timer pair.
# Secrets come from 1Password (see plugins/lookup/README.md).
- name: Borg Backup Installation and Configuration
  block:
    - name: Check if Borg is already installed
      ansible.builtin.command: which borg
      register: borg_check
      failed_when: false  # a non-zero rc just means "not installed"
      changed_when: false

    - name: Ensure Borg is installed
      ansible.builtin.package:
        # Arch packages it as "borg"; Debian/Ubuntu as "borgbackup"
        # (matches the distinction already made in server.yml)
        name: "{{ 'borg' if ansible_pkg_mgr == 'pacman' else 'borgbackup' }}"
        state: present
      become: true
      when: borg_check.rc != 0

    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg directories
      ansible.builtin.file:
        path: "{{ borg_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ borg_config_dir }}"
        - "/mnt/object_storage"
      loop_control:
        loop_var: borg_dir
      become: true

    # The presence of <repo>/config is how Borg marks an initialised repo.
    - name: Check if Borg repository exists
      ansible.builtin.stat:
        path: "{{ borg_repo_dir }}/config"
      register: borg_repo_check
      become: true

    - name: Initialize Borg repository
      ansible.builtin.command: >
        borg init --encryption=repokey {{ borg_repo_dir }}
      environment:
        BORG_PASSPHRASE: "{{ borg_passphrase }}"
      become: true
      when: not borg_repo_check.stat.exists

    - name: Create Borg backup script
      ansible.builtin.template:
        src: templates/borg-backup.sh.j2
        dest: "{{ borg_config_dir }}/backup.sh"
        mode: "0755"
      become: true

    - name: Create Borg systemd service
      ansible.builtin.template:
        src: templates/borg-backup.service.j2
        dest: /etc/systemd/system/borg-backup.service
        mode: "0644"
      become: true
      register: borg_service

    - name: Create Borg systemd timer
      ansible.builtin.template:
        src: templates/borg-backup.timer.j2
        dest: /etc/systemd/system/borg-backup.timer
        mode: "0644"
      become: true
      register: borg_timer

    # Only reload when a unit file actually changed.
    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true
      when: borg_service.changed or borg_timer.changed

    - name: Enable and start Borg backup timer
      ansible.builtin.systemd:
        name: borg-backup.timer
        enabled: true
        state: started
      become: true

    - name: Display Borg backup status
      ansible.builtin.debug:
        msg: "Borg backup is configured and will run daily at 2 AM. Logs available at /var/log/borg-backup.log"
  tags:
    - borg-backup
    - borg
    - backup

View File

@@ -0,0 +1,95 @@
---
# Deploys a script plus systemd service/timer that mirrors the Borg
# repository to local storage, with a log file and logrotate policy.
# Fixed: FQCN module names, `true` booleans, idempotent log-file touch,
# and notifies now reference the handler that actually exists
# ("Systemctl daemon-reload" in handlers/main.yml) — the previous
# "reload systemd" / "restart borg-local-sync-timer" names would make
# the play fail with "handler not found". The timer task below also
# (re)starts the timer itself, so no restart handler is needed.
- name: Borg Local Sync Installation and Configuration
  block:
    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg local sync script
      ansible.builtin.template:
        src: borg-local-sync.sh.j2
        dest: /usr/local/bin/borg-local-sync.sh
        mode: "0755"
        owner: root
        group: root
      become: true

    - name: Create Borg local sync systemd service
      ansible.builtin.template:
        src: borg-local-sync.service.j2
        dest: /etc/systemd/system/borg-local-sync.service
        mode: "0644"
        owner: root
        group: root
      become: true
      notify: Systemctl daemon-reload

    - name: Create Borg local sync systemd timer
      ansible.builtin.template:
        src: borg-local-sync.timer.j2
        dest: /etc/systemd/system/borg-local-sync.timer
        mode: "0644"
        owner: root
        group: root
      become: true
      notify: Systemctl daemon-reload

    - name: Create log file for Borg local sync
      ansible.builtin.file:
        path: /var/log/borg-local-sync.log
        state: touch
        # keep timestamps so the task is idempotent (not "changed" every run)
        access_time: preserve
        modification_time: preserve
        owner: root
        group: root
        mode: "0644"
      become: true

    - name: Enable and start Borg local sync timer
      ansible.builtin.systemd:
        name: borg-local-sync.timer
        enabled: true
        state: started
        daemon_reload: true
      become: true

    - name: Add logrotate configuration for Borg local sync
      ansible.builtin.copy:
        content: |
          /var/log/borg-local-sync.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 root root
          }
        dest: /etc/logrotate.d/borg-local-sync
        mode: "0644"
        owner: root
        group: root
      become: true
  tags:
    - borg-local-sync
    - borg
    - backup

View File

@@ -0,0 +1,88 @@
---
# Sets up a systemd timer that refreshes Cloudflare DNS records every
# 15 minutes using the dynamic-dns-cf binary (compiled by utils.yml).
# Credentials are injected via a root-only environment file.
- name: Dynamic DNS setup
  block:
    - name: Create systemd environment file for dynamic DNS
      ansible.builtin.template:
        src: "{{ playbook_dir }}/templates/dynamic-dns-systemd.env.j2"
        dest: "/etc/dynamic-dns-systemd.env"
        mode: "0600"  # contains API credentials — root read-only
        owner: root
        group: root
      become: true

    - name: Create dynamic DNS wrapper script
      ansible.builtin.copy:
        dest: "/usr/local/bin/dynamic-dns-update.sh"
        mode: "0755"
        content: |
          #!/bin/bash
          # Run dynamic DNS update (binary compiled by utils.yml)
          {{ ansible_user_dir }}/.local/bin/dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
      become: true

    # Fires every 15 minutes; Persistent=true catches up after downtime.
    - name: Create dynamic DNS systemd timer
      ansible.builtin.copy:
        dest: "/etc/systemd/system/dynamic-dns.timer"
        mode: "0644"
        content: |
          [Unit]
          Description=Dynamic DNS Update Timer
          Requires=dynamic-dns.service

          [Timer]
          OnCalendar=*:0/15
          Persistent=true

          [Install]
          WantedBy=timers.target
      become: true
      register: ddns_timer

    # Oneshot service run as the unprivileged user; env file supplies secrets.
    - name: Create dynamic DNS systemd service
      ansible.builtin.copy:
        dest: "/etc/systemd/system/dynamic-dns.service"
        mode: "0644"
        content: |
          [Unit]
          Description=Dynamic DNS Update
          After=network-online.target
          Wants=network-online.target

          [Service]
          Type=oneshot
          ExecStart=/usr/local/bin/dynamic-dns-update.sh
          EnvironmentFile=/etc/dynamic-dns-systemd.env
          User={{ ansible_user }}
          Group={{ ansible_user }}

          [Install]
          WantedBy=multi-user.target
      become: true
      register: ddns_service

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true
      when: ddns_timer.changed or ddns_service.changed

    - name: Enable and start dynamic DNS timer
      ansible.builtin.systemd:
        name: dynamic-dns.timer
        enabled: true
        state: started
      become: true

    - name: Display setup completion message
      ansible.builtin.debug:
        msg: |
          Dynamic DNS setup complete!
          - Systemd timer: sudo systemctl status dynamic-dns.timer
          - Check logs: sudo journalctl -u dynamic-dns.service -f
          - Manual run: sudo /usr/local/bin/dynamic-dns-update.sh
          - Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl
  # Only the hosts that own these DNS records run this setup.
  when: inventory_hostname == 'mennos-desktop' or inventory_hostname == 'mennos-vps'
  tags:
    - dynamic-dns

View File

@@ -0,0 +1,94 @@
---
# Installs JuiceFS, tunes kernel network buffers, and mounts the Hetzner
# object-storage bucket at /mnt/object_storage via a systemd unit.
# Fixed: sysctl module is `ansible.posix.sysctl` — `ansible.builtin.sysctl`
# does not exist and fails at parse time (server.yml already uses the
# correct FQCN for its own sysctl loop).
- name: JuiceFS Installation and Configuration
  block:
    - name: Check if JuiceFS is already installed
      ansible.builtin.command: which juicefs
      register: juicefs_check
      failed_when: false  # non-zero rc just means "not installed"
      changed_when: false

    # SECURITY NOTE: curl|sh executes the vendor installer unverified;
    # acceptable for this homelab, but pin/checksum it for anything stricter.
    - name: Install JuiceFS using the automatic installer
      ansible.builtin.shell: curl -sSL https://d.juicefs.com/install | sh -
      register: juicefs_installation
      when: juicefs_check.rc != 0
      become: true

    - name: Verify JuiceFS installation
      ansible.builtin.command: juicefs version
      register: juicefs_version
      changed_when: false
      when: juicefs_check.rc != 0 or juicefs_installation.changed

    - name: Create mount directory
      ansible.builtin.file:
        path: /mnt/object_storage
        state: directory
        mode: "0755"
      become: true

    - name: Create cache directory
      ansible.builtin.file:
        path: /var/jfsCache
        state: directory
        mode: "0755"
      become: true

    # Larger TCP buffers help JuiceFS throughput against remote object storage.
    - name: Configure JuiceFS network performance optimizations
      ansible.posix.sysctl:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
        reload: true
      become: true
      loop:
        - { name: "net.core.rmem_max", value: "16777216" }
        - { name: "net.core.wmem_max", value: "16777216" }
        - { name: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" }
        - { name: "net.ipv4.tcp_wmem", value: "4096 65536 16777216" }

    - name: Set JuiceFS facts
      ansible.builtin.set_fact:
        hetzner_access_key: "{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_ACCESS_KEY_ID') }}"
        hetzner_secret_key:
          "{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_SECRET_ACCESS_KEY')
          }}"
        redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"

    - name: Create JuiceFS systemd service file
      ansible.builtin.template:
        src: templates/juicefs.service.j2
        dest: /etc/systemd/system/juicefs.service
        owner: root
        group: root
        mode: "0644"
      become: true

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true

    # Redis holds JuiceFS metadata; only the desktop hosts it.
    - name: Include JuiceFS Redis tasks
      ansible.builtin.include_tasks: services/redis/redis.yml
      when: inventory_hostname == 'mennos-desktop'

    - name: Enable and start JuiceFS service
      ansible.builtin.systemd:
        name: juicefs.service
        enabled: true
        state: started
      become: true

    # findmnt exits non-zero when the path is not a mount point — no shell
    # pipeline needed (replaces `df -h | grep ...`).
    - name: Check if JuiceFS is mounted
      ansible.builtin.command: findmnt /mnt/object_storage
      become: true
      register: mount_check
      failed_when: false
      changed_when: false

    - name: Display mount status
      ansible.builtin.debug:
        msg: "JuiceFS is successfully mounted at /mnt/object_storage"
      when: mount_check.rc == 0
  tags:
    - juicefs

View File

@@ -0,0 +1,157 @@
---
# Server entry point: base packages, storage (JuiceFS), dynamic DNS, Borg
# backups, kernel tuning, and per-host Docker service deployment driven by
# the `services` list below (or a single `specific_service` override).
# Fixed: the openssh install tasks were missing `become: true` — the sibling
# borg install tasks escalate explicitly, so the block itself does not, and
# package installation fails as a non-root user.
- name: Server setup
  block:
    - name: Ensure openssh-server is installed on Arch-based systems
      ansible.builtin.package:
        name: openssh
        state: present
      become: true
      when: ansible_pkg_mgr == 'pacman'

    - name: Ensure openssh-server is installed on non-Arch systems
      ansible.builtin.package:
        name: openssh-server
        state: present
      become: true
      when: ansible_pkg_mgr != 'pacman'

    - name: Ensure Borg is installed on Arch-based systems
      ansible.builtin.package:
        name: borg
        state: present
      become: true
      when: ansible_pkg_mgr == 'pacman'

    - name: Ensure Borg is installed on Debian/Ubuntu systems
      ansible.builtin.package:
        name: borgbackup
        state: present
      become: true
      when: ansible_pkg_mgr != 'pacman'

    - name: Include JuiceFS tasks
      ansible.builtin.include_tasks: juicefs.yml
      tags:
        - juicefs

    - name: Include Dynamic DNS tasks
      ansible.builtin.include_tasks: dynamic-dns.yml
      tags:
        - dynamic-dns

    - name: Include Borg Backup tasks
      ansible.builtin.include_tasks: borg-backup.yml
      tags:
        - borg-backup

    - name: Include Borg Local Sync tasks
      ansible.builtin.include_tasks: borg-local-sync.yml
      tags:
        - borg-local-sync

    - name: System performance optimizations
      ansible.posix.sysctl:
        name: "{{ item.name }}"
        value: "{{ item.value }}"
        state: present
        reload: true
      become: true
      loop:
        - { name: "fs.file-max", value: "2097152" } # Max open files for the entire system
        - { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have
        - { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory
        - { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency to reclaim memory for directory/inode caches
        - { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket
        - { name: "net.core.netdev_max_backlog", value: "65535" } # Max packets queued on network interface input
        - { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state
        - { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections

    # Deploy every enabled service assigned to this host, or just
    # `specific_service` when that var is passed on the command line.
    - name: Include service tasks
      ansible.builtin.include_tasks: "services/{{ item.name }}/{{ item.name }}.yml"
      loop: >-
        {{ services | selectattr('enabled', 'equalto', true)
                    | selectattr('hosts', 'contains', inventory_hostname) | list
           if specific_service is not defined
           else services | selectattr('name', 'equalto', specific_service)
                         | selectattr('enabled', 'equalto', true)
                         | selectattr('hosts', 'contains', inventory_hostname) | list }}
      loop_control:
        label: "{{ item.name }}"
  # `always` keeps this block runnable when the play is invoked with
  # service-specific tags.
  tags:
    - services
    - always
  vars:
    services:
      - name: dashy
        enabled: true
        hosts:
          - mennos-desktop
      - name: gitea
        enabled: true
        hosts:
          - mennos-desktop
      - name: factorio
        enabled: true
        hosts:
          - mennos-desktop
      - name: dozzle
        enabled: true
        hosts:
          - mennos-desktop
      - name: beszel
        enabled: true
        hosts:
          - mennos-desktop
      - name: caddy
        enabled: true
        hosts:
          - mennos-desktop
      - name: golink
        enabled: true
        hosts:
          - mennos-desktop
      - name: immich
        enabled: true
        hosts:
          - mennos-desktop
      - name: plex
        enabled: true
        hosts:
          - mennos-desktop
      - name: tautulli
        enabled: true
        hosts:
          - mennos-desktop
      - name: stash
        enabled: true
        hosts:
          - mennos-desktop
      - name: downloaders
        enabled: true
        hosts:
          - mennos-desktop
      - name: wireguard
        enabled: true
        hosts:
          - mennos-desktop
      - name: nextcloud
        enabled: true
        hosts:
          - mennos-desktop
      - name: echoip
        enabled: true
        hosts:
          - mennos-desktop
      - name: arr-stack
        enabled: true
        hosts:
          - mennos-desktop
      - name: home-assistant
        enabled: true
        hosts:
          - mennos-desktop
      - name: privatebin
        enabled: true
        hosts:
          - mennos-desktop
      - name: unifi-network-application
        enabled: true
        hosts:
          - mennos-desktop
      - name: avorion
        enabled: true
        hosts:
          - mennos-desktop

View File

@@ -0,0 +1,38 @@
---
# Renders the ArrStack compose file and recreates the stack only when the
# template actually changed (same deploy pattern as the other services).
- name: Deploy ArrStack service
  block:
    - name: Set ArrStack directories
      ansible.builtin.set_fact:
        arr_stack_service_dir: "{{ ansible_env.HOME }}/.services/arr-stack"  # compose file location
        arr_stack_data_dir: "/mnt/services/arr-stack"  # persistent container data

    - name: Create ArrStack directory
      ansible.builtin.file:
        path: "{{ arr_stack_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create ArrStack data directory
      ansible.builtin.file:
        path: "{{ arr_stack_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy ArrStack docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ arr_stack_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: arr_stack_template_result

    # Full down/up cycle so config changes are always picked up.
    - name: Stop ArrStack service
      ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" down --remove-orphans
      when: arr_stack_template_result.changed

    - name: Start ArrStack service
      ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" up -d
      when: arr_stack_template_result.changed
  tags:
    - services
    - arr_stack
    - arr-stack

View File

@@ -0,0 +1,181 @@
{# Docker Compose stack for the *arr media suite.
   Port mappings and templated host paths are quoted so the YAML parser
   never misreads "a:b" scalars (Compose-recommended practice). #}
name: arr-stack

services:
  radarr:
    container_name: radarr
    image: lscr.io/linuxserver/radarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      - "7878:7878"
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - "{{ arr_stack_data_dir }}/radarr-config:/config"
      - /mnt/data:/mnt/data
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 2G

  sonarr:
    image: linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - "{{ arr_stack_data_dir }}/sonarr-config:/config"
      - /mnt/data:/mnt/data
    ports:
      - "8989:8989"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 2G

  whisparr:
    image: ghcr.io/hotio/whisparr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      - "6969:6969"
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - "{{ arr_stack_data_dir }}/whisparr-config:/config"
      - /mnt/data:/mnt/data
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 2G

  prowlarr:
    container_name: prowlarr
    image: linuxserver/prowlarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - "{{ arr_stack_data_dir }}/prowlarr-config:/config"
    extra_hosts:
      - host.docker.internal:host-gateway
    ports:
      - "9696:9696"
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 512M

  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    environment:
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - LOG_HTML=${LOG_HTML:-false}
      - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
      - TZ=Europe/Amsterdam
    ports:
      - "8191:8191"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 1G

  overseerr:
    image: sctx/overseerr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - "{{ arr_stack_data_dir }}/overseerr-config:/app/config"
    ports:
      - "5055:5055"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr_stack_net
      # Joins the Caddy network so the reverse proxy can reach it by name.
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 512M

  tdarr:
    image: ghcr.io/haveagitgat/tdarr:latest
    container_name: tdarr
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - serverIP=0.0.0.0
      - serverPort=8266
      - webUIPort=8265
      - internalNode=true
      - inContainer=true
      - ffmpegVersion=7
      - nodeName=MyInternalNode
      - auth=false
      - openBrowser=true
      - maxLogSizeMB=10
      - cronPluginUpdate=
      - NVIDIA_DRIVER_CAPABILITIES=all
      - NVIDIA_VISIBLE_DEVICES=all
    volumes:
      - "{{ arr_stack_data_dir }}/tdarr-server:/app/server"
      - "{{ arr_stack_data_dir }}/tdarr-config:/app/configs"
      - "{{ arr_stack_data_dir }}/tdarr-logs:/app/logs"
      - /mnt/data:/media
      - "{{ arr_stack_data_dir }}/tdarr-cache:/temp"
    ports:
      - "8265:8265"
      - "8266:8266"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    # NVIDIA runtime + /dev/dri for GPU-accelerated transcoding.
    runtime: nvidia
    devices:
      - /dev/dri:/dev/dri
    networks:
      - arr_stack_net
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

networks:
  arr_stack_net:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,37 @@
---
# Renders the Avorion compose file and recreates the stack only when the
# template actually changed (same deploy pattern as the other services).
- name: Deploy Avorion service
  block:
    - name: Set Avorion directories
      ansible.builtin.set_fact:
        avorion_service_dir: "{{ ansible_env.HOME }}/.services/avorion"  # compose file location
        avorion_data_dir: "/mnt/services/avorion"  # galaxy save data

    - name: Create Avorion directory
      ansible.builtin.file:
        path: "{{ avorion_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Avorion data directory
      ansible.builtin.file:
        path: "{{ avorion_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Avorion docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ avorion_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: avorion_compose

    # Full down/up cycle so config changes are always picked up.
    - name: Stop Avorion service
      ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" down --remove-orphans
      when: avorion_compose.changed

    - name: Start Avorion service
      ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" up -d
      when: avorion_compose.changed
  tags:
    - services
    - avorion

View File

@@ -0,0 +1,15 @@
services:
avorion:
image: rfvgyhn/avorion:latest
volumes:
- {{ avorion_data_dir }}:/home/steam/.avorion/galaxies/avorion_galaxy
ports:
- 27000:27000
- 27000:27000/udp
- 27003:27003/udp
- 27020:27020/udp
- 27021:27021/udp
deploy:
resources:
limits:
memory: 4G

View File

@@ -0,0 +1,37 @@
---
# Renders the Beszel compose file and recreates the stack only when the
# template actually changed (same deploy pattern as the other services).
- name: Deploy Beszel service
  block:
    - name: Set Beszel directories
      ansible.builtin.set_fact:
        beszel_service_dir: "{{ ansible_env.HOME }}/.services/beszel"  # compose file location
        beszel_data_dir: "/mnt/services/beszel"  # hub data + agent socket

    - name: Create Beszel directory
      ansible.builtin.file:
        path: "{{ beszel_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Beszel data directory
      ansible.builtin.file:
        path: "{{ beszel_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Beszel docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ beszel_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: beszel_compose

    # Full down/up cycle so config changes are always picked up.
    - name: Stop Beszel service
      ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" down --remove-orphans
      when: beszel_compose.changed

    - name: Start Beszel service
      ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" up -d
      when: beszel_compose.changed
  tags:
    - services
    - beszel

View File

@@ -0,0 +1,37 @@
{# Beszel monitoring hub + host agent. Ports and templated paths are
   quoted, and Jinja delimiters use consistent `{{ var }}` spacing. #}
services:
  beszel:
    image: henrygd/beszel
    restart: unless-stopped
    ports:
      - "8090:8090"
    volumes:
      - "{{ beszel_data_dir }}/data:/beszel_data"
      # Shared socket directory through which the agent reports to the hub.
      - "{{ beszel_data_dir }}/socket:/beszel_socket"
    networks:
      - beszel-net
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 256M

  beszel-agent:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    # host networking so the agent sees the real host's interfaces/stats
    network_mode: host
    volumes:
      - "{{ beszel_data_dir }}/socket:/beszel_socket"
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      LISTEN: /beszel_socket/beszel.sock
      KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKkSIQDh1vS8lG+2Uw/9dK1eOgCHVCgQfP+Bfk4XPkdn'
    deploy:
      resources:
        limits:
          memory: 128M

networks:
  beszel-net:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,228 @@
# Global configuration for country blocking
{
    servers {
        protocols h1 h2 h3
    }
}

# Country blocking snippet using MaxMind GeoLocation - reusable across all sites
{% if enable_country_blocking | default(false) and allowed_countries_codes | default([]) | length > 0 %}
(country_block) {
    # NOTE(review): @allowed_local is defined but never referenced below —
    # the same IP allowlist is repeated inside @not_allowed_countries.
    # Presumably kept for future use; confirm or remove.
    @allowed_local {
        remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
    }
    # Match requests that are neither local/allowlisted IPs nor from an
    # allowed country, and reject them with 403.
    @not_allowed_countries {
        not remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
        not {
            maxmind_geolocation {
                db_path "/etc/caddy/geoip/GeoLite2-Country.mmdb"
                allow_countries {{ allowed_countries_codes | join(' ') }}
            }
        }
    }
    respond @not_allowed_countries "Access denied" 403
}
{% else %}
# Empty snippet so `import country_block` stays valid when blocking is off.
(country_block) {
    # Country blocking disabled
}
{% endif %}

{% if inventory_hostname == 'mennos-desktop' %}
# --- Gitea ---
git.mvl.sh {
    import country_block
    reverse_proxy gitea:3000
    tls {{ caddy_email }}
}
git.vleeuwen.me {
    import country_block
    redir https://git.mvl.sh{uri}
    tls {{ caddy_email }}
}

# Dotfiles bootstrap short-link
df.mvl.sh {
    import country_block
    redir / https://git.mvl.sh/vleeuwenmenno/dotfiles/raw/branch/master/setup.sh
    tls {{ caddy_email }}
}

# --- Factorio Server Manager ---
fsm.mvl.sh {
    import country_block
    reverse_proxy factorio-server-manager:80
    tls {{ caddy_email }}
}
fsm.vleeuwen.me {
    import country_block
    redir https://fsm.mvl.sh{uri}
    tls {{ caddy_email }}
}

# --- Beszel monitoring ---
beszel.mvl.sh {
    import country_block
    reverse_proxy beszel:8090
    tls {{ caddy_email }}
}
beszel.vleeuwen.me {
    import country_block
    redir https://beszel.mvl.sh{uri}
    tls {{ caddy_email }}
}

# --- Immich photos ---
photos.mvl.sh {
    import country_block
    reverse_proxy immich:2283
    tls {{ caddy_email }}
}
photos.vleeuwen.me {
    import country_block
    redir https://photos.mvl.sh{uri}
    tls {{ caddy_email }}
}

# --- Home Assistant (runs on the host, not in the Caddy network) ---
home.mvl.sh {
    import country_block
    reverse_proxy host.docker.internal:8123 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}
home.vleeuwen.me {
    import country_block
    reverse_proxy host.docker.internal:8123 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}

# --- UniFi controller (self-signed upstream TLS; no country block —
#     NOTE(review): presumably intentional for the guest hotspot, confirm) ---
unifi.mvl.sh {
    reverse_proxy unifi-controller:8443 {
        transport http {
            tls_insecure_skip_verify
        }
        header_up Host {host}
    }
    tls {{ caddy_email }}
}
hotspot.mvl.sh {
    reverse_proxy unifi-controller:8843 {
        transport http {
            tls_insecure_skip_verify
        }
        header_up Host {host}
    }
    tls {{ caddy_email }}
}
hotspot.mvl.sh:80 {
    redir https://hotspot.mvl.sh{uri} permanent
}

# --- PrivateBin ---
bin.mvl.sh {
    import country_block
    reverse_proxy privatebin:8080
    tls {{ caddy_email }}
}

# --- EchoIP (served on both HTTPS and plain HTTP for curl clients) ---
ip.mvl.sh ip.vleeuwen.me {
    import country_block
    reverse_proxy echoip:8080 {
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}
http://ip.mvl.sh http://ip.vleeuwen.me {
    import country_block
    reverse_proxy echoip:8080 {
        header_up X-Real-IP {http.request.remote.host}
    }
}

# --- Overseerr ---
overseerr.mvl.sh {
    import country_block
    reverse_proxy overseerr:5055
    tls {{ caddy_email }}
}
overseerr.vleeuwen.me {
    import country_block
    redir https://overseerr.mvl.sh{uri}
    tls {{ caddy_email }}
}

# --- Plex (host network) ---
plex.mvl.sh {
    import country_block
    reverse_proxy host.docker.internal:32400 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}
plex.vleeuwen.me {
    import country_block
    redir https://plex.mvl.sh{uri}
    tls {{ caddy_email }}
}

# --- Tautulli (host network) ---
tautulli.mvl.sh {
    import country_block
    reverse_proxy host.docker.internal:8181 {
        header_up Host {upstream_hostport}
        header_up X-Real-IP {http.request.remote.host}
    }
    tls {{ caddy_email }}
}
tautulli.vleeuwen.me {
    import country_block
    redir https://tautulli.mvl.sh{uri}
    tls {{ caddy_email }}
}

# --- Nextcloud ---
drive.mvl.sh drive.vleeuwen.me {
    import country_block

    # CalDAV and CardDAV redirects
    redir /.well-known/carddav /remote.php/dav/ 301
    redir /.well-known/caldav /remote.php/dav/ 301

    # Handle other .well-known requests
    handle /.well-known/* {
        reverse_proxy nextcloud:80 {
            header_up Host {host}
            header_up X-Real-IP {http.request.remote.host}
        }
    }

    # Main reverse proxy configuration with proper headers
    reverse_proxy nextcloud:80 {
        header_up Host {host}
        header_up X-Real-IP {http.request.remote.host}
    }

    # Security headers
    header {
        # HSTS header for enhanced security (required by Nextcloud)
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        # Additional security headers recommended for Nextcloud
        X-Content-Type-Options "nosniff"
        X-Frame-Options "SAMEORIGIN"
        Referrer-Policy "no-referrer"
        X-XSS-Protection "1; mode=block"
        X-Permitted-Cross-Domain-Policies "none"
        X-Robots-Tag "noindex, nofollow"
    }
    tls {{ caddy_email }}
}
{% endif %}

View File

@@ -0,0 +1,15 @@
# Build stage: compile a custom Caddy binary that bundles the MaxMind
# geolocation plugin used by the country_block snippet in the Caddyfile.
FROM caddy:2.9.1-builder AS builder

RUN xcaddy build \
    --with github.com/porech/caddy-maxmind-geolocation

# Runtime stage: official Alpine image with the custom binary swapped in.
FROM caddy:2.9.1-alpine

COPY --from=builder /usr/bin/caddy /usr/bin/caddy

# Create directory for MaxMind databases and logs
RUN mkdir -p /etc/caddy/geoip /var/log/caddy

EXPOSE 80 443

CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]

View File

@@ -0,0 +1,59 @@
---
# Deploys the Caddy reverse proxy (custom image with the MaxMind GeoIP
# plugin, built from the local Dockerfile).
# Fixed: `caddy_dockerfile` was registered but never used — a changed
# Dockerfile neither restarted nor rebuilt the image. It now participates
# in the restart conditions and `up -d --build` rebuilds the image.
- name: Deploy Caddy service
  block:
    - name: Set Caddy directories
      ansible.builtin.set_fact:
        caddy_service_dir: "{{ ansible_env.HOME }}/.services/caddy"
        caddy_data_dir: "/mnt/services/caddy"
        geoip_db_path: "/mnt/services/echoip"  # GeoLite2 DB maintained by the echoip service
        caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}"

    - name: Create Caddy directory
      ansible.builtin.file:
        path: "{{ caddy_service_dir }}"
        state: directory
        mode: "0755"

    - name: Setup country blocking
      ansible.builtin.include_tasks: country-blocking.yml

    - name: Copy Dockerfile for custom Caddy build
      ansible.builtin.copy:
        src: Dockerfile
        dest: "{{ caddy_service_dir }}/Dockerfile"
        mode: "0644"
      register: caddy_dockerfile

    # Idempotent network creation: "already exists" is not a failure.
    - name: Create Caddy network
      ansible.builtin.command: docker network create caddy_default
      register: create_caddy_network
      failed_when:
        - create_caddy_network.rc != 0
        - "'already exists' not in create_caddy_network.stderr"
      changed_when: create_caddy_network.rc == 0

    - name: Deploy Caddy docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ caddy_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: caddy_compose

    - name: Deploy Caddy Caddyfile
      ansible.builtin.template:
        src: Caddyfile.j2
        dest: "{{ caddy_service_dir }}/Caddyfile"
        mode: "0644"
      register: caddy_file

    - name: Stop Caddy service
      ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans
      when: caddy_compose.changed or caddy_file.changed or caddy_dockerfile.changed

    # --build forces an image rebuild so Dockerfile changes take effect.
    - name: Start Caddy service
      ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d --build
      when: caddy_compose.changed or caddy_file.changed or caddy_dockerfile.changed
  tags:
    - caddy
    - services
    - reverse-proxy

View File

@@ -0,0 +1,50 @@
---
# Configures MaxMind GeoIP country blocking for the Caddy reverse proxy.
# Expects caddy_service_dir, caddy_data_dir and geoip_db_path to have been
# set as facts by the including task file (caddy/main.yml).
- name: Country blocking setup for Caddy with MaxMind GeoLocation
  block:
    # NOTE(review): main.yml already copies this Dockerfile before including
    # this file — confirm whether one of the two copy tasks can be removed.
    - name: Copy Dockerfile for custom Caddy build with GeoIP
      ansible.builtin.copy:
        src: Dockerfile
        dest: "{{ caddy_service_dir }}/Dockerfile"
        mode: "0644"
      when: enable_country_blocking | default(false)
    # The database itself is downloaded/refreshed by the echoip service tasks.
    - name: Check if MaxMind Country database is available
      ansible.builtin.stat:
        path: "{{ geoip_db_path }}/GeoLite2-Country.mmdb"
      register: maxmind_country_db
      when: enable_country_blocking | default(false)
    - name: Ensure log directory exists for Caddy
      ansible.builtin.file:
        path: "{{ caddy_data_dir }}/logs"
        state: directory
        mode: "0755"
      become: true
      when: enable_country_blocking | default(false)
    - name: Display country blocking configuration
      ansible.builtin.debug:
        msg:
          - "✅ Country blocking enabled: {{ enable_country_blocking | default(false) }}"
          - "🛡️ Countries to allow: {{ allowed_countries_codes | default([]) | join(', ') }}"
          - "📍 Using MaxMind GeoLocation plugin"
          - "💾 Database path: /etc/caddy/geoip/GeoLite2-Country.mmdb"
          - "📊 Database available: {{ maxmind_country_db.stat.exists | default(false) }}"
      when: enable_country_blocking | default(false)
    - name: Warn if MaxMind database not found
      ansible.builtin.debug:
        msg:
          - "⚠️ WARNING: MaxMind Country database not found!"
          - "Expected location: {{ geoip_db_path }}/GeoLite2-Country.mmdb"
          - "Country blocking will not work until EchoIP service is deployed"
          - "Run: dotf update --ansible --tags echoip"
      when:
        - enable_country_blocking | default(false)
        - not maxmind_country_db.stat.exists | default(false)
  tags:
    - caddy
    - security
    - country-blocking
    - geoip

View File

@@ -0,0 +1,32 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
services:
  caddy:
    # Image is built locally from the Dockerfile deployed alongside this file
    build:
      context: .
      dockerfile: Dockerfile
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - {{ caddy_data_dir }}/data:/data
      - {{ caddy_data_dir }}/config:/config
      - {{ caddy_service_dir }}/Caddyfile:/etc/caddy/Caddyfile
      # GeoLite2 databases maintained by the echoip service tasks (read-only)
      - {{ geoip_db_path }}:/etc/caddy/geoip:ro
      - {{ caddy_data_dir }}/logs:/var/log/caddy
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=100
    extra_hosts:
      # Lets Caddy proxy to services published on the host's ports
      - "host.docker.internal:host-gateway"
    networks:
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 512M
networks:
  caddy_network:
    name: caddy_default
    enable_ipv6: true

View File

@@ -0,0 +1,324 @@
pageInfo:
title: Menno's Home
navLinks: []
sections:
- name: Selfhosted
items:
- title: Plex
icon: http://mennos-desktop:4000/assets/plex.svg
url: https://plex.mvl.sh
statusCheckUrl: https://plex.mvl.sh/identity
statusCheck: true
id: 0_1035_plex
- title: Tautulli
icon: http://mennos-desktop:4000/assets/tautulli.svg
url: https://tautulli.mvl.sh
id: 1_1035_tautulli
statusCheck: true
- title: Overseerr
icon: http://mennos-desktop:4000/assets/overseerr.svg
url: https://overseerr.mvl.sh
id: 2_1035_overseerr
statusCheck: true
- title: Immich
icon: http://mennos-desktop:4000/assets/immich.svg
url: https://photos.mvl.sh
id: 3_1035_immich
statusCheck: true
- title: Nextcloud
icon: http://mennos-desktop:4000/assets/nextcloud.svg
url: https://drive.mvl.sh
id: 3_1035_nxtcld
statusCheck: true
- title: ComfyUI
icon: http://mennos-desktop:8188/assets/favicon.ico
url: http://mennos-desktop:8188
statusCheckUrl: http://host.docker.internal:8188/api/system_stats
id: 3_1035_comfyui
statusCheck: true
displayData:
sortBy: default
rows: 1
cols: 2
collapsed: false
hideForGuests: false
- name: Media Management
items:
- title: Sonarr
icon: http://mennos-desktop:4000/assets/sonarr.svg
url: http://go/sonarr
id: 0_1533_sonarr
- title: Radarr
icon: http://mennos-desktop:4000/assets/radarr.svg
url: http://go/radarr
id: 1_1533_radarr
- title: Prowlarr
icon: http://mennos-desktop:4000/assets/prowlarr.svg
url: http://go/prowlarr
id: 2_1533_prowlarr
- title: Tdarr
icon: http://mennos-desktop:4000/assets/tdarr.png
url: http://go/tdarr
id: 3_1533_tdarr
- name: Kagi
items:
- title: Kagi Search
icon: favicon
url: https://kagi.com/
id: 0_380_kagisearch
- title: Kagi Translate
icon: favicon
url: https://translate.kagi.com/
id: 1_380_kagitranslate
- title: Kagi Assistant
icon: favicon
url: https://kagi.com/assistant
id: 2_380_kagiassistant
- name: News
items:
- title: Nu.nl
icon: http://mennos-desktop:4000/assets/nunl.svg
url: https://www.nu.nl/
id: 0_380_nu
- title: Tweakers.net
icon: favicon
url: https://www.tweakers.net/
id: 1_380_tweakers
- title: NL Times
icon: favicon
url: https://www.nltimes.nl/
id: 2_380_nl_times
- name: Downloaders
items:
- title: qBittorrent
icon: http://mennos-desktop:4000/assets/qbittorrent.svg
url: http://go/qbit
id: 0_1154_qbittorrent
tags:
- download
- torrent
- yarr
- title: Sabnzbd
icon: http://mennos-desktop:4000/assets/sabnzbd.svg
url: http://go/sabnzbd
id: 1_1154_sabnzbd
tags:
- download
- nzb
- yarr
- name: Git
items:
- title: GitHub
icon: http://mennos-desktop:4000/assets/github.svg
url: https://github.com/vleeuwenmenno
id: 0_292_github
tags:
- repo
- git
- hub
- title: Gitea
icon: http://mennos-desktop:4000/assets/gitea.svg
url: http://git.mvl.sh/vleeuwenmenno
id: 1_292_gitea
tags:
- repo
- git
- tea
- name: Server Monitoring
items:
- title: Beszel
icon: http://mennos-desktop:4000/assets/beszel.svg
url: http://go/beszel
tags:
- monitoring
- logs
id: 0_1725_beszel
- title: Dozzle
icon: http://mennos-desktop:4000/assets/dozzle.svg
url: http://go/dozzle
id: 1_1725_dozzle
tags:
- monitoring
- logs
- title: UpDown.io Status
icon: far fa-signal
url: http://go/status
id: 2_1725_updowniostatus
tags:
- monitoring
- logs
- name: Tools
items:
- title: Home Assistant
icon: http://mennos-desktop:4000/assets/home-assistant.svg
url: http://go/homeassistant
id: 0_529_homeassistant
- title: Tailscale
icon: http://mennos-desktop:4000/assets/tailscale.svg
url: http://go/tailscale
id: 1_529_tailscale
- title: GliNet KVM
icon: http://mennos-desktop:4000/assets/glinet.svg
url: http://go/glkvm
id: 2_529_glinetkvm
- title: Unifi Network Controller
icon: http://mennos-desktop:4000/assets/unifi.svg
url: http://go/unifi
id: 3_529_unifinetworkcontroller
- title: Dashboard Icons
icon: favicon
url: https://dashboardicons.com/
id: 4_529_dashboardicons
- name: Weather
items:
- title: Buienradar
icon: favicon
url: https://www.buienradar.nl/weer/Beverwijk/NL/2758998
id: 0_529_buienradar
- title: ClearOutside
icon: favicon
url: https://clearoutside.com/forecast/52.49/4.66
id: 1_529_clearoutside
- title: Windy
icon: favicon
url: https://www.windy.com/
id: 2_529_windy
- title: Meteoblue
icon: favicon
url: https://www.meteoblue.com/en/country/weather/radar/the-netherlands_the-netherlands_2750405
id: 2_529_meteoblue
- name: DiscountOffice
displayData:
sortBy: default
rows: 1
cols: 3
collapsed: false
hideForGuests: false
items:
- title: DiscountOffice.nl
icon: favicon
url: https://discountoffice.nl/
id: 0_1429_discountofficenl
tags:
- do
- discount
- work
- title: DiscountOffice.be
icon: favicon
url: https://discountoffice.be/
id: 1_1429_discountofficebe
tags:
- do
- discount
- work
- title: Admin NL
icon: favicon
url: https://discountoffice.nl/administrator
id: 2_1429_adminnl
tags:
- do
- discount
- work
- title: Admin BE
icon: favicon
url: https://discountoffice.be/administrator
id: 3_1429_adminbe
tags:
- do
- discount
- work
- title: Subsites
icon: favicon
url: https://elastomappen.nl
id: 4_1429_subsites
tags:
- do
- discount
- work
- title: Proxmox
icon: http://mennos-desktop:4000/assets/proxmox.svg
url: https://www.transip.nl/cp/vps/prm/350680/
id: 5_1429_proxmox
tags:
- do
- discount
- work
- title: Transip
icon: favicon
url: https://www.transip.nl/cp/vps/prm/350680/
id: 6_1429_transip
tags:
- do
- discount
- work
- title: Kibana
icon: http://mennos-desktop:4000/assets/kibana.svg
url: http://go/kibana
id: 7_1429_kibana
tags:
- do
- discount
- work
- name: Other
items:
- title: Whisparr
icon: http://mennos-desktop:4000/assets/whisparr.svg
url: http://go/whisparr
id: 0_514_whisparr
- title: Stash
icon: http://mennos-desktop:4000/assets/stash.svg
url: http://go/stash
id: 1_514_stash
displayData:
sortBy: default
rows: 1
cols: 1
collapsed: true
hideForGuests: true
appConfig:
layout: auto
iconSize: large
theme: nord
startingView: default
defaultOpeningMethod: sametab
statusCheck: false
statusCheckInterval: 0
routingMode: history
enableMultiTasking: false
widgetsAlwaysUseProxy: false
webSearch:
disableWebSearch: false
searchEngine: https://kagi.com/search?q=
openingMethod: newtab
searchBangs: {}
enableFontAwesome: true
enableMaterialDesignIcons: false
hideComponents:
hideHeading: false
hideNav: true
hideSearch: false
hideSettings: true
hideFooter: false
auth:
enableGuestAccess: false
users: []
enableOidc: false
oidc:
adminRole: "false"
adminGroup: "false"
enableHeaderAuth: false
headerAuth:
userHeader: REMOTE_USER
proxyWhitelist: []
enableKeycloak: false
showSplashScreen: false
preventWriteToDisk: false
preventLocalSave: false
disableConfiguration: false
disableConfigurationForNonAdmin: false
allowConfigEdit: true
enableServiceWorker: false
disableContextMenu: false
disableUpdateChecks: false
disableSmartSort: false
enableErrorReporting: false

View File

@@ -0,0 +1,44 @@
---
# Deploys the Dashy dashboard via docker compose.
- name: Deploy Dashy service
  block:
    - name: Set Dashy directories
      ansible.builtin.set_fact:
        dashy_service_dir: "{{ ansible_env.HOME }}/.services/dashy"
        dashy_data_dir: "/mnt/services/dashy"
    - name: Create Dashy directory
      ansible.builtin.file:
        path: "{{ dashy_service_dir }}"
        state: directory
        mode: "0755"
    - name: Create Dashy data directory
      ansible.builtin.file:
        path: "{{ dashy_data_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Dashy docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ dashy_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: dashy_compose
    - name: Deploy Dashy config.yml
      ansible.builtin.template:
        src: conf.yml.j2
        dest: "{{ dashy_data_dir }}/conf.yml"
        mode: "0644"
      register: dashy_config
    # Restart when either the compose file OR the Dashy config changed;
    # dashy_config was previously registered but never used, so config-only
    # changes did not restart the service.
    - name: Stop Dashy service
      ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" down --remove-orphans
      when: dashy_compose.changed or dashy_config.changed
    - name: Start Dashy service
      ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" up -d
      when: dashy_compose.changed or dashy_config.changed
  tags:
    - services
    - dashy

View File

@@ -0,0 +1,21 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
services:
  dashy:
    image: lissy93/dashy:latest
    restart: unless-stopped
    ports:
      # Quoted so the port mapping is always parsed as a string by YAML
      - "4000:8080"
    volumes:
      # conf.yml is deployed into this directory by the Ansible tasks
      - {{ dashy_data_dir }}/:/app/user-data
    networks:
      - caddy_network
    extra_hosts:
      - "host.docker.internal:host-gateway"
    deploy:
      resources:
        limits:
          memory: 2G
networks:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,71 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
name: downloaders
services:
  # VPN gateway — qbittorrent shares this container's network namespace,
  # so its published ports are declared here.
  gluetun:
    image: qmcgaw/gluetun:latest
    privileged: true
    cap_add:
      - NET_ADMIN
    networks:
      - arr_stack_net
    ports:
      # Quoted so port mappings are always parsed as strings by YAML
      - "6881:6881"
      - "6881:6881/udp"
      - "8085:8085" # Qbittorrent
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - {{ downloaders_data_dir }}/gluetun-config:/gluetun
    environment:
      - PUID=1000
      - PGID=100
      # VPN credentials fetched from 1Password at template render time
      - VPN_SERVICE_PROVIDER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='VPN_SERVICE_PROVIDER') }}
      - OPENVPN_USER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_USER') }}
      - OPENVPN_PASSWORD={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_PASSWORD') }}
      - SERVER_COUNTRIES={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='SERVER_COUNTRIES') }}
    restart: always
    deploy:
      resources:
        limits:
          memory: 512M
  sabnzbd:
    image: lscr.io/linuxserver/sabnzbd:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ downloaders_data_dir }}/sabnzbd-config:/config
      - {{ local_data_dir }}:{{ local_data_dir }}
    restart: unless-stopped
    ports:
      - "7788:8080"
    deploy:
      resources:
        limits:
          memory: 1G
  qbittorrent:
    # Explicit :latest tag for consistency with the other images here
    image: lscr.io/linuxserver/qbittorrent:latest
    # All traffic is routed through the gluetun VPN container
    network_mode: "service:gluetun"
    environment:
      - PUID=1000
      - PGID=100
      - WEBUI_PORT=8085
      - TZ=Europe/Amsterdam
    volumes:
      - {{ downloaders_data_dir }}/qbit-config:/config
      - {{ local_data_dir }}:{{ local_data_dir }}
    depends_on:
      gluetun:
        condition: service_healthy
    restart: always
    deploy:
      resources:
        limits:
          memory: 1G
networks:
  arr_stack_net:
    external: true
    name: arr_stack_net

View File

@@ -0,0 +1,47 @@
---
# Deploys the download stack (gluetun VPN + sabnzbd + qbittorrent) via compose.
- name: Deploy Downloaders service
  block:
    - name: Set Downloaders directories
      ansible.builtin.set_fact:
        local_data_dir: "/mnt/data"
        downloaders_service_dir: "{{ ansible_env.HOME }}/.services/downloaders"
        downloaders_data_dir: "/mnt/services/downloaders"
    - name: Create Downloaders directory
      ansible.builtin.file:
        path: "{{ downloaders_data_dir }}"
        state: directory
        mode: "0755"
    # become + explicit owner/group so the service dir belongs to the
    # connecting user even when escalation is needed to create the path
    - name: Create Downloaders service directory
      ansible.builtin.file:
        path: "{{ downloaders_service_dir }}"
        state: directory
        mode: "0755"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
      become: true
    - name: Deploy Downloaders docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ downloaders_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: downloaders_compose
    - name: Ensure arr_stack_net Docker network exists
      community.docker.docker_network:
        name: arr_stack_net
        driver: bridge
        state: present
    # Recreate containers only when the rendered compose file changed
    - name: Stop Downloaders service
      ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" down --remove-orphans
      when: downloaders_compose.changed
    - name: Start Downloaders service
      ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" up -d
      when: downloaders_compose.changed
  tags:
    - services
    - downloaders

View File

@@ -0,0 +1,23 @@
# Compose definition for Dozzle (container log viewer).
services:
  dozzle:
    image: amir20/dozzle:latest
    volumes:
      # Docker socket mount — Dozzle reads container data from the daemon
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      # Quoted so the port mapping is always parsed as a string by YAML
      - "8800:8080"
    environment:
      - DOZZLE_NO_ANALYTICS=true
    restart: unless-stopped
    networks:
      - dozzle-net
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 256M
networks:
  dozzle-net:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,37 @@
---
# Deploys the Dozzle log viewer via docker compose.
- name: Deploy Dozzle service
  block:
    - name: Set Dozzle directories
      ansible.builtin.set_fact:
        dozzle_service_dir: "{{ ansible_env.HOME }}/.services/dozzle"
        dozzle_data_dir: "/mnt/services/dozzle"
    - name: Create Dozzle directory
      ansible.builtin.file:
        path: "{{ dozzle_service_dir }}"
        state: directory
        mode: "0755"
    # NOTE(review): this data directory is created but the compose template
    # mounts no volume from it — confirm it is still needed.
    - name: Create Dozzle data directory
      ansible.builtin.file:
        path: "{{ dozzle_data_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Dozzle docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ dozzle_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: dozzle_compose
    # Recreate containers only when the rendered compose file changed
    - name: Stop Dozzle service
      ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" down --remove-orphans
      when: dozzle_compose.changed
    - name: Start Dozzle service
      ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" up -d
      when: dozzle_compose.changed
  tags:
    - services
    - dozzle

View File

@@ -0,0 +1,27 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
services:
  echoip:
    container_name: 'echoip'
    image: 'mpolden/echoip:latest'
    restart: unless-stopped
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - caddy_network
    volumes:
      # GeoLite2 databases are downloaded/refreshed by the echoip Ansible tasks
      - {{echoip_data_dir}}/GeoLite2-ASN.mmdb:/opt/echoip/GeoLite2-ASN.mmdb:ro
      - {{echoip_data_dir}}/GeoLite2-City.mmdb:/opt/echoip/GeoLite2-City.mmdb:ro
      - {{echoip_data_dir}}/GeoLite2-Country.mmdb:/opt/echoip/GeoLite2-Country.mmdb:ro
    # -a/-c/-f point at the mounted ASN/City/Country databases; -l sets the
    # listen address. -H "X-Forwarded-For" presumably trusts the
    # proxy-supplied client IP header — confirm against echoip docs.
    command: >
      -p -r -H "X-Forwarded-For" -l ":8080"
      -a /opt/echoip/GeoLite2-ASN.mmdb
      -c /opt/echoip/GeoLite2-City.mmdb
      -f /opt/echoip/GeoLite2-Country.mmdb
    deploy:
      resources:
        limits:
          memory: 128M
networks:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,169 @@
---
# Deploys the EchoIP service and keeps its MaxMind GeoLite2 databases fresh
# (downloaded at most once per 24 hours, tracked via a marker file).
- name: Deploy EchoIP service
  block:
    - name: Set EchoIP directories
      ansible.builtin.set_fact:
        echoip_service_dir: "{{ ansible_env.HOME }}/.services/echoip"
        echoip_data_dir: "/mnt/services/echoip"
        maxmind_account_id:
          "{{ lookup('community.general.onepassword', 'MaxMind',
          vault='Dotfiles', field='account_id') | regex_replace('\\s+', '') }}"
        maxmind_license_key:
          "{{ lookup('community.general.onepassword', 'MaxMind',
          vault='Dotfiles', field='license_key') | regex_replace('\\s+', '') }}"
    # Requires: gather_facts: true in playbook (ansible_date_time used below)
    - name: Check last update marker file
      ansible.builtin.stat:
        path: "{{ echoip_data_dir }}/.last_update"
      register: echoip_update_marker
    - name: Determine if update is needed (older than 24h or missing)
      ansible.builtin.set_fact:
        update_needed: "{{ (not echoip_update_marker.stat.exists) or ((ansible_date_time.epoch | int) - (echoip_update_marker.stat.mtime | default(0) | int) > 86400) }}"
    - name: Create EchoIP directory
      ansible.builtin.file:
        path: "{{ echoip_service_dir }}"
        state: directory
        mode: "0755"
    - name: Create EchoIP data directory
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}"
        state: directory
        mode: "0755"
    - name: Update GeoLite2 databases
      when: update_needed
      block:
        # Touch the marker BEFORE downloading so a failed download is not
        # retried on every run within the same 24h window
        - name: Update last update marker file (pre-download)
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/.last_update"
            state: touch
        - name: Create extraction directories
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/{{ geolite_edition }}"
            state: directory
            mode: "0755"
          loop:
            - GeoLite2-ASN
            - GeoLite2-City
            - GeoLite2-Country
          loop_control:
            loop_var: geolite_edition
        # NOTE(review): the license key is embedded in the URL and may end up
        # in task logs — consider adding no_log: true to the download tasks.
        - name: Download GeoLite2 ASN database
          ansible.builtin.get_url:
            url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key={{ maxmind_license_key }}&suffix=tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
            mode: "0644"
        - name: Download GeoLite2 City database
          ansible.builtin.get_url:
            url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={{ maxmind_license_key }}&suffix=tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
            mode: "0644"
        - name: Download GeoLite2 Country database
          ansible.builtin.get_url:
            url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key={{ maxmind_license_key }}&suffix=tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
            mode: "0644"
        - name: Extract GeoLite2 ASN database
          ansible.builtin.unarchive:
            src: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-ASN"
            remote_src: true
          register: asn_extracted
        - name: Extract GeoLite2 City database
          ansible.builtin.unarchive:
            src: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-City"
            remote_src: true
          register: city_extracted
        - name: Extract GeoLite2 Country database
          ansible.builtin.unarchive:
            src: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-Country"
            remote_src: true
          register: country_extracted
        # find is used because the .mmdb's exact path inside the extracted
        # tree is not fixed in advance
        - name: Move ASN database to correct location
          ansible.builtin.command:
            cmd: "find {{ echoip_data_dir }}/GeoLite2-ASN -name GeoLite2-ASN.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-ASN.mmdb \\;"
          when: asn_extracted.changed
        - name: Move City database to correct location
          ansible.builtin.command:
            cmd: "find {{ echoip_data_dir }}/GeoLite2-City -name GeoLite2-City.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-City.mmdb \\;"
          when: city_extracted.changed
        - name: Move Country database to correct location
          ansible.builtin.command:
            cmd: "find {{ echoip_data_dir }}/GeoLite2-Country -name GeoLite2-Country.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-Country.mmdb \\;"
          when: country_extracted.changed
        # Clean up with the file module (idempotent) instead of `rm -rf`
        # shelled out through the command module
        - name: Remove downloaded archives
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/{{ geolite_archive }}"
            state: absent
          loop:
            - GeoLite2-ASN.tar.gz
            - GeoLite2-City.tar.gz
            - GeoLite2-Country.tar.gz
          loop_control:
            loop_var: geolite_archive
        - name: Remove extraction directories
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/{{ geolite_dir }}"
            state: absent
          loop:
            - GeoLite2-ASN
            - GeoLite2-City
            - GeoLite2-Country
          loop_control:
            loop_var: geolite_dir
    # Deploy and (re)start the EchoIP service when the compose file changed
    - name: Deploy EchoIP docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ echoip_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: echoip_compose
    - name: Stop EchoIP service
      ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" down --remove-orphans
      when: echoip_compose.changed
    - name: Start EchoIP service
      ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" up -d
      when: echoip_compose.changed
  tags:
    - services
    - echoip

View File

@@ -0,0 +1,31 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
services:
  factorio-server-manager:
    image: "ofsm/ofsm:latest"
    restart: "unless-stopped"
    environment:
      - PUID=1000
      - PGID=100
      - "FACTORIO_VERSION=stable"
      # NOTE(review): RCON password is committed in plain text — consider a
      # 1Password lookup like the other service templates; confirm nothing
      # external depends on this exact value before rotating it.
      - "RCON_PASS=458fc84534"
    ports:
      - "5080:80"
      - "34197:34197/udp"
    volumes:
      - {{ factorio_data_dir }}/fsm-data:/opt/fsm-data
      - {{ factorio_data_dir }}/factorio-data/saves:/opt/factorio/saves
      - {{ factorio_data_dir }}/factorio-data/mods:/opt/factorio/mods
      - {{ factorio_data_dir }}/factorio-data/config:/opt/factorio/config
      - {{ factorio_data_dir }}/factorio-data/mod_packs:/opt/fsm/mod_packs
    networks:
      - factorio
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 2G
networks:
  factorio:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,31 @@
---
# Deploys the Factorio Server Manager via docker compose.
- name: Deploy Factorio service
  block:
    - name: Set Factorio directories
      ansible.builtin.set_fact:
        factorio_service_dir: "{{ ansible_env.HOME }}/.services/factorio"
        factorio_data_dir: "/mnt/services/factorio"
    - name: Create Factorio directory
      ansible.builtin.file:
        path: "{{ factorio_service_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Factorio docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ factorio_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: factorio_compose
    # Recreate containers only when the rendered compose file changed
    - name: Stop Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" down --remove-orphans
      when: factorio_compose.changed
    - name: Start Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" up -d
      when: factorio_compose.changed
  tags:
    - services
    - factorio

View File

@@ -0,0 +1,98 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent: /tmp/act_runner

View File

@@ -0,0 +1,66 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
services:
  gitea:
    image: gitea/gitea:latest
    restart: always
    environment:
      - PUID=1000
      - PGID=100
    volumes:
      - {{gitea_data_dir}}/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3030:3000"
      # NOTE(review): publishes Gitea SSH on host port 22 — confirm the
      # host's own sshd listens elsewhere, or this bind will fail/conflict.
      - "22:22"
    networks:
      - gitea
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 1G
  # Database backend — attached only to the internal gitea network,
  # no host ports published
  postgres:
    image: postgres:15-alpine
    restart: always
    environment:
      - PUID=1000
      - PGID=100
      - POSTGRES_USER=gitea
      # Password fetched from 1Password at template render time
      - POSTGRES_PASSWORD={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='POSTGRES_PASSWORD') }}
      - POSTGRES_DB=gitea
    volumes:
      - {{gitea_data_dir}}/postgres:/var/lib/postgresql/data
    networks:
      - gitea
    deploy:
      resources:
        limits:
          memory: 1G
  # CI runner; its config file is templated by the gitea Ansible tasks
  act_runner:
    image: gitea/act_runner:latest
    volumes:
      - {{gitea_service_dir}}/act-runner-config.yaml:/config.yaml
      # Docker socket mount gives the runner control of the host daemon
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp/act_runner:/tmp/act_runner
    environment:
      - PUID=1000
      - PGID=100
      - GITEA_INSTANCE_URL=https://git.mvl.sh
      - GITEA_RUNNER_REGISTRATION_TOKEN={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='GITEA_RUNNER_REGISTRATION_TOKEN') }}
      - GITEA_RUNNER_NAME=act-worker
      - CONFIG_FILE=/config.yaml
    restart: always
    networks:
      - gitea
    deploy:
      resources:
        limits:
          memory: 2G
networks:
  gitea:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,44 @@
---
# Deploys Gitea with a PostgreSQL database and an act_runner CI worker.
- name: Deploy Gitea service
  block:
    - name: Set Gitea directories
      ansible.builtin.set_fact:
        gitea_data_dir: "/mnt/services/gitea"
        gitea_service_dir: "{{ ansible_env.HOME }}/.services/gitea"
    - name: Create Gitea directories
      ansible.builtin.file:
        path: "{{ gitea_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ gitea_data_dir }}"
        - "{{ gitea_service_dir }}"
      loop_control:
        loop_var: gitea_dir
    - name: Deploy Gitea docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ gitea_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: gitea_compose
    - name: Deploy Gitea act-runner-config.yaml
      ansible.builtin.template:
        src: act-runner-config.yaml.j2
        dest: "{{ gitea_service_dir }}/act-runner-config.yaml"
        mode: "0644"
      register: gitea_act_runner_config
    # Recreate containers when either rendered file changed
    - name: Stop Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" down --remove-orphans
      when: gitea_compose.changed or gitea_act_runner_config.changed
    - name: Start Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" up -d
      when: gitea_compose.changed or gitea_act_runner_config.changed
  tags:
    - services
    - gitea

View File

@@ -0,0 +1,14 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
name: golink
services:
  server:
    image: ghcr.io/tailscale/golink:main
    # NOTE(review): runs as root with state mounted at /home/nonroot —
    # presumably for volume permissions; confirm this is still required.
    user: root
    environment:
      # Tailscale auth key fetched from 1Password at template render time
      - TS_AUTHKEY={{ lookup('community.general.onepassword', 'GoLink', vault='Dotfiles', field='TS_AUTHKEY') }}
    volumes:
      - {{ golink_data_dir }}:/home/nonroot
    restart: "unless-stopped"
    deploy:
      resources:
        limits:
          memory: 256M

View File

@@ -0,0 +1,36 @@
---
# Deploys the Tailscale golink (go/<name> short links) service via compose.
- name: Deploy GoLink service
  block:
    - name: Set GoLink directories
      ansible.builtin.set_fact:
        golink_data_dir: "/mnt/services/golink"
        golink_service_dir: "{{ ansible_env.HOME }}/.services/golink"
    - name: Create GoLink directories
      ansible.builtin.file:
        path: "{{ golink_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ golink_data_dir }}"
        - "{{ golink_service_dir }}"
      loop_control:
        loop_var: golink_dir
    - name: Deploy GoLink docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ golink_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: golink_compose
    # Recreate containers only when the rendered compose file changed
    - name: Stop GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" down --remove-orphans
      when: golink_compose.changed
    - name: Start GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" up -d
      when: golink_compose.changed
  tags:
    - services
    - golink

View File

@@ -0,0 +1,21 @@
# Jinja2 template — rendered by Ansible before docker compose parses it.
services:
  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    volumes:
      - "/var/run/dbus:/run/dbus:ro"
      - {{ homeassistant_data_dir }}:/config
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      # NOTE(review): other services in this repo use PGID=100 — confirm
      # that 1000 is intentional here.
      - PGID=1000
    restart: unless-stopped
    # privileged + host networking give the container broad host access
    privileged: true
    network_mode: host
    devices:
      # USB serial passthrough — presumably a Zigbee/Z-Wave stick; confirm
      - /dev/ttyUSB0:/dev/ttyUSB0
    deploy:
      resources:
        limits:
          memory: 2G

View File

@@ -0,0 +1,36 @@
---
# Renders the Home Assistant compose file into the user's service directory
# and recreates the stack when the rendered file changes.
- name: Deploy Home Assistant service
  block:
    - name: Set Home Assistant directories
      ansible.builtin.set_fact:
        homeassistant_data_dir: "/mnt/services/homeassistant"
        homeassistant_service_dir: "{{ ansible_env.HOME }}/.services/homeassistant"
    - name: Create Home Assistant directories
      ansible.builtin.file:
        path: "{{ homeassistant_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ homeassistant_data_dir }}"
        - "{{ homeassistant_service_dir }}"
      loop_control:
        # Dedicated loop var avoids clobbering `item` in any enclosing loop.
        loop_var: homeassistant_dir
    - name: Deploy Home Assistant docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ homeassistant_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: homeassistant_compose
    # Down/up only when the rendered compose file actually changed.
    - name: Stop Home Assistant service
      ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" down --remove-orphans
      when: homeassistant_compose.changed
    - name: Start Home Assistant service
      ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" up -d
      when: homeassistant_compose.changed
  tags:
    - services
    - homeassistant

View File

@@ -0,0 +1,123 @@
# Immich photo-management stack (templated by Ansible): server + machine
# learning with NVIDIA CUDA acceleration, Redis, and a pgvecto.rs Postgres.
# Redis and Postgres are pinned by digest.
services:
  immich:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    volumes:
      - {{ immich_data_dir }}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      # NOTE(review): PGID=100 here vs PGID=1000 on the database service —
      # confirm the group mismatch is intentional.
      - PGID=100
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich
      - caddy_network
    runtime: nvidia
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
  machine-learning:
    # CUDA build of the ML container; downloaded models are cached in a
    # named volume so restarts don't re-fetch them.
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich
    runtime: nvidia
    deploy:
      resources:
        limits:
          memory: 8G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: unless-stopped
    networks:
      - immich
    deploy:
      resources:
        limits:
          memory: 1G
  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    environment:
      PUID: 1000
      PGID: 1000
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      - {{ immich_database_dir }}:/var/lib/postgresql/data
    healthcheck:
      # Fails when Postgres is unreachable or when any data-checksum failure
      # has been recorded (checksums enabled via POSTGRES_INITDB_ARGS above).
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    restart: unless-stopped
    networks:
      - immich
    deploy:
      resources:
        limits:
          memory: 2G
volumes:
  model-cache:
networks:
  immich:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,10 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Europe/Amsterdam
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
DB_USERNAME=postgres
# NOTE(review): default postgres/postgres credentials — the database is only
# reachable on the internal compose network, but DB_PASSWORD should still be
# overridden with a generated secret (e.g. a 1Password lookup, as the other
# services in this repo do).
DB_PASSWORD=postgres
DB_DATABASE_NAME=immich

View File

@@ -0,0 +1,44 @@
---
# Renders the Immich compose file and .env into the user's service directory
# and recreates the stack when either rendered file changes.
- name: Deploy Immich service
  block:
    - name: Set Immich directories
      ansible.builtin.set_fact:
        immich_data_dir: "/mnt/data/photos/immich-library"
        immich_database_dir: "/mnt/services/immich/postgres"
        immich_service_dir: "{{ ansible_env.HOME }}/.services/immich"
    - name: Create Immich directories
      ansible.builtin.file:
        path: "{{ immich_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ immich_data_dir }}"
        # Previously missing: the Postgres data dir was only ever created
        # implicitly by Docker's bind-mount handling (as root).
        - "{{ immich_database_dir }}"
        - "{{ immich_service_dir }}"
      loop_control:
        loop_var: immich_dir
    - name: Deploy Immich docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ immich_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: immich_compose
    - name: Deploy Immich .env
      ansible.builtin.template:
        src: dotenv.j2
        dest: "{{ immich_service_dir }}/.env"
        # 0600: the .env carries database credentials.
        mode: "0600"
      # BUG FIX: this task previously re-registered `immich_compose`,
      # clobbering the compose-template result — a compose-only change would
      # never trigger the restart below.
      register: immich_env
    - name: Stop Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" down --remove-orphans
      when: immich_compose.changed or immich_env.changed
    - name: Start Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" up -d
      when: immich_compose.changed or immich_env.changed
  tags:
    - services
    - immich

View File

@@ -0,0 +1,73 @@
# Nextcloud + MariaDB + Redis stack (templated by Ansible).
services:
  nextcloud:
    # NOTE(review): image is unpinned (implicit :latest) while the DB is
    # pinned to mariadb:11.4.7 — consider pinning at least a major version.
    image: nextcloud
    container_name: nextcloud
    restart: unless-stopped
    networks:
      - nextcloud
      - caddy_network
    depends_on:
      - nextclouddb
      - redis
    ports:
      # Quoted: unquoted HOST:CONTAINER mappings can hit YAML's base-60
      # integer parsing in the rendered compose file.
      - "8081:80"
    volumes:
      # Quoted so the rendered file stays valid YAML regardless of the path.
      - "{{ nextcloud_data_dir }}/nextcloud/html:/var/www/html"
      - "{{ nextcloud_data_dir }}/nextcloud/custom_apps:/var/www/html/custom_apps"
      - "{{ nextcloud_data_dir }}/nextcloud/config:/var/www/html/config"
      - "{{ nextcloud_data_dir }}/nextcloud/data:/var/www/html/data"
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      # DB password injected from 1Password at template render time.
      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
      - MYSQL_HOST=nextclouddb
      - REDIS_HOST=redis
    deploy:
      resources:
        limits:
          memory: 2G
  nextclouddb:
    image: mariadb:11.4.7
    container_name: nextcloud-db
    restart: unless-stopped
    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
    networks:
      - nextcloud
    volumes:
      - "{{ nextcloud_data_dir }}/database:/var/lib/mysql"
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - MYSQL_RANDOM_ROOT_PASSWORD=true
      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
    deploy:
      resources:
        limits:
          memory: 1G
  redis:
    image: redis:alpine
    # NOTE(review): the generic container name `redis` can collide with other
    # stacks on the same host — consider `nextcloud-redis`.
    container_name: redis
    volumes:
      - "{{ nextcloud_data_dir }}/redis:/data"
    networks:
      - nextcloud
    deploy:
      resources:
        limits:
          memory: 512M
networks:
  nextcloud:
    name: nextcloud
    driver: bridge
  caddy_network:
    name: caddy_default
    external: true

View File

@@ -0,0 +1,31 @@
---
# Renders the Nextcloud compose file and recreates the stack when it changes.
# Only the compose directory is created here; the data dirs under
# nextcloud_data_dir are created by Docker's bind-mount handling.
- name: Deploy Nextcloud service
  block:
    - name: Set Nextcloud directories
      ansible.builtin.set_fact:
        nextcloud_service_dir: "{{ ansible_env.HOME }}/.services/nextcloud"
        nextcloud_data_dir: "/mnt/services/nextcloud"
    - name: Create Nextcloud directory
      ansible.builtin.file:
        path: "{{ nextcloud_service_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Nextcloud docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ nextcloud_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: nextcloud_compose
    # Down/up only when the rendered compose file actually changed.
    - name: Stop Nextcloud service
      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" down --remove-orphans
      when: nextcloud_compose.changed
    - name: Start Nextcloud service
      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" up -d
      when: nextcloud_compose.changed
  tags:
    - services
    - nextcloud

View File

@@ -0,0 +1,29 @@
# Plex Media Server with NVIDIA hardware transcoding (templated by Ansible).
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    # Host networking — presumably for LAN discovery (GDM/DLNA); confirm.
    network_mode: host
    restart: unless-stopped
    runtime: nvidia
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - VERSION=docker
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    volumes:
      - {{ plex_data_dir }}/config:/config
      - {{ plex_data_dir }}/transcode:/transcode
      - /mnt/data/movies:/movies
      - /mnt/data/tvshows:/tvshows
      # Name suggests a slower object-storage tier for shows — TODO confirm.
      - /mnt/object_storage/tvshows:/tvshows_slow
      - /mnt/data/music:/music
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

View File

@@ -0,0 +1,36 @@
---
# Renders the Plex compose file into the user's service directory and
# recreates the stack when the rendered file changes.
- name: Deploy Plex service
  block:
    - name: Set Plex directories
      ansible.builtin.set_fact:
        plex_data_dir: "/mnt/services/plex"
        plex_service_dir: "{{ ansible_env.HOME }}/.services/plex"
    - name: Create Plex directories
      ansible.builtin.file:
        path: "{{ plex_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ plex_data_dir }}"
        - "{{ plex_service_dir }}"
      loop_control:
        # Dedicated loop var avoids clobbering `item` in any enclosing loop.
        loop_var: plex_dir
    - name: Deploy Plex docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ plex_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: plex_compose
    # Down/up only when the rendered compose file actually changed.
    - name: Stop Plex service
      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" down --remove-orphans
      when: plex_compose.changed
    - name: Start Plex service
      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" up -d
      when: plex_compose.changed
  tags:
    - services
    - plex

View File

@@ -0,0 +1,300 @@
;<?php http_response_code(403); /*
; config file for PrivateBin
;
; An explanation of each setting can be found online at https://github.com/PrivateBin/PrivateBin/wiki/Configuration.
[main]
; (optional) set a project name to be displayed on the website
; name = "PrivateBin"
; The full URL, with the domain name and directories that point to the
; PrivateBin files, including an ending slash (/). This URL is essential to
; allow Opengraph images to be displayed on social networks.
basepath = "https://bin.mvl.sh/"
; enable or disable the discussion feature, defaults to true
discussion = false
; preselect the discussion feature, defaults to false
opendiscussion = false
; enable or disable the display of dates & times in the comments, defaults to true
; Note that internally the creation time will still get tracked in order to sort
; the comments by creation time, but you can choose not to display them.
; discussiondatedisplay = false
; enable or disable the password feature, defaults to true
password = true
; enable or disable the file upload feature, defaults to false
fileupload = false
; preselect the burn-after-reading feature, defaults to false
burnafterreadingselected = false
; which display mode to preselect by default, defaults to "plaintext"
; make sure the value exists in [formatter_options]
defaultformatter = "plaintext"
; (optional) set a syntax highlighting theme, as found in css/prettify/
; syntaxhighlightingtheme = "sons-of-obsidian"
; size limit per paste or comment in bytes, defaults to 10 Mebibytes
sizelimit = 10485760
; by default PrivateBin use "bootstrap" template (tpl/bootstrap.php).
; Optionally you can enable the template selection menu, which uses
; a session cookie to store the choice until the browser is closed.
templateselection = false
; List of available for selection templates when "templateselection" option is enabled
availabletemplates[] = "bootstrap5"
availabletemplates[] = "bootstrap"
availabletemplates[] = "bootstrap-page"
availabletemplates[] = "bootstrap-dark"
availabletemplates[] = "bootstrap-dark-page"
availabletemplates[] = "bootstrap-compact"
availabletemplates[] = "bootstrap-compact-page"
; set the template your installs defaults to, defaults to "bootstrap" (tpl/bootstrap.php), also
; bootstrap variants: "bootstrap-dark", "bootstrap-compact", "bootstrap-page",
; which can be combined with "-dark" and "-compact" for "bootstrap-dark-page",
; "bootstrap-compact-page" and finally "bootstrap5" (tpl/bootstrap5.php) - previews at:
; https://privatebin.info/screenshots.html
; template = "bootstrap"
; (optional) info text to display
; use single, instead of double quotes for HTML attributes
;info = "More information on the <a href='https://privatebin.info/'>project page</a>."
; (optional) notice to display
; notice = "Note: This is a test service: Data may be deleted anytime. Kittens will die if you abuse this service."
; by default PrivateBin will guess the visitors language based on the browsers
; settings. Optionally you can enable the language selection menu, which uses
; a session cookie to store the choice until the browser is closed.
languageselection = false
; set the language your installs defaults to, defaults to English
; if this is set and language selection is disabled, this will be the only language
; languagedefault = "en"
; (optional) URL shortener address to offer after a new paste is created.
; It is suggested to only use this with self-hosted shorteners as this will leak
; the pastes encryption key.
; urlshortener = "https://shortener.example.com/api?link="
; (optional) Let users create a QR code for sharing the paste URL with one click.
; It works both when a new paste is created and when you view a paste.
qrcode = true
; (optional) Let users send an email sharing the paste URL with one click.
; It works both when a new paste is created and when you view a paste.
; email = true
; (optional) IP based icons are a weak mechanism to detect if a comment was from
; a different user when the same username was used in a comment. It might get
; used to get the IP of a comment poster if the server salt is leaked and a
; SHA512 HMAC rainbow table is generated for all (relevant) IPs.
; Can be set to one these values:
; "none" / "identicon" / "jdenticon" (default) / "vizhash".
; icon = "none"
; Content Security Policy headers allow a website to restrict what sources are
; allowed to be accessed in its context. You need to change this if you added
; custom scripts from third-party domains to your templates, e.g. tracking
; scripts or run your site behind certain DDoS-protection services.
; Check the documentation at https://content-security-policy.com/
; Notes:
; - If you use the bootstrap5 theme, you must change default-src to 'self' to
; enable display of the svg icons
; - By default this disallows to load images from third-party servers, e.g. when
; they are embedded in pastes. If you wish to allow that, you can adjust the
; policy here. See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-not-it-load-embedded-images
; for details.
; - The 'wasm-unsafe-eval' is used to enable webassembly support (used for zlib
; compression). You can remove it if compression doesn't need to be supported.
; - The 'unsafe-inline' style-src is used by Chrome when displaying PDF previews
; and can be omitted if attachment upload is disabled (which is the default).
; See https://issues.chromium.org/issues/343754409
; - To allow displaying PDF previews in Firefox or Chrome, sandboxing must also
; get turned off. The following CSP allows PDF previews:
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'wasm-unsafe-eval'; style-src 'self' 'unsafe-inline'; font-src 'self'; frame-ancestors 'none'; frame-src blob:; img-src 'self' data: blob:; media-src blob:; object-src blob:"
;
; The recommended and default used CSP is:
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'wasm-unsafe-eval'; style-src 'self'; font-src 'self'; frame-ancestors 'none'; frame-src blob:; img-src 'self' data: blob:; media-src blob:; object-src blob:; sandbox allow-same-origin allow-scripts allow-forms allow-modals allow-downloads"
; stay compatible with PrivateBin Alpha 0.19, less secure
; if enabled will use base64.js version 1.7 instead of 2.1.9 and sha1 instead of
; sha256 in HMAC for the deletion token
; zerobincompatibility = false
; Enable or disable the warning message when the site is served over an insecure
; connection (insecure HTTP instead of HTTPS), defaults to true.
; Secure transport methods like Tor and I2P domains are automatically whitelisted.
; It is **strongly discouraged** to disable this.
; See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-it-show-me-an-error-about-an-insecure-connection for more information.
; httpwarning = true
; Pick compression algorithm or disable it. Only applies to pastes/comments
; created after changing the setting.
; Can be set to one these values: "none" / "zlib" (default).
; compression = "zlib"
[expire]
; expire value that is selected per default
; make sure the value exists in [expire_options]
default = "1week"
[expire_options]
; Set each one of these to the number of seconds in the expiration period,
; or 0 if it should never expire
5min = 300
10min = 600
1hour = 3600
1day = 86400
1week = 604800
; Well this is not *exactly* one month, it's 30 days:
1month = 2592000
1year = 31536000
never = 0
[formatter_options]
; Set available formatters, their order and their labels
plaintext = "Plain Text"
syntaxhighlighting = "Source Code"
markdown = "Markdown"
[traffic]
; time limit between calls from the same IP address in seconds
; Set this to 0 to disable rate limiting.
limit = 10
; (optional) Set IPs addresses (v4 or v6) or subnets (CIDR) which are exempted
; from the rate-limit. Invalid IPs will be ignored. If multiple values are to
; be exempted, the list needs to be comma separated. Leave unset to disable
; exemptions.
; exempted = "1.2.3.4,10.10.10/24"
; (optional) If you want only some source IP addresses (v4 or v6) or subnets
; (CIDR) to be allowed to create pastes, set these here. Invalid IPs will be
; ignored. If multiple values are to be exempted, the list needs to be comma
; separated. Leave unset to allow anyone to create pastes.
; creators = "1.2.3.4,10.10.10/24"
; (optional) if your website runs behind a reverse proxy or load balancer,
; set the HTTP header containing the visitors IP address, i.e. X_FORWARDED_FOR
; header = "X_FORWARDED_FOR"
[purge]
; minimum time limit between two purgings of expired pastes, it is only
; triggered when pastes are created
; Set this to 0 to run a purge every time a paste is created.
limit = 300
; maximum amount of expired pastes to delete in one purge
; Set this to 0 to disable purging. Set it higher, if you are running a large
; site
batchsize = 10
[model]
; name of data model class to load and directory for storage
; the default model "Filesystem" stores everything in the filesystem
class = Filesystem
[model_options]
dir = PATH "data"
;[model]
; example of a Google Cloud Storage configuration
;class = GoogleCloudStorage
;[model_options]
;bucket = "my-private-bin"
;prefix = "pastes"
;uniformacl = false
;[model]
; example of DB configuration for MySQL
;class = Database
;[model_options]
;dsn = "mysql:host=localhost;dbname=privatebin;charset=UTF8"
;tbl = "privatebin_" ; table prefix
;usr = "privatebin"
;pwd = "Z3r0P4ss"
;opt[12] = true ; PDO::ATTR_PERSISTENT
;[model]
; example of DB configuration for SQLite
;class = Database
;[model_options]
;dsn = "sqlite:" PATH "data/db.sq3"
;usr = null
;pwd = null
;opt[12] = true ; PDO::ATTR_PERSISTENT
;[model]
; example of DB configuration for PostgreSQL
;class = Database
;[model_options]
;dsn = "pgsql:host=localhost;dbname=privatebin"
;tbl = "privatebin_" ; table prefix
;usr = "privatebin"
;pwd = "Z3r0P4ss"
;opt[12] = true ; PDO::ATTR_PERSISTENT
;[model]
; example of S3 configuration for Rados gateway / CEPH
;class = S3Storage
;[model_options]
;region = ""
;version = "2006-03-01"
;endpoint = "https://s3.my-ceph.invalid"
;use_path_style_endpoint = true
;bucket = "my-bucket"
;accesskey = "my-rados-user"
;secretkey = "my-rados-pass"
;[model]
; example of S3 configuration for AWS
;class = S3Storage
;[model_options]
;region = "eu-central-1"
;version = "latest"
;bucket = "my-bucket"
;accesskey = "access key id"
;secretkey = "secret access key"
;[model]
; example of S3 configuration for AWS using its SDK default credential provider chain
; if relying on environment variables, the AWS SDK will look for the following:
; - AWS_ACCESS_KEY_ID
; - AWS_SECRET_ACCESS_KEY
; - AWS_SESSION_TOKEN (if needed)
; for more details, see https://docs.aws.amazon.com/sdk-for-php/v3/developer-guide/guide_credentials.html#default-credential-chain
;class = S3Storage
;[model_options]
;region = "eu-central-1"
;version = "latest"
;bucket = "my-bucket"
;[yourls]
; When using YOURLS as a "urlshortener" config item:
; - By default, "urlshortener" will point to the YOURLS API URL, with or without
; credentials, and will be visible in public on the PrivateBin web page.
; Only use this if you allow short URL creation without credentials.
; - Alternatively, using the parameters in this section ("signature" and
; "apiurl"), "urlshortener" needs to point to the base URL of your PrivateBin
; instance with "?shortenviayourls&link=" appended. For example:
; urlshortener = "${basepath}?shortenviayourls&link="
; This URL will in turn call YOURLS on the server side, using the URL from
; "apiurl" and the "access signature" from the "signature" parameters below.
; (optional) the "signature" (access key) issued by YOURLS for the using account
; signature = ""
; (optional) the URL of the YOURLS API, called to shorten a PrivateBin URL
; apiurl = "https://yourls.example.com/yourls-api.php"
;[sri]
; Subresource integrity (SRI) hashes used in template files. Uncomment and set
; these for all js files used. See:
; https://github.com/PrivateBin/PrivateBin/wiki/FAQ#user-content-how-to-make-privatebin-work-when-i-have-changed-some-javascript-files
;js/privatebin.js = "sha512-[…]"

View File

@@ -0,0 +1,33 @@
# PrivateBin behind the shared Caddy reverse-proxy network (templated by Ansible).
services:
  privatebin:
    image: privatebin/nginx-fpm-alpine:latest
    container_name: privatebin
    restart: always
    # Hardened: read-only root filesystem, non-root uid:gid.
    read_only: true
    user: "1000:1000"
    ports:
      - "8585:8080"
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - {{ privatebin_data_dir }}:/srv/data
      # conf.php is rendered by the Ansible role and mounted read-only.
      - {{ privatebin_service_dir }}/conf.php:/srv/cfg/conf.php:ro
    healthcheck:
      # TCP probe against the internal nginx port.
      test: ["CMD-SHELL", "nc -z 127.0.0.1 8080 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    networks:
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 256M
networks:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,42 @@
---
# Renders the PrivateBin compose file and conf.php and recreates the stack
# when either rendered file changes.
- name: Deploy PrivateBin service
  block:
    - name: Set PrivateBin directories
      ansible.builtin.set_fact:
        privatebin_data_dir: "/mnt/services/privatebin"
        privatebin_service_dir: "{{ ansible_env.HOME }}/.services/privatebin"
    - name: Create PrivateBin directories
      ansible.builtin.file:
        path: "{{ privatebin_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ privatebin_data_dir }}"
        - "{{ privatebin_service_dir }}"
      loop_control:
        loop_var: privatebin_dir
    - name: Deploy PrivateBin docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ privatebin_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: privatebin_compose
    - name: Deploy PrivateBin conf.php
      ansible.builtin.template:
        src: conf.php.j2
        dest: "{{ privatebin_service_dir }}/conf.php"
        mode: "0644"
      # BUG FIX: this deploy was not registered, so a config-only change
      # never restarted the container even though conf.php is bind-mounted
      # read-only and only re-read on container start.
      register: privatebin_conf
    - name: Stop PrivateBin service
      ansible.builtin.command: docker compose -f "{{ privatebin_service_dir }}/docker-compose.yml" down --remove-orphans
      when: privatebin_compose.changed or privatebin_conf.changed
    - name: Start PrivateBin service
      ansible.builtin.command: docker compose -f "{{ privatebin_service_dir }}/docker-compose.yml" up -d
      when: privatebin_compose.changed or privatebin_conf.changed
  tags:
    - services
    - privatebin

View File

@@ -0,0 +1,26 @@
# Redis metadata store for JuiceFS (templated by Ansible).
services:
  juicefs-redis:
    image: redis:latest
    restart: always
    ports:
      # NOTE(review): published on all host interfaces — confirm the host
      # firewall restricts access to 6379.
      - "6379:6379"
    volumes:
      - /mnt/services/redis:/data
    # AOF persistence on; password injected from 1Password at render time.
    # NOTE(review): the password in `command` is visible via `docker inspect`
    # and in the rendered compose file on disk — confirm file permissions.
    command: ["redis-server", "--appendonly", "yes", "--requirepass", "{{ REDIS_PASSWORD }}"]
    environment:
      - TZ=Europe/Amsterdam
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "{{ REDIS_PASSWORD }}", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 5s
    networks:
      - juicefs-network
    deploy:
      resources:
        limits:
          memory: 256M
networks:
  juicefs-network:

View File

@@ -0,0 +1,80 @@
---
# Deploys the JuiceFS metadata Redis. Because JuiceFS and every container on
# the mount depend on it, a compose change triggers a full stop (juicefs
# service + all running containers), Redis restart, then restoration.
- name: Deploy Redis for JuiceFS
  block:
    - name: Set Redis facts
      ansible.builtin.set_fact:
        redis_service_dir: "{{ ansible_env.HOME }}/.services/juicefs-redis"
        redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"
    - name: Create Redis service directory
      ansible.builtin.file:
        path: "{{ redis_service_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Redis docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ redis_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: redis_compose
      vars:
        REDIS_PASSWORD: "{{ redis_password }}"
    - name: Check if juicefs.service exists
      ansible.builtin.stat:
        path: /etc/systemd/system/juicefs.service
      register: juicefs_service_stat
    - name: Stop juicefs.service to umount JuiceFS
      ansible.builtin.systemd:
        name: juicefs.service
        state: stopped
        enabled: false
      register: juicefs_stop
      changed_when: juicefs_stop.changed
      when: redis_compose.changed and juicefs_service_stat.stat.exists
    - name: List containers that are running
      ansible.builtin.command: docker ps -q
      register: docker_ps
      # BUG FIX: was `changed_when: docker_ps.rc == 0`, which reported a
      # read-only listing as "changed" on every run.
      changed_when: false
      when: redis_compose.changed
    - name: Stop all docker containers
      ansible.builtin.command: docker stop {{ item }}
      # BUG FIX: Ansible evaluates `loop` before `when`, so without
      # default([]) a skipped docker_ps (compose unchanged) fails the play
      # with "stdout_lines is undefined".
      loop: "{{ docker_ps.stdout_lines | default([]) }}"
      register: docker_stop
      changed_when: docker_stop.rc == 0
      when: redis_compose.changed
      ignore_errors: true
    - name: Start Redis service
      ansible.builtin.command: docker compose -f "{{ redis_service_dir }}/docker-compose.yml" up -d
      register: redis_start
      changed_when: redis_start.rc == 0
    - name: Wait for Redis to be ready
      ansible.builtin.wait_for:
        host: localhost
        port: 6379
        timeout: 30
    - name: Start juicefs.service to mount JuiceFS
      ansible.builtin.systemd:
        name: juicefs.service
        state: started
        enabled: true
      register: juicefs_start
      changed_when: juicefs_start.changed
      when: juicefs_service_stat.stat.exists
    - name: Restart containers that were stopped
      ansible.builtin.command: docker start {{ item }}
      # BUG FIX: same loop-before-when guard as above for docker_stop.results.
      loop: "{{ docker_stop.results | default([]) | map(attribute='item') | list }}"
      register: docker_restart
      changed_when: docker_restart.rc == 0
      when: redis_compose.changed
  tags:
    - services
    - redis

View File

@@ -0,0 +1,43 @@
---
# Tears down services whose entry in the `services` list has enabled: false:
# stops the compose stack (if a compose file exists) and removes the
# per-service directory under ~/.services.
# Assumes each item exposes `name` and `enabled` — TODO confirm against the
# vars that define `services`.
# NOTE(review): unlike the service roles, this file carries no tags.
- name: Cleanup disabled services
  block:
    - name: Prepare cleanup list
      ansible.builtin.set_fact:
        services_to_cleanup: "{{ services | selectattr('enabled', 'equalto', false) | list }}"
    - name: Check service directories existence for disabled services
      ansible.builtin.stat:
        path: "{{ ansible_env.HOME }}/.services/{{ item.name }}"
      register: service_dir_results
      loop: "{{ services_to_cleanup }}"
      loop_control:
        label: "{{ item.name }}"
    - name: Filter services with existing directories
      ansible.builtin.set_fact:
        services_with_dirs: "{{ service_dir_results.results | selectattr('stat.exists', 'equalto', true) | map(attribute='item') | list }}"
    - name: Check if docker-compose file exists for services to cleanup
      ansible.builtin.stat:
        path: "{{ ansible_env.HOME }}/.services/{{ item.name }}/docker-compose.yml"
      register: compose_file_results
      loop: "{{ services_with_dirs }}"
      loop_control:
        label: "{{ item.name }}"
    - name: Stop disabled services with docker-compose files
      # `item` here is a stat result, hence item.item.name for the service name.
      ansible.builtin.command: docker compose -f "{{ ansible_env.HOME }}/.services/{{ item.item.name }}/docker-compose.yml" down --remove-orphans
      loop: "{{ compose_file_results.results | selectattr('stat.exists', 'equalto', true) }}"
      loop_control:
        label: "{{ item.item.name }}"
      register: service_stop_results
      become: false
      failed_when: false # Continue even if the command fails
    - name: Remove service directories for disabled services
      ansible.builtin.file:
        path: "{{ ansible_env.HOME }}/.services/{{ item.name }}"
        state: absent
      loop: "{{ services_with_dirs }}"
      loop_control:
        label: "{{ item.name }}"

View File

@@ -0,0 +1,41 @@
# Stash media organizer (templated by Ansible).
services:
  stash:
    image: stashapp/stash:latest
    container_name: stash
    restart: unless-stopped
    ports:
      - "9999:9999"
    environment:
      - PUID=1000
      # NOTE(review): PGID=1000 here while most other services use PGID=100 —
      # confirm which group should own the media mounts.
      - PGID=1000
      - STASH_STASH=/data/
      - STASH_GENERATED=/generated/
      - STASH_METADATA=/metadata/
      - STASH_CACHE=/cache/
      - STASH_PORT=9999
    volumes:
      - /etc/localtime:/etc/localtime:ro
      ## Point this at your collection.
      - {{ stash_data_dir }}:/data
      ## Keep configs, scrapers, and plugins here.
      - {{ stash_config_dir }}/config:/root/.stash
      ## This is where your stash's metadata lives
      - {{ stash_config_dir }}/metadata:/metadata
      ## Any other cache content.
      - {{ stash_config_dir }}/cache:/cache
      ## Where to store binary blob data (scene covers, images)
      - {{ stash_config_dir }}/blobs:/blobs
      ## Where to store generated content (screenshots,previews,transcodes,sprites)
      - {{ stash_config_dir }}/generated:/generated
    networks:
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 2G
networks:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,37 @@
---
- name: Deploy Stash service
block:
- name: Set Stash directories
ansible.builtin.set_fact:
stash_data_dir: "/mnt/data/stash"
stash_config_dir: "/mnt/services/stash"
stash_service_dir: "{{ ansible_env.HOME }}/.services/stash"
- name: Create Stash directories
ansible.builtin.file:
path: "{{ stash_dir }}"
state: directory
mode: "0755"
loop:
- "{{ stash_data_dir }}"
- "{{ stash_service_dir }}"
loop_control:
loop_var: stash_dir
- name: Deploy Stash docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ stash_service_dir }}/docker-compose.yml"
mode: "0644"
register: stash_compose
- name: Stop Stash service
ansible.builtin.command: docker compose -f "{{ stash_service_dir }}/docker-compose.yml" down --remove-orphans
when: stash_compose.changed
- name: Start Stash service
ansible.builtin.command: docker compose -f "{{ stash_service_dir }}/docker-compose.yml" up -d
when: stash_compose.changed
tags:
- services
- stash

View File

@@ -0,0 +1,25 @@
---
# Tautulli (Plex monitoring) behind the shared Caddy network.
services:
  tautulli:
    image: lscr.io/linuxserver/tautulli:latest
    container_name: tautulli
    environment:
      - PUID=1000
      - PGID=100
      # BUG FIX: was `Etc/Amsterdam`, which is not a valid tz database
      # identifier (the container would fall back to UTC). Amsterdam lives
      # under Europe/, matching every other service in this repo.
      - TZ=Europe/Amsterdam
    volumes:
      # Quoted so the rendered compose file stays valid YAML.
      - "{{ tautulli_data_dir }}:/config"
    ports:
      # Quoted: unquoted HOST:CONTAINER mappings can hit YAML's base-60
      # integer parsing.
      - "8181:8181"
    restart: unless-stopped
    networks:
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 512M
networks:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,36 @@
---
# Renders the Tautulli compose file and recreates the stack when it changes.
- name: Deploy Tautulli service
  block:
    - name: Set Tautulli directories
      ansible.builtin.set_fact:
        # Plain string — was previously wrapped in a no-op Jinja literal
        # expression ("{{ '/mnt/services/tautulli' }}").
        tautulli_data_dir: "/mnt/services/tautulli"
        tautulli_service_dir: "{{ ansible_env.HOME }}/.services/tautulli"
    - name: Create Tautulli directories
      ansible.builtin.file:
        path: "{{ tautulli_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ tautulli_data_dir }}"
        - "{{ tautulli_service_dir }}"
      loop_control:
        loop_var: tautulli_dir
    - name: Deploy Tautulli docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ tautulli_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: tautulli_compose
    # Down/up only when the rendered compose file actually changed.
    - name: Stop Tautulli service
      ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" down --remove-orphans
      when: tautulli_compose.changed
    - name: Start Tautulli service
      ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" up -d
      when: tautulli_compose.changed
  tags:
    - services
    - tautulli

View File

@@ -0,0 +1,65 @@
# UniFi Network Application + MongoDB (templated by Ansible).
services:
  unifi-controller:
    image: linuxserver/unifi-network-application:latest
    restart: unless-stopped
    ports:
      - "8080:8080" # Device communication
      - "8443:8443" # Controller GUI / API
      - "3478:3478/udp" # STUN
      - "10001:10001/udp" # AP discovery
      - "8880:8880" # HTTP portal redirect (guest hotspot)
      - "8843:8843" # HTTPS portal redirect (guest hotspot)
      - "6789:6789" # Mobile speed test (optional)
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      # NOTE(review): hard-coded unifi/unifi DB credentials. The DB sits on a
      # private bridge network, but consider a 1Password lookup as used by the
      # other services in this repo.
      - MONGO_USER=unifi
      - MONGO_PASS=unifi
      - MONGO_HOST=unifi-db
      - MONGO_PORT=27017
      - MONGO_DBNAME=unifi
      - MONGO_AUTHSOURCE=admin
    volumes:
      - {{ unifi_network_application_data_dir }}/data:/config
    depends_on:
      - unifi-db
    networks:
      - unifi-network
      - caddy_network
    sysctls:
      - net.ipv6.conf.all.disable_ipv6=1
    deploy:
      resources:
        limits:
          memory: 1G
  unifi-db:
    image: mongo:6.0
    restart: unless-stopped
    volumes:
      - {{ unifi_network_application_data_dir }}/db:/data/db
      # Init script creates the unifi DB users; Mongo's entrypoint only runs
      # it when the db directory is empty (see the role's db-reset tasks).
      - {{ unifi_network_application_data_dir }}/init-mongo.sh:/docker-entrypoint-initdb.d/init-mongo.sh:ro
    environment:
      # NOTE(review): root/root for the Mongo root account — same caveat as
      # the hard-coded credentials above.
      - MONGO_INITDB_ROOT_USERNAME=root
      - MONGO_INITDB_ROOT_PASSWORD=root
      - MONGO_INITDB_DATABASE=unifi
      - MONGO_USER=unifi
      - MONGO_PASS=unifi
      - MONGO_DBNAME=unifi
      - MONGO_AUTHSOURCE=admin
    networks:
      - unifi-network
    sysctls:
      - net.ipv6.conf.all.disable_ipv6=1
    deploy:
      resources:
        limits:
          memory: 1G
networks:
  unifi-network:
    driver: bridge
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -0,0 +1,78 @@
---
- name: Deploy Unifi Network App service
  block:
    - name: Set Unifi Network App directories
      ansible.builtin.set_fact:
        unifi_network_application_data_dir: "/mnt/services/unifi_network_application"
        unifi_network_application_service_dir: "{{ ansible_env.HOME }}/.services/unifi_network_application"

    - name: Create Unifi Network App directories
      ansible.builtin.file:
        path: "{{ unifi_network_application_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ unifi_network_application_data_dir }}"
        - "{{ unifi_network_application_data_dir }}/data"
        - "{{ unifi_network_application_data_dir }}/db"
        - "{{ unifi_network_application_service_dir }}"
      loop_control:
        loop_var: unifi_network_application_dir

    # Executed by the Mongo entrypoint on first start only; creates the
    # application user the UniFi container authenticates with.
    - name: Create MongoDB initialization script
      ansible.builtin.copy:
        content: |
          #!/bin/bash
          if which mongosh > /dev/null 2>&1; then
            mongo_init_bin='mongosh'
          else
            mongo_init_bin='mongo'
          fi
          "${mongo_init_bin}" <<EOF
          use ${MONGO_AUTHSOURCE}
          db.auth("${MONGO_INITDB_ROOT_USERNAME}", "${MONGO_INITDB_ROOT_PASSWORD}")
          db.createUser({
            user: "${MONGO_USER}",
            pwd: "${MONGO_PASS}",
            roles: [
              { db: "${MONGO_DBNAME}", role: "dbOwner" },
              { db: "${MONGO_DBNAME}_stat", role: "dbOwner" },
              { db: "${MONGO_DBNAME}_audit", role: "dbOwner" }
            ]
          })
          EOF
        dest: "{{ unifi_network_application_data_dir }}/init-mongo.sh"
        mode: "0755"
      register: unifi_mongo_init_script

    - name: Deploy Unifi Network App docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ unifi_network_application_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: unifi_network_application_compose

    # WARNING: deletes the entire MongoDB data set whenever the init script
    # changes, so that docker-entrypoint-initdb.d re-runs. Deliberate, but
    # destructive — the controller config in /data is kept.
    - name: Clean MongoDB database for fresh initialization
      ansible.builtin.file:
        path: "{{ unifi_network_application_data_dir }}/db"
        state: absent
      when: unifi_mongo_init_script.changed

    - name: Recreate MongoDB database directory
      ansible.builtin.file:
        path: "{{ unifi_network_application_data_dir }}/db"
        state: directory
        mode: "0755"
      when: unifi_mongo_init_script.changed

    # changed_when: true — `command` cannot detect change itself, and these
    # only run when something above actually changed.
    - name: Stop Unifi Network App service
      ansible.builtin.command: docker compose -f "{{ unifi_network_application_service_dir }}/docker-compose.yml" down --remove-orphans
      changed_when: true
      when: unifi_network_application_compose.changed or unifi_mongo_init_script.changed

    - name: Start Unifi Network App service
      ansible.builtin.command: docker compose -f "{{ unifi_network_application_service_dir }}/docker-compose.yml" up -d
      changed_when: true
      when: unifi_network_application_compose.changed or unifi_mongo_init_script.changed
  tags:
    - services
    - unifi

View File

@@ -0,0 +1,23 @@
services:
  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - SERVERURL=mvl.sh
      - PEERS=worklaptop,phone,desktop,personallaptop
      # Quoted: the value contains YAML-special characters (comma, colons).
      - "ALLOWEDIPS=0.0.0.0/0, ::/0"
    volumes:
      - "{{ wireguard_data_dir }}/wg-data:/config"
    ports:
      # Port mappings are quoted per Compose best practice to avoid YAML
      # integer-parsing surprises on HOST:CONTAINER values.
      - "51820:51820/udp"
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 512M

View File

@@ -0,0 +1,31 @@
---
- name: Deploy WireGuard service
  block:
    - name: Set WireGuard directories
      ansible.builtin.set_fact:
        wireguard_service_dir: "{{ ansible_env.HOME }}/.services/wireguard"
        wireguard_data_dir: "/mnt/services/wireguard"

    - name: Create WireGuard directory
      ansible.builtin.file:
        path: "{{ wireguard_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy WireGuard docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ wireguard_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: wireguard_compose

    # changed_when: true — `command` cannot report change itself and these
    # tasks only run when the compose file actually changed.
    - name: Stop WireGuard service
      ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" down --remove-orphans
      changed_when: true
      when: wireguard_compose.changed

    - name: Start WireGuard service
      ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" up -d
      changed_when: true
      when: wireguard_compose.changed
  tags:
    - services
    - wireguard

View File

@@ -0,0 +1,51 @@
---
# Allow non-default browsers to talk to the 1Password desktop app's native
# messaging integration via /etc/1password/custom_allowed_browsers.
- name: Process 1Password custom allowed browsers
  block:
    # NOTE(review): onepassword_check is registered but never used below —
    # confirm whether it can be dropped or should gate the remaining tasks.
    - name: Check if 1Password is installed
      ansible.builtin.command: 1password --version
      register: onepassword_check
      changed_when: false
      failed_when: false

    # Used later to decide whether a restart is needed after a config change.
    - name: Check if 1Password is running anywhere
      ansible.builtin.command: pgrep 1password
      register: onepassword_running
      changed_when: false
      failed_when: false

    - name: Ensure 1Password custom allowed browsers directory exists
      ansible.builtin.file:
        path: /etc/1password
        state: directory
        mode: "0755"
      become: true

    # One browser binary/app-id name per line; covers the various names Zen
    # ships under plus Firefox and Opera.
    - name: Add Browsers to 1Password custom allowed browsers
      ansible.builtin.copy:
        content: |
          ZenBrowser
          zen-browser
          app.zen_browser.zen
          zen
          Firefox
          firefox
          opera
          zen-x86_64
        dest: /etc/1password/custom_allowed_browsers
        owner: root
        group: root
        mode: "0755"
      become: true
      register: custom_browsers_file

    # 1Password only re-reads the allow-list on startup, so restart it when
    # the file changed and it was running.
    - name: Kill any running 1Password instances if configuration changed
      ansible.builtin.command: pkill 1password
      when: custom_browsers_file.changed and onepassword_running.stdout != ""
      changed_when: custom_browsers_file.changed and onepassword_running.stdout != ""

    # screen detaches the GUI process from the Ansible session so it
    # survives the play ending.
    - name: If 1Password was killed, restart it...
      ansible.builtin.command: screen -dmS 1password 1password
      when: custom_browsers_file.changed and onepassword_running.stdout != ""
      changed_when: custom_browsers_file.changed and onepassword_running.stdout != ""
  tags:
    - custom_allowed_browsers

View File

@@ -0,0 +1,6 @@
# Mark all files under the real autostart source as executable
# (~/.config/autostart is symlinked into the dotfiles repo, so permissions
# must be fixed on the source tree, not on the symlink).
- name: Mark all files under dotfiles autostart as executable
  ansible.builtin.file:
    path: "{{ lookup('env', 'DOTFILES_PATH') }}/config/autostart"
    mode: "u+x,g+x,o+x"
    recurse: true

View File

@@ -0,0 +1,41 @@
---
# Wayland clipboard history: wl-paste feeds every clipboard change into
# cliphist via a systemd user service; wofi provides the picker UI.
- name: Ensure wl-clipboard and cliphist are installed
  become: true
  ansible.builtin.package:
    name:
      - wl-clipboard
      - cliphist
      - wofi
    state: present

# copy does not create missing parent directories, so ensure the systemd
# user unit directory exists first (fresh hosts do not have it).
- name: Ensure systemd user unit directory exists
  become: false
  ansible.builtin.file:
    path: "{{ ansible_env.HOME }}/.config/systemd/user"
    state: directory
    mode: "0755"

- name: Create systemd user service for cliphist
  become: false
  ansible.builtin.copy:
    dest: "{{ ansible_env.HOME }}/.config/systemd/user/cliphist-store.service"
    mode: "0644"
    content: |
      [Unit]
      Description=Store clipboard history with cliphist

      [Service]
      ExecStart=/usr/bin/sh -c 'wl-paste --watch cliphist store'
      Restart=on-failure

      [Install]
      WantedBy=default.target

- name: Reload systemd user daemon
  become: false
  ansible.builtin.systemd:
    daemon_reload: true
    scope: user

# TO ENABLE READING, ADD A KEYBOARD SHORTCUT SOMEWHERE IN YOUR WM CONFIGURATION
# cliphist list | wofi -S dmenu | cliphist decode | wl-copy
- name: Enable and start cliphist-store service
  become: false
  ansible.builtin.systemd:
    name: cliphist-store.service
    enabled: true
    state: started
    scope: user

View File

@@ -0,0 +1,61 @@
---
# Replace Ubuntu's Firefox snap with the deb from Mozilla's own APT repo.
- name: Install Firefox via APT (Not Snap)
  block:
    - name: Remove Firefox Snap if installed
      community.general.snap:
        name: firefox
        state: absent
      become: true

    - name: Create APT keyring directory if it doesn't exist
      ansible.builtin.file:
        path: /etc/apt/keyrings
        state: directory
        mode: "0755"
      become: true

    - name: Import Mozilla APT repo signing key
      ansible.builtin.get_url:
        url: https://packages.mozilla.org/apt/repo-signing-key.gpg
        dest: /etc/apt/keyrings/packages.mozilla.org.asc
        mode: "0644"
      become: true

    - name: Add Mozilla APT repository
      ansible.builtin.lineinfile:
        path: /etc/apt/sources.list.d/mozilla.list
        line: "deb [signed-by=/etc/apt/keyrings/packages.mozilla.org.asc] https://packages.mozilla.org/apt mozilla main"
        create: true
        mode: "0644"
      become: true

    # Pin-Priority 1000 forces Mozilla's packages over the archive defaults;
    # -1 blocks Ubuntu's firefox transition package (which reinstalls the snap).
    - name: Set Firefox package priority
      ansible.builtin.copy:
        dest: /etc/apt/preferences.d/mozilla
        content: |
          Package: *
          Pin: origin packages.mozilla.org
          Pin-Priority: 1000

          Package: firefox*
          Pin: release o=Ubuntu
          Pin-Priority: -1
        mode: "0644"
      become: true

    - name: Update apt cache
      ansible.builtin.apt:
        update_cache: true
      become: true

    - name: Remove Ubuntu's Firefox transition package
      ansible.builtin.apt:
        name: firefox
        state: absent
      become: true

    - name: Install Firefox from Mozilla's repository
      ansible.builtin.apt:
        name: firefox
        state: present
      become: true

View File

@@ -0,0 +1,116 @@
---
# Declarative system-level Flatpak management: anything installed that is
# not in the desired list gets removed, and vice versa.
- name: Check if Flatpak is installed
  ansible.builtin.command: which flatpak
  register: flatpak_check
  changed_when: false
  failed_when: false

- name: Install Flatpak
  ansible.builtin.package:
    name: flatpak
    state: present
  become: true
  when: flatpak_check.rc != 0

- name: Add Flathub remote repository
  community.general.flatpak_remote:
    name: flathub
    flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
    state: present

- name: Get list of system installed Flatpaks
  ansible.builtin.command: flatpak list --system --app --columns=application
  register: installed_system_flatpaks
  changed_when: false

- name: Get list of system remotes
  ansible.builtin.command: flatpak remote-list --system --columns=name
  register: system_remotes
  changed_when: false

# Build the desired list as plain list concatenation. This replaces the
# previous "omit inside a list" trick — `omit` is only supported for module
# arguments, not as a list element, and needed reject() workarounds.
- name: Define system desired Flatpaks
  ansible.builtin.set_fact:
    desired_system_flatpaks: "{{ gnome_system_flatpaks + common_system_flatpaks }}"
  vars:
    # GNOME Software — only when running a GNOME session
    gnome_system_flatpaks: >-
      {{ ['org.gnome.Extensions', 'org.gnome.Weather', 'org.gnome.Sudoku']
         if (ansible_facts.env.XDG_CURRENT_DESKTOP is defined
             and 'GNOME' in ansible_facts.env.XDG_CURRENT_DESKTOP)
         else [] }}
    common_system_flatpaks:
      # Games
      - io.github.openhv.OpenHV
      - info.beyondallreason.bar
      - org.godotengine.Godot
      - dev.bragefuglseth.Keypunch
      - org.prismlauncher.PrismLauncher
      # Multimedia
      - com.plexamp.Plexamp
      - tv.plex.PlexDesktop
      # Messaging
      - com.rtosta.zapzap
      - org.telegram.desktop
      - org.signal.Signal
      - com.spotify.Client
      # Nextcloud Compatible Utilities
      - io.github.mrvladus.List
      - org.gnome.World.Iotas
      # 3D Printing
      - com.bambulab.BambuStudio
      - io.mango3d.LycheeSlicer
      # Utilities
      - com.ranfdev.DistroShelf
      - io.missioncenter.MissionCenter
      - io.gitlab.elescoute.spacelaunch
      - org.fkoehler.KTailctl
      - com.usebottles.bottles
      - com.github.tchx84.Flatseal
      - com.github.wwmm.easyeffects
      - io.gitlab.adhami3310.Impression
      - io.ente.auth
      - io.github.fastrizwaan.WineZGUI
      - net.davidotek.pupgui2
      - com.mastermindzh.tidal-hifi
      - io.github.flattool.Warehouse
      - io.github.johannesboehler2.BmiCalculator
      - io.github.nokse22.Exhibit
      - net.nokyan.Resources
      - io.github.flattool.Ignition
      - io.github.bytezz.IPLookup
      - org.gaphor.Gaphor

- name: Define system desired Flatpak remotes
  ansible.builtin.set_fact:
    desired_system_flatpak_remotes:
      - flathub

- name: Add desired system Flatpak remotes
  community.general.flatpak_remote:
    name: "{{ item }}"
    state: present
    method: system
  loop: "{{ desired_system_flatpak_remotes }}"

- name: Remove undesired system Flatpak remotes
  community.general.flatpak_remote:
    name: "{{ item }}"
    state: absent
    method: system
  loop: "{{ system_remotes.stdout_lines | difference(desired_system_flatpak_remotes) }}"

- name: Install/Upgrade Flatpak packages
  community.general.flatpak:
    name: "{{ item }}"
    state: present
    method: system
  loop: "{{ desired_system_flatpaks }}"

- name: Remove undesired system Flatpaks
  community.general.flatpak:
    name: "{{ item }}"
    state: absent
    method: system
  loop: "{{ installed_system_flatpaks.stdout_lines | difference(desired_system_flatpaks) }}"

View File

@@ -0,0 +1,18 @@
---
# Entry point for GNOME Shell extension management; each import installs one
# extension via the shared manage_gnome_extension.yml helper.
- name: Install Pano - Clipboard Manager dependencies
  ansible.builtin.apt:
    name:
      # GDA (database) and GSound typelibs required by the Pano extension
      - gir1.2-gda-5.0
      - gir1.2-gsound-1.0
    state: present
    update_cache: true
  become: true

- name: Install Pano - Clipboard Manager
  ansible.builtin.import_tasks: tasks/workstations/gnome-extensions/pano.yml

- name: Install Tiling Shell - Window Manager
  ansible.builtin.import_tasks: tasks/workstations/gnome-extensions/tilingshell.yml

- name: Install Quick Settings Tweaks
  ansible.builtin.import_tasks: tasks/workstations/gnome-extensions/quick-settings.yml

View File

@@ -0,0 +1,73 @@
---
# Generic installer/updater for a GNOME Shell extension released as a zip.
# Callers pass: git_tag (release tag), ext_name (display name), ext_url
# (download URL, %TAG% is replaced by git_tag), ext_id (extension UUID).
- name: Manage GNOME extension
  vars:
    requested_git_tag: "{{ git_tag }}"
    extension_name: "{{ ext_name }}"
    extension_url: "{{ ext_url }}"
    extension_path: "{{ ansible_user_dir }}/.local/share/gnome-shell/extensions/{{ ext_id }}"
    # Written by this task file after install; absence forces a reinstall.
    version_file: "{{ extension_path }}/version.txt"
  block:
    - name: Check if extension is installed
      ansible.builtin.stat:
        path: "{{ extension_path }}"
      register: ext_check

    # failed_when: false — a missing version file is the normal "needs
    # (re)install" signal, not an error.
    - name: Read last installed version
      ansible.builtin.slurp:
        src: "{{ version_file }}"
      register: installed_version
      failed_when: false
      when: ext_check.stat.exists

    - name: Determine if update is needed
      ansible.builtin.set_fact:
        update_needed: >-
          {{ installed_version.content is not defined or
             (installed_version.content | b64decode | trim != requested_git_tag) }}

    - name: Delete old extension if updating
      ansible.builtin.file:
        path: "{{ extension_path }}"
        state: absent
      when: update_needed

    - name: Create directory for extension
      ansible.builtin.file:
        path: "{{ extension_path }}"
        state: directory
        mode: "0755"
      when: not ext_check.stat.exists or update_needed

    - name: Download extension
      ansible.builtin.get_url:
        url: "{{ extension_url | replace('%TAG%', requested_git_tag) }}"
        dest: "{{ extension_path }}/release.zip"
        mode: "0644"
      when: update_needed or not ext_check.stat.exists

    # remote_src: true — the zip was just downloaded onto the managed host,
    # not onto the controller (matches the zen-browser unarchive task).
    - name: Extract extension
      ansible.builtin.unarchive:
        src: "{{ extension_path }}/release.zip"
        dest: "{{ extension_path }}"
        remote_src: true
      when: update_needed or not ext_check.stat.exists

    - name: Store installed version of the extension
      ansible.builtin.copy:
        content: "{{ requested_git_tag }}"
        dest: "{{ version_file }}"
        mode: "0644"
      when: update_needed or not ext_check.stat.exists

    - name: Cleanup post installation
      ansible.builtin.file:
        path: "{{ extension_path }}/release.zip"
        state: absent
      when: not ext_check.stat.exists or update_needed

    - name: Notify user of required GNOME Shell reload
      ansible.builtin.debug:
        msg: >
          Please reload GNOME Shell by pressing Alt + F2, typing 'r' and pressing Enter.
          Then enable the {{ extension_name }} in GNOME Tweaks.
          Or on Wayland, log out and back in.
      when: not ext_check.stat.exists or update_needed

View File

@@ -0,0 +1,8 @@
---
# Installs the Pano clipboard-manager GNOME extension at a pinned release.
- name: Manage Pano Clipboard Manager
  ansible.builtin.include_tasks: tasks/workstations/gnome-extensions/manage_gnome_extension.yml
  vars:
    git_tag: "v23-alpha5"
    ext_name: "Pano - Clipboard Manager"
    # %TAG% is expanded to git_tag by manage_gnome_extension.yml
    ext_url: "https://github.com/oae/gnome-shell-pano/releases/download/%TAG%/pano@elhan.io.zip"
    ext_id: "pano@elhan.io"

View File

@@ -0,0 +1,8 @@
---
# Installs the Quick Settings Tweaks GNOME extension at a pinned release.
- name: Manage Quick Settings Tweaks
  ansible.builtin.include_tasks: tasks/workstations/gnome-extensions/manage_gnome_extension.yml
  vars:
    git_tag: "2.1-stable"
    ext_name: "Quick Settings Tweaks"
    # Use the %TAG% placeholder (expanded from git_tag by the shared helper)
    # instead of hard-coding the tag in the URL, so a tag bump updates the
    # path consistently. The asset filename itself is release-specific.
    ext_url: "https://github.com/qwreey/quick-settings-tweaks/releases/download/%TAG%/2.1-release.zip"
    ext_id: "quick-settings-tweaks@qwreey"

View File

@@ -0,0 +1,8 @@
---
# Installs the Tiling Shell window-manager GNOME extension at a pinned release.
- name: Manage Tiling Shell - Window Manager
  ansible.builtin.include_tasks: tasks/workstations/gnome-extensions/manage_gnome_extension.yml
  vars:
    git_tag: "16.3"
    ext_name: "Tiling Shell - Window Manager"
    # %TAG% is expanded to git_tag by manage_gnome_extension.yml
    ext_url: "https://github.com/domferr/tilingshell/releases/download/%TAG%/tilingshell@ferrarodomenico.com.zip"
    ext_id: "tilingshell@ferrarodomenico.com"

View File

@@ -0,0 +1,21 @@
---
- name: Purge LibreOffice and related packages
become: true
ansible.builtin.apt:
name:
- libreoffice*
- libreoffice-common
- libreoffice-core
- libreoffice-writer
- libreoffice-calc
- libreoffice-impress
- libreoffice-draw
- libreoffice-base
- libreoffice-math
- libreoffice-gnome
- libreoffice-gtk3
state: absent
purge: true
autoremove: true
update_cache: true
when: ansible_pkg_mgr == 'apt'

View File

@@ -0,0 +1,75 @@
---
- name: Ensure snapd is installed
ansible.builtin.package:
name: snapd
state: present
become: true
- name: Ensure snapd service is enabled and started
ansible.builtin.systemd:
name: snapd
state: started
enabled: true
become: true
- name: Get list of installed Snaps
ansible.builtin.command: snap list
register: installed_snaps
changed_when: false
- name: Define protected system snaps
ansible.builtin.set_fact:
system_snaps:
- snapd
- core
- core18
- core20
- core22
- core24
- bare
- chromium
- gtk-common-themes
- gnome-3-28-1804
- gnome-3-34-1804
- gnome-3-38-2004
- gnome-42-2204
- desktop-security-center
- firmware-updater
- prompting-client
- snap-store
- snapd-desktop-integration
- gaming-graphics-core22
- name: Define desired Snaps
ansible.builtin.set_fact:
desired_snaps:
- name: beekeeper-studio
classic: false
- name: Install desired Snap packages
ansible.builtin.command: "snap install {{ item.name }} {{ '--classic' if item.classic else '' }}"
loop: "{{ desired_snaps }}"
become: true
register: snap_install
changed_when: "'already installed' not in snap_install.stderr"
failed_when:
- snap_install.rc != 0
- "'already installed' not in snap_install.stderr"
- name: Remove undesired Snap packages
ansible.builtin.command: "snap remove {{ item }}"
become: true
loop: >-
{{
installed_snaps.stdout_lines[1:]
| map('split', ' ')
| map('first')
| difference(desired_snaps | map(attribute='name'))
| difference(system_snaps)
}}
register: snap_remove
changed_when: snap_remove.rc == 0
failed_when:
- snap_remove.rc != 0
- "'not installed' not in snap_remove.stderr"
- "'cannot remove' not in snap_remove.stderr"

View File

@@ -0,0 +1,38 @@
---
- name: Set user home directory
ansible.builtin.set_fact:
user_home: "{{ ansible_env.HOME if ansible_user_id == 'root' else lookup('env', 'HOME') }}"
- name: Define workstation symlinks
ansible.builtin.set_fact:
workstation_symlinks:
- {
src: "$DOTFILES_PATH/vscode/settings.json",
dest: "~/.config/Code/User/settings.json",
}
- {
src: "$DOTFILES_PATH/zed/settings.json",
dest: "~/.config/zed/settings.json",
}
- { src: "$DOTFILES_PATH/config/autostart", dest: "~/.config/autostart" }
- name: Ensure parent directories for workstation symlinks exist
ansible.builtin.file:
path: "{{ item.dest | replace('~', user_home) | dirname }}"
state: directory
mode: "0755"
loop: "{{ workstation_symlinks }}"
- name: Remove existing autostart directory if it exists
ansible.builtin.file:
path: "{{ user_home }}/.config/autostart"
state: absent
- name: Create workstation symlinks
ansible.builtin.file:
src: "{{ item.src | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
dest: "{{ item.dest | replace('~', user_home) }}"
state: link
force: true
follow: false
loop: "{{ workstation_symlinks }}"

View File

@@ -0,0 +1,74 @@
---
# Orchestrates all workstation setup; most desktop-only steps are skipped
# under WSL2 (detected via the kernel string).
- name: Workstation Setup
  block:
    - name: Include workstation symlinks tasks
      ansible.builtin.import_tasks: tasks/workstations/symlinks.yml

    - name: Include workstation cliphist tasks
      ansible.builtin.import_tasks: tasks/workstations/cliphist.yml
      when: "'microsoft-standard-WSL2' not in ansible_kernel"

    # GNOME-only: requires an actual GNOME session environment variable.
    - name: Include GNOME Extensions tasks
      ansible.builtin.import_tasks: tasks/workstations/gnome-extensions.yml
      when: ansible_facts.env.XDG_CURRENT_DESKTOP is defined and 'GNOME' in ansible_facts.env.XDG_CURRENT_DESKTOP and 'microsoft-standard-WSL2' not in ansible_kernel

    # Only relevant on snap-based Ubuntu installs (snapd package present).
    - name: Include Firefox APT installation tasks
      ansible.builtin.import_tasks: tasks/workstations/firefox-apt.yml
      when: ansible_pkg_mgr == 'apt' and ansible_facts.packages.snapd is defined and 'microsoft-standard-WSL2' not in ansible_kernel

    - name: Include flatpaks tasks
      ansible.builtin.import_tasks: tasks/workstations/flatpaks.yml
      when: "'microsoft-standard-WSL2' not in ansible_kernel"
      tags: flatpaks

    - name: Include snaps tasks
      ansible.builtin.import_tasks: tasks/workstations/snaps.yml
      when: ansible_facts.packages.snapd is defined and 'microsoft-standard-WSL2' not in ansible_kernel

    - name: Include Zen browser tasks
      ansible.builtin.import_tasks: tasks/workstations/zen-browser.yml
      vars:
        browser_name: "zen"
        browser_executable: "zen"
      when: "'microsoft-standard-WSL2' not in ansible_kernel"

    - name: Include 1Password Browsers tasks
      ansible.builtin.import_tasks: tasks/workstations/1password-browsers.yml
      when: "'microsoft-standard-WSL2' not in ansible_kernel"

    - name: Include purge LibreOffice tasks
      ansible.builtin.import_tasks: tasks/workstations/purge-libreoffice.yml

    - name: Include autostart tasks
      ansible.builtin.import_tasks: tasks/workstations/autostart.yml
      when: "'microsoft-standard-WSL2' not in ansible_kernel"

    - name: Ensure workstation common packages are installed
      ansible.builtin.package:
        name:
          # Statistics HUD for gaming
          - mangohud
          # Used for VSCode Extensions
          - nodejs
          # DistroBox
          - distrobox
        state: present
      become: true

    # Mostly Flutter/desktop build dependencies plus Nextcloud clients.
    - name: Ensure Ubuntu/Debian packages are installed
      ansible.builtin.package:
        name:
          - clang
          - cmake
          - git
          - ninja-build
          - pkg-config
          - libgtk-3-dev
          - liblzma-dev
          - libstdc++-12-dev
          - dolphin-nextcloud
          - nextcloud-desktop
        state: present
      when: ansible_pkg_mgr == 'apt'
      become: true

View File

@@ -0,0 +1,89 @@
---
- name: Install Zen browser
block:
- name: Set Zen browser version
ansible.builtin.set_fact:
zen_browser_version: "1.15b"
- name: Create directory for browser
ansible.builtin.file:
path: "/opt/{{ browser_name }}"
state: directory
mode: "0755"
become: true
- name: Download Zen browser tarball
ansible.builtin.get_url:
url: "https://github.com/zen-browser/desktop/releases/download/{{ zen_browser_version }}/zen.linux-x86_64.tar.xz"
dest: "/tmp/{{ browser_name }}.tar.xz"
mode: "0644"
become: true
- name: Extract browser tarball
ansible.builtin.unarchive:
src: "/tmp/{{ browser_name }}.tar.xz"
dest: "/opt/{{ browser_name }}"
remote_src: true
extra_opts: [--strip-components=1]
become: true
- name: Create symlink to browser executable
ansible.builtin.file:
src: "/opt/{{ browser_name }}/{{ browser_executable }}"
dest: "/usr/local/bin/{{ browser_name }}"
state: link
become: true
- name: Create desktop file
ansible.builtin.copy:
content: |
[Desktop Entry]
Version=1.0
Name=Zen Browser
Exec=/usr/local/bin/{{ browser_name }} %u
Icon=/opt/zen/browser/chrome/icons/default/default128.png
Type=Application
MimeType=text/html;text/xml;application/xhtml+xml;x-scheme-handler/http;x-scheme-handler/https;application/x-xpinstall;application/pdf;application/json;
StartupWMClass=zen
Categories=Network;WebBrowser;
StartupNotify=true
Terminal=false
X-MultipleArgs=false
Keywords=Internet;WWW;Browser;Web;Explorer;
Actions=new-window;new-private-window;profilemanager;
[Desktop Action new-window]
Name=Open a New Window
Exec=/usr/local/bin/{{ browser_name }} %u
[Desktop Action new-private-window]
Name=Open a New Private Window
Exec=/usr/local/bin/{{ browser_name }} --private-window %u
[Desktop Action profilemanager]
Name=Open the Profile Manager
Exec=/usr/local/bin/{{ browser_name }} --ProfileManager %u
dest: "/usr/share/applications/zen.desktop"
mode: "0644"
become: true
- name: Update desktop database
ansible.builtin.command:
cmd: update-desktop-database /usr/share/applications
creates: /usr/share/applications/mimeinfo.cache
become: true
- name: Make desktop file executable
ansible.builtin.file:
dest: "/usr/share/applications/zen.desktop"
mode: "0755"
become: true
- name: Clean up downloaded tarball
ansible.builtin.file:
path: "/tmp/{{ browser_name }}.tar.xz"
state: absent
become: true
tags:
- zen-browser
- zen

View File

@@ -0,0 +1,31 @@
[Unit]
Description=Borg Backup Service
After=network.target

[Service]
# oneshot: runs backup.sh to completion each time the unit is triggered.
Type=oneshot
User=root
Group=root
ExecStart={{ borg_config_dir }}/backup.sh
StandardOutput=journal
StandardError=journal
# NOTE(review): the passphrase set here is visible via `systemctl show`;
# consider moving it to a root-only EnvironmentFile — confirm requirements.
Environment="BORG_PASSPHRASE={{ borg_passphrase }}"
Environment="BORG_REPO={{ borg_repo_dir }}"
Environment="BORG_CACHE_DIR={{ borg_config_dir }}/cache"
Environment="BORG_CONFIG_DIR={{ borg_config_dir }}/config"
Environment="BORG_SECURITY_DIR={{ borg_config_dir }}/security"
Environment="BORG_KEYS_DIR={{ borg_config_dir }}/keys"

# Security settings
NoNewPrivileges=true
PrivateTmp=true
# strict: read-only filesystem except the paths whitelisted below.
ProtectSystem=strict
ReadWritePaths=/mnt/services /mnt/object_storage /var/log {{ borg_config_dir }}
ProtectHome=read-only
ProtectControlGroups=true
RestrictRealtime=true
SystemCallFilter=@system-service
SystemCallErrorNumber=EPERM

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,157 @@
#!/bin/bash
# Borg backup script for /mnt/services
# This script creates incremental backups of the services directory
#
# Rendered from an Ansible template: {{ ... }} placeholders are substituted
# at deploy time. NOTE(review): the rendered script embeds the Borg
# passphrase and Telegram bot token in plain text — confirm the installed
# file is root-only readable.

# Set environment variables
export BORG_REPO="{{ borg_repo_dir }}"
export BORG_PASSPHRASE="{{ borg_passphrase }}"
export BORG_CACHE_DIR="{{ borg_config_dir }}/cache"
export BORG_CONFIG_DIR="{{ borg_config_dir }}/config"
export BORG_SECURITY_DIR="{{ borg_config_dir }}/security"
export BORG_KEYS_DIR="{{ borg_config_dir }}/keys"

# Telegram notification variables
export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='password') }}"
export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='chat_id') }}"

# Backup name with timestamp
BACKUP_NAME="services-$(date +%Y%m%d-%H%M%S)"

# Log function: timestamped line to stdout AND appended to the log file.
log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a /var/log/borg-backup.log
}

# Telegram notification function.
#   $1 - message text (Telegram HTML markup allowed)
#   $2 - "true" to send silently (no client sound); defaults to "false"
# Best-effort: failures are logged and never abort the backup.
send_telegram() {
    local message="$1"
    local silent="${2:-false}"
    if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
        log "Telegram credentials not configured, skipping notification"
        return
    fi
    # NOTE(review): message text is interpolated into JSON without escaping;
    # fine for the fixed messages below, not safe for arbitrary input.
    local payload=$(cat <<EOF
{
    "chat_id": "$TELEGRAM_CHAT_ID",
    "text": "$message",
    "parse_mode": "HTML",
    "disable_notification": $silent
}
EOF
)
    curl -s -X POST \
        -H "Content-Type: application/json" \
        -d "$payload" \
        "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" > /dev/null 2>&1
    if [ $? -eq 0 ]; then
        log "Telegram notification sent successfully"
    else
        log "Failed to send Telegram notification"
    fi
}

# Ensure all Borg directories exist
mkdir -p "$BORG_CACHE_DIR"
mkdir -p "$BORG_CONFIG_DIR"
mkdir -p "$BORG_SECURITY_DIR"
mkdir -p "$BORG_KEYS_DIR"

# Start backup
log "Starting Borg backup: $BACKUP_NAME"

# Create backup
# --filter AME lists only Added/Modified/Error entries; caches and other
# transient paths are excluded to keep archives small.
borg create \
    --verbose \
    --filter AME \
    --list \
    --stats \
    --show-rc \
    --compression lz4 \
    --exclude-caches \
    --exclude '*.tmp' \
    --exclude '*.temp' \
    --exclude '*.log' \
    --exclude '*/.cache' \
    --exclude '*/cache' \
    --exclude '*/logs' \
    --exclude '*/tmp' \
    --exclude '*/node_modules' \
    --exclude '*/__pycache__' \
    "::$BACKUP_NAME" \
    {{ borg_backup_dir }}

backup_exit=$?
log "Backup finished with exit code: $backup_exit"

# Prune old backups (keep last 7 daily, 4 weekly, 6 monthly)
log "Pruning old backups"

# Check if there are any archives to prune first
archive_count=$(borg list --short --prefix 'services-' 2>/dev/null | wc -l)
if [ "$archive_count" -gt 1 ]; then
    # NOTE(review): --prefix is deprecated in Borg >= 1.2 in favour of
    # -a 'services-*' — confirm against the installed Borg version.
    borg prune \
        --list \
        --prefix 'services-' \
        --show-rc \
        --keep-daily 7 \
        --keep-weekly 4 \
        --keep-monthly 6
    prune_exit=$?
else
    log "Only one or no archives found, skipping prune"
    prune_exit=0
fi
log "Prune finished with exit code: $prune_exit"

# Compact repository
log "Compacting repository"
borg compact
compact_exit=$?
log "Compact finished with exit code: $compact_exit"

# Global exit status
# Borg convention: 0 = success, 1 = warning, >1 = error; keep the worst of
# the three phases.
global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit ))
global_exit=$(( compact_exit > global_exit ? compact_exit : global_exit ))

if [ $global_exit -eq 0 ]; then
    log "Backup completed successfully"
    send_telegram "🔒 <b>Borg Backup Success</b>
✅ Backup: $BACKUP_NAME completed successfully
📊 Repository: {{ borg_repo_dir }}
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')
All operations completed without errors." "true"
elif [ $global_exit -eq 1 ]; then
    log "Backup completed with warnings (exit code: $global_exit)"
    send_telegram "⚠️ <b>Borg Backup Warning</b>
⚠️ Backup: $BACKUP_NAME completed with warnings
📊 Repository: {{ borg_repo_dir }}
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')
Exit code: $global_exit
Check logs for details: /var/log/borg-backup.log"
else
    log "Backup completed with warnings or errors (exit code: $global_exit)"
    send_telegram "❌ <b>Borg Backup Failed</b>
❌ Backup: $BACKUP_NAME failed
📊 Repository: {{ borg_repo_dir }}
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
Exit code: $global_exit
Check logs immediately: /var/log/borg-backup.log"
fi

exit $global_exit

Some files were not shown because too many files have changed in this diff Show More