Compare commits

..

4 Commits

304 changed files with 396049 additions and 15415 deletions

167
.bashrc Normal file
View File

@@ -0,0 +1,167 @@
# HISTFILE Configuration (Bash equivalent)
HISTFILE=~/.bash_history
HISTSIZE=1000
HISTFILESIZE=2000 # Adjusted to match both histfile and size criteria
# Docker Compose Alias (Mostly for old shell scripts)
alias docker-compose='docker compose'
# Home Manager Configuration
alias hm='cd $HOME/dotfiles/config/home-manager/ && home-manager'
alias hmnews='hm news --flake .#$DOTF_HOSTNAME'
alias hmup='hm switch --flake .#$DOTF_HOSTNAME --impure'
# Modern tools aliases
alias l="eza --header --long --git --group-directories-first --group --icons --color=always --sort=name --hyperlink -o --no-permissions"
alias ll='l'
alias la='l -a'
alias cat='bat'
alias du='dust'
alias df='duf'
alias rm="trash-put"
# Docker Aliases
alias d='docker'
alias dc='docker compose'
alias dce='docker compose exec'
alias dcl='docker compose logs'
alias dcd='docker compose down'
alias dcu='docker compose up'
alias dcp='docker compose ps'
alias dcps='docker compose ps'
alias dcr='docker compose run'
alias ddpul='docker compose down && docker compose pull && docker compose up -d && docker compose logs -f'
# Git aliases
alias g='git'
alias gg='git pull'
alias gl='git log --stat'
alias gp='git push'
alias gs='git status -s'
alias gst='git status'
alias ga='git add'
alias gc='git commit'
alias gcm='git commit -m'
alias gco='git checkout'
alias gcb='git checkout -b'
# netstat port in use check
alias port='netstat -atupn | grep LISTEN'
# Alias for ls to l but only if it's an interactive shell because we don't want to override ls in scripts which could blow up in our face
if [ -t 1 ]; then
alias ls='l'
fi
# Alias for ssh.exe and ssh-add.exe on Windows WSL (microsoft-standard-WSL2)
if [[ $(uname -a) == *"microsoft-standard-WSL2"* ]]; then
alias op='op.exe'
fi
# PATH Manipulation
export PATH=$PATH:$HOME/.local/bin
export PATH=$PATH:$HOME/.cargo/bin
export PATH=$PATH:$HOME/dotfiles/bin
# Add flatpak to XDG_DATA_DIRS
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/usr/share:/var/lib/flatpak/exports/share:$HOME/.local/share/flatpak/exports/share
# Allow unfree nixos
export NIXPKGS_ALLOW_UNFREE=1
# Allow insecure nixpkgs
export NIXPKGS_ALLOW_INSECURE=1
# Set DOTF_HOSTNAME to the hostname stored in ~/.hostname.
# Falls back to "mennos-unknown-hostname" when the file doesn't exist.
# Paths are quoted so a $HOME containing spaces can't word-split (SC2086).
export DOTF_HOSTNAME="mennos-unknown-hostname"
if [ -f "$HOME/.hostname" ]; then
    export DOTF_HOSTNAME="$(cat "$HOME/.hostname")"
fi
# Tradaware / DiscountOffice Configuration
if [ -d "/home/menno/Projects/Work" ]; then
export TRADAWARE_DEVOPS=true
fi
# Flutter Web and other tools that require Chrome.
# Use the portable builtin `command -v` instead of `which`; it stays silent
# and yields an empty value when brave is not installed.
export CHROME_EXECUTABLE=$(command -v brave)
# 1Password Source Plugin (Assuming bash compatibility)
if [ -f /home/menno/.config/op/plugins.sh ]; then
source /home/menno/.config/op/plugins.sh
fi
# Initialize starship if available
if ! command -v starship &> /dev/null; then
echo "FYI, starship not found"
else
export STARSHIP_ENABLE_RIGHT_PROMPT=true
export STARSHIP_ENABLE_BASH_CONTINUATION=true
eval "$(starship init bash)"
fi
# Source nix home-manager
if [ -f "$HOME/.nix-profile/etc/profile.d/hm-session-vars.sh" ]; then
. "$HOME/.nix-profile/etc/profile.d/hm-session-vars.sh"
fi
# Source agent-bridge script for 1password.
# Guarded with -f so a fresh machine (dotfiles not yet cloned) doesn't spew
# a "No such file or directory" error on every shell start; path quoted.
if [ -f "$HOME/dotfiles/bin/1password-agent-bridge.sh" ]; then
    source "$HOME/dotfiles/bin/1password-agent-bridge.sh"
fi
# zoxide if available
if command -v zoxide &> /dev/null; then
eval "$(zoxide init bash)"
fi
# Check if we are running from zellij, if not then launch it
# Launch zellij for interactive shells, unless we are already inside a
# multiplexer (zellij/tmux/screen) or an editor-embedded terminal.
# Returns: 0 when skipped or when falling back to a plain shell;
# exits the shell entirely when zellij terminates cleanly.
launch_zellij_conditionally() {
    if [ -n "$ZELLIJ" ]; then
        return
    fi
    # Don't launch zellij when stdout isn't a tty, or inside tmux, vscode,
    # screen or zeditor.
    if [ ! -t 1 ] || [ -n "$TMUX" ] || [ -n "$VSCODE_STABLE" ] || [ -n "$STY" ] || [ -n "$ZED_TERM" ]; then
        return
    fi
    # Launch zellij and capture its status immediately — the original code
    # read $? a second time after `[ $? -eq 0 ]`, which by then held the
    # status of the `[` test itself, not zellij's exit code.
    zellij
    local zellij_status=$?
    # Exit the login shell when zellij exited cleanly (session is over).
    if [ "$zellij_status" -eq 0 ]; then
        exit 0
    fi
    echo "Zellij exited with a non-zero exit code, falling back to regular shell." >&2
    return
}
# Disabled for now, I don't like the way it behaves but I don't want to remove it either
# launch_zellij_conditionally
# Source ble.sh if it exists
if [[ -f "${HOME}/.nix-profile/share/blesh/ble.sh" ]]; then
source "${HOME}/.nix-profile/share/blesh/ble.sh"
# Custom function for fzf history search
# Interactive Ctrl+R replacement: pick a history entry with fzf and place it
# on the current readline line. Relies on ble.sh (READLINE_* variables and
# ble-redraw-prompt) and fzf being available — TODO confirm both at runtime.
function fzf_history_search() {
local selected
# Gruvbox-colored fzf over shell history, newest entries first (--tac),
# pre-seeded with whatever was already typed; the trailing sed strips the
# leading history index number so only the command text remains.
selected=$(history | fzf --tac --height=40% --layout=reverse --border --info=inline \
--query="$READLINE_LINE" \
--color 'fg:#ebdbb2,bg:#282828,hl:#fabd2f,fg+:#ebdbb2,bg+:#3c3836,hl+:#fabd2f' \
--color 'info:#83a598,prompt:#bdae93,spinner:#fabd2f,pointer:#83a598,marker:#fe8019,header:#665c54' \
| sed 's/^ *[0-9]* *//')
if [[ -n "$selected" ]]; then
# Replace the in-progress command line and move the cursor to its end.
READLINE_LINE="$selected"
READLINE_POINT=${#selected}
fi
# Repaint the prompt so the updated line becomes visible (ble.sh function).
ble-redraw-prompt
}
# Bind Ctrl+R to our custom function
bind -x '"\C-r": fzf_history_search'
fi
# Display a welcome message for interactive shells
if [ -t 1 ]; then
dotf hello
fi

View File

@@ -1,37 +0,0 @@
name: Ansible Lint Check
on:
pull_request:
push:
branches: [ master ]
jobs:
check-ansible:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Ansible and ansible-lint
run: |
python -m pip install --upgrade pip
python -m pip install ansible ansible-lint
- name: Run ansible-lint
run: |
if [ ! -d "config/ansible" ]; then
echo "No ansible directory found at config/ansible"
exit 0
fi
found_files=$(find config/ansible -name "*.yml" -o -name "*.yaml")
if [ -z "$found_files" ]; then
echo "No Ansible files found in config/ansible to lint"
exit 0
fi
ansible-lint $found_files

View File

@@ -1,42 +0,0 @@
name: Python Lint Check
on:
pull_request:
push:
branches: [master]
jobs:
check-python:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Python linting tools
run: |
python -m pip install --upgrade pip
python -m pip install pylint black
- name: Run pylint
run: |
python_files=$(find . -name "*.py" -type f)
if [ -z "$python_files" ]; then
echo "No Python files found to lint"
exit 0
fi
pylint --exit-zero $python_files
- name: Check Black formatting
run: |
python_files=$(find . -name "*.py" -type f)
if [ -z "$python_files" ]; then
echo "No Python files found to lint"
exit 0
fi
black --check $python_files

12
.gitignore vendored
View File

@@ -1,4 +1,10 @@
config/ssh/config.d/*
!config/ssh/config.d/*.gpg
logs/*
**/__pycache__/
.ansible/
.ansible/.lock
# Don't include secrets in the repository but do include encrypted secrets
!secrets/**/*.gpg
secrets/**/*.*
# SHA256 hashes of the encrypted secrets
*.sha256

View File

@@ -6,9 +6,5 @@
"**/CVS": true,
"**/.DS_Store": true,
"**/*.sha256": true,
},
"files.associations": {
"*.yml": "ansible"
},
"ansible.python.interpreterPath": "/usr/bin/python3"
}
}

104
README.md
View File

@@ -1,27 +1,31 @@
# Setup
This dotfiles is intended to be used with either Fedora 40>, Ubuntu 20.04> or Arch Linux.
Please install a clean version of either distro and then follow the steps below.
This dotfiles is intended to be used with NixOS 24.05
Please install a clean version of NixOS GNOME and then follow the steps below.
## Installation
### 0. Install distro
### 0. Install NixOS
Download the latest ISO from your desired distro and write it to a USB stick.
Download the latest NixOS ISO from the [NixOS website](https://nixos.org/download.html) and write it to a USB stick.
I'd recommend getting the GNOME version as it's easier to set up, and you can still select a minimal install from the installer if you just want a headless server.
#### Note: If you intend on using a desktop environment you should select the GNOME version as this dotfiles repository expects the GNOME desktop environment for various configurations
### 1. Clone dotfiles to home directory
Open a shell and begin the setup process. This setup requires you to provide a hostname as a parameter. You can use an existing hostname to restore an old system or choose a new name.
Open a nix-shell with git and begin the setup process. This setup will prompt you various questions such as your desired hostname and if the system you are installing is supposed to be a server or workstation.
If you are running this in a VM be sure to answer yes if it prompts you.
Feel free to use an existing hostname to restore an old system or choose a new name.
If you are running this in a VM be sure to answer yes if it prompts you. This will ensure it generates the correct boot loader configuration.
```bash
curl -L https://df.mvl.sh | bash -s your-hostname
nix-shell -p git
curl -L https://df.mvl.sh | bash
```
Replace `your-hostname` with your desired hostname for this machine.
### 2. Relog/Reboot
### 2. Reboot
It's probably a good idea that you either reboot or log out and log back in to make sure all the changes are applied.
@@ -29,11 +33,7 @@ It's probably a good idea that you either reboot or log out and log back in to m
# sudo reboot
```
### 3. Create ~/.op_sat (Optional)
For servers you can place a file `~/.op_sat` with your 1Password Service Access Token, this can then be used by Ansible to fetch secrets for services. This is mostly for server systems so you're able to skip it for workstations.
### 4. Run `dotf update`
### 3. Run `dotf update`
Run the `dotf update` command, although the setup script did most of the work some symlinks still need to be set which at the moment is done using shell scripts.
@@ -41,6 +41,10 @@ Run the `dotf update` command, although the setup script did most of the work so
dotf update
```
### 4. Setup 1Password (Optional)
1Password is installed but you need to login and enable the SSH agent and CLI components under the settings before continuing.
### 5. Decrypt secrets
Either using 1Password or by manually providing the decryption key, you should decrypt the secrets.
@@ -61,41 +65,10 @@ You should now have a fully setup system with all the configurations applied.
Here are some paths that contain files named after the hostname of the system.
If you add a new system you should add the relevant files to these paths.
- `config/nixos/hardware/`: Contains the hardware configurations for the different systems.
- `config/ssh/authorized_keys`: Contains the public keys per hostname that will be symlinked to the `~/.ssh/authorized_keys` file.
- `flake.nix`: Contains an array `homeConfigurations` where you should be adding the new system hostname and relevant configuration.
### Server reboots
In case you reboot a server, it's likely that this runs JuiceFS.
To be sure that every service is properly accessing JuiceFS mounted files you should probably restart the services once when the server comes online.
```bash
dotf service stop --all
df # confirm JuiceFS is mounted
dotf service start --all
```
### Object Storage (Servers only)
In case you need to adjust anything regarding the /mnt/object_storage JuiceFS.
Ensure to shut down all services:
```bash
dotf service stop --all
```
Unmount the volume:
```bash
sudo systemctl stop juicefs
```
And optionally if you're going to do something with metadata you might need to stop redis too.
```bash
cd ~/services/juicefs-redis/
docker compose down --remove-orphans
```
- `config/nixos/flake.nix`: Contains an array `nixosConfigurations` where you should be adding the new system hostname and relevant configuration.
- `config/home-manager/flake.nix`: Contains an array `homeConfigurations` where you should be adding the new system hostname and relevant configuration.
### Adding a new system
@@ -104,36 +77,3 @@ To add a new system you should follow these steps:
1. Add the relevant files shown in the section above.
2. Ensure you've either updated or added the `$HOME/.hostname` file with the hostname of the system.
3. Run `dotf update` to ensure the symlinks are properly updated/created.
---
## Using 1Password SSH Agent with WSL2 (Windows 11)
This setup allows you to use your 1Password-managed SSH keys inside WSL2. The WSL-side steps are automated by Ansible. The following Windows-side steps must be performed manually:
### Windows-side Setup
1. **Enable 1Password SSH Agent**
- Open the 1Password app on Windows.
- Go to **Settings → Developer** and enable **"Use the SSH agent"**.
2. **Install npiperelay using winget**
- Open PowerShell and run the following command:
```sh
winget install albertony.npiperelay
```
- This will install the latest maintained fork of npiperelay and add it to your PATH automatically.
3. **Restart Windows Terminal**
- After completing the above steps, restart your Windows Terminal to ensure all changes take effect.
4. **Test the SSH Agent in WSL2**
- Open your WSL2 terminal and run:
```sh
ssh-add -l
```
- If your 1Password keys are listed, the setup is complete.
#### References
- [Using 1Password's SSH Agent with WSL2](https://dev.to/d4vsanchez/use-1password-ssh-agent-in-wsl-2j6m)
- [How to change the PATH environment variable in Windows](https://www.wikihow.com/Change-the-PATH-Environment-Variable-on-Windows)

View File

@@ -1,25 +0,0 @@
# Ansible Configuration
## 1Password Integration
This Ansible configuration includes a custom lookup plugin for fetching secrets from 1Password.
The 1Password CLI must be installed and authenticated on the machine running Ansible.
See [1Password Integration Readme](plugins/lookup/README.md)
### Prerequisites
1. Install 1Password CLI
2. Sign in to 1Password using `op signin`
3. Service account should be properly configured
### Finding Vault IDs
To find your vault ID:
```bash
op vault list
```
For more information, see the [1Password CLI documentation](https://developer.1password.com/docs/cli).
```

View File

@@ -1,5 +0,0 @@
[defaults]
inventory = inventory
roles_path = roles
collections_paths = collections
retry_files_enabled = False

View File

@@ -1,31 +0,0 @@
---
flatpaks: false
install_ui_apps: false
# Countries that are allowed to access the server Caddy reverse proxy
allowed_countries_codes:
- US # United States
- CA # Canada
- GB # United Kingdom
- DE # Germany
- FR # France
- ES # Spain
- IT # Italy
- NL # Netherlands
- AU # Australia
- NZ # New Zealand
- JP # Japan
- KR # South Korea
- SK # Slovakia
- FI # Finland
- DK # Denmark
- SG # Singapore
- AT # Austria
- CH # Switzerland
# IP ranges for blocked countries (generated automatically)
# This will be populated by the country blocking script
blocked_countries: []
# Enable/disable country blocking globally
enable_country_blocking: true

View File

@@ -1,3 +0,0 @@
---
flatpaks: true
install_ui_apps: true

View File

@@ -1,30 +0,0 @@
---
- name: Systemctl daemon-reload
become: true
ansible.builtin.systemd:
daemon_reload: true
- name: Restart SSH service
become: true
ansible.builtin.service:
name: ssh
state: restarted
enabled: true
- name: reload systemd
become: true
ansible.builtin.systemd:
daemon_reload: true
- name: restart borg-local-sync
become: true
ansible.builtin.systemd:
name: borg-local-sync.service
enabled: true
- name: restart borg-local-sync-timer
become: true
ansible.builtin.systemd:
name: borg-local-sync.timer
state: restarted
enabled: true

View File

@@ -1,11 +0,0 @@
[workstations]
mennos-laptop ansible_connection=local
mennos-desktop ansible_connection=local
[servers]
mennos-vps ansible_connection=local
mennos-server ansible_connection=local
mennos-rtlsdr-pc ansible_connection=local
[wsl]
mennos-desktopw ansible_connection=local

View File

@@ -1,19 +0,0 @@
---
- name: Configure all hosts
hosts: all
handlers:
- name: Import handler tasks
ansible.builtin.import_tasks: handlers/main.yml
gather_facts: true
tasks:
- name: Include global tasks
ansible.builtin.import_tasks: tasks/global/global.yml
- name: Include workstation tasks
ansible.builtin.import_tasks: tasks/workstations/workstation.yml
when: inventory_hostname in ['mennos-laptop', 'mennos-desktop']
- name: Include server tasks
ansible.builtin.import_tasks: tasks/servers/server.yml
when: inventory_hostname in ['mennos-vps', 'mennos-server', 'mennos-rtlsdr-pc', 'mennos-desktopw']

View File

@@ -1,4 +0,0 @@
---
# Collections section
collections:
- community.general

View File

@@ -1,53 +0,0 @@
---
- name: Check if Docker CE is installed
ansible.builtin.command: docker --version
register: docker_check
changed_when: false
failed_when: false
# Arch-based distributions (CachyOS, Arch Linux, etc.)
- name: Install Docker on Arch-based systems
community.general.pacman:
name:
- docker
- docker-compose
- docker-buildx
state: present
become: true
when: docker_check.rc != 0 and ansible_pkg_mgr == 'pacman'
# Non-Arch distributions
- name: Download Docker installation script
ansible.builtin.get_url:
url: https://get.docker.com
dest: /tmp/get-docker.sh
mode: "0755"
when: docker_check.rc != 0 and ansible_pkg_mgr != 'pacman'
- name: Install Docker CE on non-Arch systems
ansible.builtin.shell: bash -c 'set -o pipefail && sh /tmp/get-docker.sh'
args:
executable: /bin/bash
creates: /usr/bin/docker
when: docker_check.rc != 0 and ansible_pkg_mgr != 'pacman'
- name: Add user to docker group
ansible.builtin.user:
name: "{{ ansible_user }}"
groups: docker
append: true
become: true
when: docker_check.rc != 0
- name: Enable and start docker service
ansible.builtin.systemd:
name: docker
state: started
enabled: true
become: true
- name: Reload systemd
ansible.builtin.systemd:
daemon_reload: true
become: true
notify: Reload systemd

View File

@@ -1,125 +0,0 @@
---
- name: Gather package facts
ansible.builtin.package_facts:
manager: auto
become: true
- name: Include Tailscale tasks
ansible.builtin.import_tasks: tasks/global/tailscale.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include Docker tasks
ansible.builtin.import_tasks: tasks/global/docker.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include Ollama tasks
ansible.builtin.import_tasks: tasks/global/ollama.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include OpenSSH Server tasks
ansible.builtin.import_tasks: tasks/global/openssh-server.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Ensure common packages are installed on Arch-based systems
ansible.builtin.package:
name:
- git
- vim
- curl
- wget
- httpie
- python
- python-pip
- python-pipx
- python-pylint
- go
state: present
become: true
when: ansible_pkg_mgr == 'pacman'
- name: Ensure common packages are installed on non-Arch systems
ansible.builtin.package:
name:
- git
- vim
- curl
- wget
- httpie
- python3
- python3-pip
- python3-venv
- pylint
- black
- pipx
- nala
- golang
state: present
become: true
when: ansible_pkg_mgr != 'pacman'
- name: Configure performance optimizations
ansible.builtin.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
become: true
loop:
- { name: "vm.max_map_count", value: "16777216" }
# --- PBinCLI via pipx ---
- name: Ensure pbincli is installed with pipx
ansible.builtin.command: pipx install pbincli
args:
creates: ~/.local/bin/pbincli
environment:
PIPX_DEFAULT_PYTHON: /usr/bin/python3
become: false
- name: Ensure ~/.config/pbincli directory exists
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.config/pbincli"
state: directory
mode: "0755"
- name: Configure pbincli to use custom server
ansible.builtin.copy:
dest: "{{ ansible_env.HOME }}/.config/pbincli/pbincli.conf"
content: |
server=https://bin.mvl.sh
mode: "0644"
- name: Include WSL2 tasks
ansible.builtin.import_tasks: tasks/global/wsl.yml
when: "'microsoft-standard-WSL2' in ansible_kernel"
- name: Include Utils tasks
ansible.builtin.import_tasks: tasks/global/utils.yml
become: true
tags: utils
- name: Ensure ~/.hushlogin exists
ansible.builtin.stat:
path: ~/.hushlogin
register: hushlogin_stat
- name: Create ~/.hushlogin if it does not exist
ansible.builtin.file:
path: ~/.hushlogin
state: touch
mode: "0644"
when: not hushlogin_stat.stat.exists
# Ensure pwfeedback is enabled in sudoers for better password UX
- name: Ensure pwfeedback is present in Defaults env_reset line in /etc/sudoers
ansible.builtin.replace:
path: /etc/sudoers
regexp: '^Defaults\s+env_reset(?!.*pwfeedback)'
replace: "Defaults env_reset,pwfeedback"
validate: "visudo -cf %s"
become: true
tags: sudoers

View File

@@ -1,27 +0,0 @@
---
- name: Check if Ollama is installed
ansible.builtin.command: ollama --version
register: ollama_check
changed_when: false
failed_when: false
- name: Download Ollama install script
ansible.builtin.get_url:
url: https://ollama.com/install.sh
dest: /tmp/install_ollama.sh
mode: "0755"
when: ollama_check.rc != 0
- name: Install Ollama
ansible.builtin.command: bash -c 'set -o pipefail && sh /tmp/install_ollama.sh'
when: ollama_check.rc != 0
args:
creates: /usr/local/bin/ollama
- name: Check if Ollama is running
ansible.builtin.systemd:
name: ollama
state: started
enabled: true
become: true
register: ollama_service

View File

@@ -1,36 +0,0 @@
---
- name: Ensure openssh-server is installed on Arch-based systems
ansible.builtin.package:
name: openssh
state: present
when: ansible_pkg_mgr == 'pacman'
- name: Ensure openssh-server is installed on non-Arch systems
ansible.builtin.package:
name: openssh-server
state: present
when: ansible_pkg_mgr != 'pacman'
- name: Ensure SSH service is enabled and running on Arch-based systems
ansible.builtin.service:
name: sshd
state: started
enabled: true
when: ansible_pkg_mgr == 'pacman'
- name: Ensure SSH service is enabled and running on non-Arch systems
ansible.builtin.service:
name: ssh
state: started
enabled: true
when: ansible_pkg_mgr != 'pacman'
- name: Ensure SSH server configuration is proper
ansible.builtin.template:
src: templates/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
validate: "/usr/sbin/sshd -t -f %s"
notify: Restart SSH service

View File

@@ -1,32 +0,0 @@
---
- name: Check if Tailscale is installed
ansible.builtin.command: which tailscale
register: tailscale_check
changed_when: false
failed_when: false
- name: Install Tailscale using curl script
ansible.builtin.shell: curl -fsSL https://tailscale.com/install.sh | sh
args:
creates: /usr/bin/tailscale
when: tailscale_check.rc != 0
become: true
- name: Check if Tailscale is running
ansible.builtin.command: tailscale status
register: tailscale_status
changed_when: false
failed_when: false
- name: Enable and start Tailscale service
ansible.builtin.systemd:
name: tailscaled
state: started
enabled: true
daemon_reload: true
become: true
- name: Notify user to authenticate Tailscale
ansible.builtin.debug:
msg: "Please authenticate Tailscale by running: sudo tailscale up --operator=$USER"
when: tailscale_status.rc != 0

View File

@@ -1,62 +0,0 @@
---
- name: Process utils files
block:
- name: Load DOTFILES_PATH environment variable
ansible.builtin.set_fact:
dotfiles_path: "{{ lookup('env', 'DOTFILES_PATH') }}"
become: false
- name: Ensure ~/.local/bin exists
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.local/bin"
state: directory
mode: "0755"
become: false
- name: Scan utils folder for files
ansible.builtin.find:
paths: "{{ dotfiles_path }}/ansible/tasks/global/utils"
file_type: file
register: utils_files
become: false
- name: Scan utils folder for Go projects (directories with go.mod)
ansible.builtin.find:
paths: "{{ dotfiles_path }}/ansible/tasks/global/utils"
file_type: directory
recurse: true
register: utils_dirs
become: false
- name: Filter directories that contain go.mod files
ansible.builtin.stat:
path: "{{ item.path }}/go.mod"
loop: "{{ utils_dirs.files }}"
register: go_mod_check
become: false
- name: Create symlinks for utils scripts
ansible.builtin.file:
src: "{{ item.path }}"
dest: "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename }}"
state: link
loop: "{{ utils_files.files }}"
when: not item.path.endswith('.go')
become: false
- name: Compile standalone Go files and place binaries in ~/.local/bin
ansible.builtin.command:
cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename | regex_replace('\.go$', '') }}" "{{ item.path }}"
loop: "{{ utils_files.files }}"
when: item.path.endswith('.go')
become: false
- name: Compile Go projects and place binaries in ~/.local/bin
ansible.builtin.command:
cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.item.path | basename }}" .
chdir: "{{ item.item.path }}"
loop: "{{ go_mod_check.results }}"
when: item.stat.exists
become: false
tags:
- utils

View File

@@ -1,124 +0,0 @@
# Dynamic DNS OnePassword Setup
This document explains how to set up the required OnePassword entries for the Dynamic DNS automation.
## Overview
The Dynamic DNS task automatically retrieves credentials from OnePassword using the Ansible OnePassword lookup plugin. This eliminates the need for vault files and provides better security.
## Required OnePassword Entries
### 1. CloudFlare API Token
**Location:** `CloudFlare API Token` in `Dotfiles` vault, field `password`
**Setup Steps:**
1. Go to [CloudFlare API Tokens](https://dash.cloudflare.com/profile/api-tokens)
2. Click "Create Token"
3. Use the "Edit zone DNS" template
4. Configure permissions:
- Zone: DNS: Edit
- Zone Resources: Include all zones (or specific zones for your domains)
5. Add IP address filtering if desired (optional but recommended)
6. Click "Continue to summary" and "Create Token"
7. Copy the token and save it in OnePassword:
- Title: `CloudFlare API Token`
- Vault: `Dotfiles`
- Field: `password` (this should be the main password field)
### 2. Telegram Bot Credentials
**Location:** `Telegram DynDNS Bot` in `Dotfiles` vault, fields `password` and `chat_id`
**Setup Steps:**
#### Create Telegram Bot:
1. Message [@BotFather](https://t.me/BotFather) on Telegram
2. Send `/start` then `/newbot`
3. Follow the prompts to create your bot
4. Save the bot token (format: `123456789:ABCdefGHijklMNopQRstUVwxyz`)
#### Get Chat ID:
1. Send any message to your new bot
2. Visit: `https://api.telegram.org/bot<YOUR_BOT_TOKEN>/getUpdates`
3. Look for `"chat":{"id":YOUR_CHAT_ID}` in the response
4. Save the chat ID (format: `987654321` or `-987654321` for groups)
#### Save in OnePassword:
- Title: `Telegram DynDNS Bot`
- Vault: `Dotfiles`
- Fields:
- `password`: Your bot token (123456789:ABCdefGHijklMNopQRstUVwxyz)
- `chat_id`: Your chat ID (987654321)
## Verification
You can test that the OnePassword lookups work by running:
```bash
# Test CloudFlare token lookup
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}"
# Test Telegram bot token
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}"
# Test Telegram chat ID
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}"
```
## Security Notes
- Credentials are never stored in version control
- Environment file (`~/.local/bin/dynamic-dns.env`) has 600 permissions
- OnePassword CLI must be authenticated before running Ansible
- Make sure to run `op signin` before executing the playbook
## Troubleshooting
### OnePassword CLI Not Authenticated
```bash
op signin
```
### Missing Fields in OnePassword
Ensure the exact field names match:
- CloudFlare: field must be named `password`
- Telegram: fields must be named `password` and `chat_id`
### Invalid CloudFlare Token
- Check token has `Zone:DNS:Edit` permissions
- Verify token is active in CloudFlare dashboard
- Test with: `curl -H "Authorization: Bearer YOUR_TOKEN" https://api.cloudflare.com/client/v4/user/tokens/verify`
### Telegram Not Working
- Ensure you've sent at least one message to your bot
- Verify chat ID format (numbers only, may start with -)
- Test with: `go run dynamic-dns-cf.go --test-telegram`
## Usage
Once set up, the dynamic DNS will automatically:
- Update DNS records every 15 minutes
- Send Telegram notifications when IP changes
- Log all activity to system journal (`journalctl -t dynamic-dns`)
## Domains Configured
The automation updates these domains:
- `vleeuwen.me`
- `mvl.sh`
- `mennovanleeuwen.nl`
To modify the domain list, edit the wrapper script at:
`~/.local/bin/dynamic-dns-update.sh`

View File

@@ -1,903 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
)
// CloudFlare API structures
type CloudFlareResponse struct {
Success bool `json:"success"`
Errors []CloudFlareError `json:"errors"`
Result json.RawMessage `json:"result"`
Messages []CloudFlareMessage `json:"messages"`
}
type CloudFlareError struct {
Code int `json:"code"`
Message string `json:"message"`
}
type CloudFlareMessage struct {
Code int `json:"code"`
Message string `json:"message"`
}
type DNSRecord struct {
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
Content string `json:"content"`
TTL int `json:"ttl"`
ZoneID string `json:"zone_id"`
}
type Zone struct {
ID string `json:"id"`
Name string `json:"name"`
}
type TokenVerification struct {
ID string `json:"id"`
Status string `json:"status"`
}
type NotificationInfo struct {
RecordName string
OldIP string
NewIP string
IsNew bool
}
// Configuration
type Config struct {
APIToken string
RecordNames []string
IPSources []string
DryRun bool
Verbose bool
Force bool
TTL int
TelegramBotToken string
TelegramChatID string
Client *http.Client
}
// Default IP sources
var defaultIPSources = []string{
"https://ifconfig.co/ip",
"https://ip.seeip.org",
"https://ipv4.icanhazip.com",
"https://api.ipify.org",
}
func main() {
config := &Config{
Client: &http.Client{Timeout: 10 * time.Second},
}
// Command line flags
var ipSourcesFlag string
var recordsFlag string
var listZones bool
var testTelegram bool
flag.StringVar(&recordsFlag, "record", "", "DNS A record name(s) to update - comma-separated for multiple (required)")
flag.StringVar(&ipSourcesFlag, "ip-sources", "", "Comma-separated list of IP detection services (optional)")
flag.BoolVar(&config.DryRun, "dry-run", false, "Show what would be done without making changes")
flag.BoolVar(&config.Verbose, "verbose", false, "Enable verbose logging")
flag.BoolVar(&listZones, "list-zones", false, "List all accessible zones and exit")
flag.BoolVar(&config.Force, "force", false, "Force update even if IP hasn't changed")
flag.BoolVar(&testTelegram, "test-telegram", false, "Send a test Telegram notification and exit")
flag.IntVar(&config.TTL, "ttl", 300, "TTL for DNS record in seconds")
// Custom usage function
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "CloudFlare Dynamic DNS Tool\n\n")
fmt.Fprintf(os.Stderr, "Updates CloudFlare DNS A records with your current public IP address.\n")
fmt.Fprintf(os.Stderr, "Supports multiple records, dry-run mode, and Telegram notifications.\n\n")
fmt.Fprintf(os.Stderr, "USAGE:\n")
fmt.Fprintf(os.Stderr, " %s [OPTIONS]\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "REQUIRED ENVIRONMENT VARIABLES:\n")
fmt.Fprintf(os.Stderr, " CLOUDFLARE_API_TOKEN CloudFlare API token with Zone:DNS:Edit permissions\n")
fmt.Fprintf(os.Stderr, " Get from: https://dash.cloudflare.com/profile/api-tokens\n\n")
fmt.Fprintf(os.Stderr, "OPTIONAL ENVIRONMENT VARIABLES:\n")
fmt.Fprintf(os.Stderr, " TELEGRAM_BOT_TOKEN Telegram bot token for notifications\n")
fmt.Fprintf(os.Stderr, " TELEGRAM_CHAT_ID Telegram chat ID to send notifications to\n\n")
fmt.Fprintf(os.Stderr, "OPTIONS:\n")
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "\nEXAMPLES:\n")
fmt.Fprintf(os.Stderr, " # Update single record\n")
fmt.Fprintf(os.Stderr, " %s -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Update multiple records\n")
fmt.Fprintf(os.Stderr, " %s -record \"home.example.com,api.example.com,vpn.mydomain.net\"\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Dry run with verbose output\n")
fmt.Fprintf(os.Stderr, " %s -dry-run -verbose -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Force update even if IP hasn't changed\n")
fmt.Fprintf(os.Stderr, " %s -force -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Custom TTL and IP sources\n")
fmt.Fprintf(os.Stderr, " %s -record home.example.com -ttl 600 -ip-sources \"https://ifconfig.co/ip,https://api.ipify.org\"\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # List accessible CloudFlare zones\n")
fmt.Fprintf(os.Stderr, " %s -list-zones\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Test Telegram notifications\n")
fmt.Fprintf(os.Stderr, " %s -test-telegram\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "SETUP:\n")
fmt.Fprintf(os.Stderr, " 1. Create CloudFlare API token:\n")
fmt.Fprintf(os.Stderr, " - Go to https://dash.cloudflare.com/profile/api-tokens\n")
fmt.Fprintf(os.Stderr, " - Use 'Edit zone DNS' template\n")
fmt.Fprintf(os.Stderr, " - Select your zones\n")
fmt.Fprintf(os.Stderr, " - Copy token and set CLOUDFLARE_API_TOKEN environment variable\n\n")
fmt.Fprintf(os.Stderr, " 2. Optional: Setup Telegram notifications:\n")
fmt.Fprintf(os.Stderr, " - Message @BotFather on Telegram to create a bot\n")
fmt.Fprintf(os.Stderr, " - Get your chat ID by messaging your bot, then visit:\n")
fmt.Fprintf(os.Stderr, " https://api.telegram.org/bot<BOT_TOKEN>/getUpdates\n")
fmt.Fprintf(os.Stderr, " - Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables\n\n")
fmt.Fprintf(os.Stderr, "NOTES:\n")
fmt.Fprintf(os.Stderr, " - Records can be in different CloudFlare zones\n")
fmt.Fprintf(os.Stderr, " - Only updates when IP actually changes (unless -force is used)\n")
fmt.Fprintf(os.Stderr, " - Supports both root domains and subdomains\n")
fmt.Fprintf(os.Stderr, " - Telegram notifications sent only when IP changes\n")
fmt.Fprintf(os.Stderr, " - Use -dry-run to test without making changes\n\n")
}
flag.Parse()
// Validate required arguments (unless listing zones or testing telegram)
if recordsFlag == "" && !listZones && !testTelegram {
fmt.Fprintf(os.Stderr, "Error: -record flag is required\n")
flag.Usage()
os.Exit(1)
}
// Parse record names
if recordsFlag != "" {
config.RecordNames = strings.Split(recordsFlag, ",")
// Trim whitespace from each record name
for i, record := range config.RecordNames {
config.RecordNames[i] = strings.TrimSpace(record)
}
}
// Get API token from environment
config.APIToken = os.Getenv("CLOUDFLARE_API_TOKEN")
if config.APIToken == "" {
fmt.Fprintf(os.Stderr, "Error: CLOUDFLARE_API_TOKEN environment variable is required\n")
fmt.Fprintf(os.Stderr, "Get your API token from: https://dash.cloudflare.com/profile/api-tokens\n")
fmt.Fprintf(os.Stderr, "Create a token with 'Zone:DNS:Edit' permissions for your zone\n")
os.Exit(1)
}
// Get optional Telegram credentials
config.TelegramBotToken = os.Getenv("TELEGRAM_BOT_TOKEN")
config.TelegramChatID = os.Getenv("TELEGRAM_CHAT_ID")
if config.Verbose && config.TelegramBotToken != "" && config.TelegramChatID != "" {
fmt.Println("Telegram notifications enabled")
}
// Parse IP sources
if ipSourcesFlag != "" {
config.IPSources = strings.Split(ipSourcesFlag, ",")
} else {
config.IPSources = defaultIPSources
}
if config.Verbose {
fmt.Printf("Config: Records=%v, TTL=%d, DryRun=%v, Force=%v, IPSources=%v\n",
config.RecordNames, config.TTL, config.DryRun, config.Force, config.IPSources)
}
// If testing telegram, do that and exit (skip API token validation)
if testTelegram {
if err := testTelegramNotification(config); err != nil {
fmt.Fprintf(os.Stderr, "Error testing Telegram: %v\n", err)
os.Exit(1)
}
return
}
// Validate API token
if err := validateToken(config); err != nil {
fmt.Fprintf(os.Stderr, "Error validating API token: %v\n", err)
os.Exit(1)
}
if config.Verbose {
fmt.Println("API token validated successfully")
}
// If listing zones, do that and exit
if listZones {
if err := listAllZones(config); err != nil {
fmt.Fprintf(os.Stderr, "Error listing zones: %v\n", err)
os.Exit(1)
}
return
}
// Get current public IP
currentIP, err := getCurrentIP(config)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting current IP: %v\n", err)
os.Exit(1)
}
if config.Verbose {
fmt.Printf("Current public IP: %s\n", currentIP)
fmt.Printf("Processing %d record(s)\n", len(config.RecordNames))
}
// Process each record
var totalUpdates int
var allNotifications []NotificationInfo
for _, recordName := range config.RecordNames {
if config.Verbose {
fmt.Printf("\n--- Processing record: %s ---\n", recordName)
}
// Find the zone for the record
zoneName, zoneID, err := findZoneForRecord(config, recordName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding zone for %s: %v\n", recordName, err)
continue
}
if config.Verbose {
fmt.Printf("Found zone: %s (ID: %s)\n", zoneName, zoneID)
}
// Find existing DNS record
record, err := findDNSRecordByName(config, zoneID, recordName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding DNS record %s: %v\n", recordName, err)
continue
}
// Compare IPs
if record != nil {
if record.Content == currentIP && !config.Force {
fmt.Printf("DNS record %s already points to %s - no update needed\n", recordName, currentIP)
continue
}
if config.Verbose {
if record.Content == currentIP {
fmt.Printf("DNS record %s already points to %s, but forcing update\n",
recordName, currentIP)
} else {
fmt.Printf("DNS record %s currently points to %s, needs update to %s\n",
recordName, record.Content, currentIP)
}
}
} else {
if config.Verbose {
fmt.Printf("DNS record %s does not exist, will create it\n", recordName)
}
}
// Update or create record
if config.DryRun {
if record != nil {
if record.Content == currentIP && config.Force {
fmt.Printf("DRY RUN: Would force update DNS record %s (already %s)\n",
recordName, currentIP)
} else {
fmt.Printf("DRY RUN: Would update DNS record %s from %s to %s\n",
recordName, record.Content, currentIP)
}
} else {
fmt.Printf("DRY RUN: Would create DNS record %s with IP %s\n",
recordName, currentIP)
}
// Collect notification info for dry-run
if record == nil || record.Content != currentIP || config.Force {
var oldIPForNotification string
if record != nil {
oldIPForNotification = record.Content
}
allNotifications = append(allNotifications, NotificationInfo{
RecordName: recordName,
OldIP: oldIPForNotification,
NewIP: currentIP,
IsNew: record == nil,
})
}
continue
}
var wasUpdated bool
var oldIP string
if record != nil {
oldIP = record.Content
err = updateDNSRecordByName(config, zoneID, record.ID, recordName, currentIP)
if err != nil {
fmt.Fprintf(os.Stderr, "Error updating DNS record %s: %v\n", recordName, err)
continue
}
fmt.Printf("Successfully updated DNS record %s to %s\n", recordName, currentIP)
wasUpdated = true
} else {
err = createDNSRecordByName(config, zoneID, recordName, currentIP)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating DNS record %s: %v\n", recordName, err)
continue
}
fmt.Printf("Successfully created DNS record %s with IP %s\n", recordName, currentIP)
wasUpdated = true
}
// Collect notification info for actual updates
if wasUpdated && (record == nil || oldIP != currentIP || config.Force) {
allNotifications = append(allNotifications, NotificationInfo{
RecordName: recordName,
OldIP: oldIP,
NewIP: currentIP,
IsNew: record == nil,
})
totalUpdates++
}
}
// Send batch notification if there were any changes
if len(allNotifications) > 0 {
sendBatchTelegramNotification(config, allNotifications, config.DryRun)
}
if !config.DryRun && config.Verbose {
fmt.Printf("\nProcessed %d record(s), %d update(s) made\n", len(config.RecordNames), totalUpdates)
}
}
// validateToken confirms the configured CloudFlare API token is valid and
// active by calling CloudFlare's token-verification endpoint.
// Returns an error when the request fails, the API reports failure, or the
// token status is anything other than "active".
func validateToken(config *Config) error {
	const verifyURL = "https://api.cloudflare.com/client/v4/user/tokens/verify"

	req, err := http.NewRequest("GET", verifyURL, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var apiResp CloudFlareResponse
	if decodeErr := json.NewDecoder(resp.Body).Decode(&apiResp); decodeErr != nil {
		return decodeErr
	}
	if !apiResp.Success {
		return fmt.Errorf("token validation failed: %v", apiResp.Errors)
	}

	var info TokenVerification
	if unmarshalErr := json.Unmarshal(apiResp.Result, &info); unmarshalErr != nil {
		return unmarshalErr
	}
	if info.Status != "active" {
		return fmt.Errorf("token is not active, status: %s", info.Status)
	}
	return nil
}
// getCurrentIP queries each configured IP source in order and returns the
// first non-empty, whitespace-trimmed response body. Sources that fail
// (network error, unreadable body, non-200 status, empty body) are skipped;
// if every source fails, the last error seen is wrapped in the result.
func getCurrentIP(config *Config) (string, error) {
	var lastErr error
	for _, src := range config.IPSources {
		if config.Verbose {
			fmt.Printf("Trying IP source: %s\n", src)
		}

		resp, err := config.Client.Get(src)
		if err != nil {
			lastErr = err
			if config.Verbose {
				fmt.Printf("Failed to get IP from %s: %v\n", src, err)
			}
			continue
		}

		body, readErr := io.ReadAll(resp.Body)
		resp.Body.Close()
		switch {
		case readErr != nil:
			lastErr = readErr
		case resp.StatusCode != 200:
			lastErr = fmt.Errorf("HTTP %d from %s", resp.StatusCode, src)
		default:
			if ip := strings.TrimSpace(string(body)); ip != "" {
				return ip, nil
			}
			lastErr = fmt.Errorf("empty response from %s", src)
		}
	}
	return "", fmt.Errorf("failed to get IP from any source, last error: %v", lastErr)
}
// findZoneForRecord locates the CloudFlare zone containing recordName by
// querying the zones API with successively shorter suffixes of the name
// (e.g. "sub.example.com" → "example.com" → "com") until one matches.
// Returns the matching zone's name and ID, or an error when no suffix
// matches. Individual request/decode failures are silently skipped so a
// transient error on one suffix does not abort the search.
func findZoneForRecord(config *Config, recordName string) (string, string, error) {
	labels := strings.Split(recordName, ".")
	if config.Verbose {
		fmt.Printf("Finding zone for record: %s\n", recordName)
	}
	for start := 0; start < len(labels); start++ {
		candidate := strings.Join(labels[start:], ".")

		req, reqErr := http.NewRequest("GET",
			fmt.Sprintf("https://api.cloudflare.com/client/v4/zones?name=%s", candidate), nil)
		if reqErr != nil {
			continue
		}
		req.Header.Set("Authorization", "Bearer "+config.APIToken)
		req.Header.Set("Content-Type", "application/json")

		resp, doErr := config.Client.Do(req)
		if doErr != nil {
			continue
		}
		var apiResp CloudFlareResponse
		decodeErr := json.NewDecoder(resp.Body).Decode(&apiResp)
		resp.Body.Close()
		if decodeErr != nil || !apiResp.Success {
			continue
		}

		var zones []Zone
		if err := json.Unmarshal(apiResp.Result, &zones); err != nil {
			continue
		}
		if len(zones) > 0 {
			return zones[0].Name, zones[0].ID, nil
		}
	}
	return "", "", fmt.Errorf("no zone found for record %s", recordName)
}
// findDNSRecordByName looks up the A record named recordName inside the
// given zone. Returns (nil, nil) when the record does not exist — the
// caller treats that as "create it" — and an error on transport or API
// failure. When multiple records match, only the first is returned.
func findDNSRecordByName(config *Config, zoneID string, recordName string) (*DNSRecord, error) {
	endpoint := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records?type=A&name=%s",
		zoneID, recordName)

	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var apiResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return nil, err
	}
	if !apiResp.Success {
		return nil, fmt.Errorf("API error: %v", apiResp.Errors)
	}

	var records []DNSRecord
	if err := json.Unmarshal(apiResp.Result, &records); err != nil {
		return nil, err
	}
	if len(records) == 0 {
		return nil, nil // Record doesn't exist
	}
	return &records[0], nil
}
// updateDNSRecordByName overwrites an existing A record (identified by
// recordID) in the given zone so it points at ip, using the TTL from config.
// Returns an error on transport failure or a non-success API reply.
func updateDNSRecordByName(config *Config, zoneID, recordID, recordName, ip string) error {
	body, err := json.Marshal(map[string]interface{}{
		"type":    "A",
		"name":    recordName,
		"content": ip,
		"ttl":     config.TTL,
	})
	if err != nil {
		return err
	}

	endpoint := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records/%s", zoneID, recordID)
	req, err := http.NewRequest("PUT", endpoint, bytes.NewBuffer(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var apiResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return err
	}
	if !apiResp.Success {
		return fmt.Errorf("API error: %v", apiResp.Errors)
	}
	return nil
}
// createDNSRecordByName creates a new A record named recordName in the given
// zone pointing at ip, using the TTL from config. Returns an error on
// transport failure or a non-success API reply.
func createDNSRecordByName(config *Config, zoneID, recordName, ip string) error {
	body, err := json.Marshal(map[string]interface{}{
		"type":    "A",
		"name":    recordName,
		"content": ip,
		"ttl":     config.TTL,
	})
	if err != nil {
		return err
	}

	endpoint := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records", zoneID)
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var apiResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return err
	}
	if !apiResp.Success {
		return fmt.Errorf("API error: %v", apiResp.Errors)
	}
	return nil
}
// listAllZones prints every CloudFlare zone the API token can access, one
// per line with its zone ID, plus a permissions hint when none are visible.
func listAllZones(config *Config) error {
	req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/zones", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+config.APIToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var apiResp CloudFlareResponse
	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
		return err
	}
	if !apiResp.Success {
		return fmt.Errorf("API error: %v", apiResp.Errors)
	}

	var zones []Zone
	if err := json.Unmarshal(apiResp.Result, &zones); err != nil {
		return err
	}

	fmt.Printf("Found %d accessible zones:\n", len(zones))
	for _, z := range zones {
		fmt.Printf(" - %s (ID: %s)\n", z.Name, z.ID)
	}
	if len(zones) == 0 {
		fmt.Println("No zones found. Make sure your API token has Zone:Read permissions.")
	}
	return nil
}
// sendTelegramNotification sends a single-record change notification to the
// configured Telegram chat. It is best-effort: every failure is swallowed
// (logged only when Verbose) so a notification problem never aborts a DNS
// update.
//
// NOTE(review): both message branches hard-code the record name as
// "test-record" instead of reporting the real record — this looks like
// leftover scaffolding. sendBatchTelegramNotification reports real names;
// confirm whether this function is still called anywhere.
func sendTelegramNotification(config *Config, record *DNSRecord, oldIP, newIP string, isDryRun bool) {
	// Skip if Telegram is not configured
	if config.TelegramBotToken == "" || config.TelegramChatID == "" {
		return
	}
	var message string
	dryRunPrefix := ""
	if isDryRun {
		dryRunPrefix = "🧪 DRY RUN - "
	}
	// record == nil means the record would be created rather than updated.
	if record == nil {
		message = fmt.Sprintf("%s🆕 DNS Record Created\n\n"+
			"Record: %s\n"+
			"New IP: %s\n"+
			"TTL: %d seconds",
			dryRunPrefix, "test-record", newIP, config.TTL)
	} else {
		message = fmt.Sprintf("%s🔄 IP Address Changed\n\n"+
			"Record: %s\n"+
			"Old IP: %s\n"+
			"New IP: %s\n"+
			"TTL: %d seconds",
			dryRunPrefix, "test-record", oldIP, newIP, config.TTL)
	}
	// Prepare Telegram API request
	telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
	payload := map[string]interface{}{
		"chat_id":    config.TelegramChatID,
		"text":       message,
		"parse_mode": "HTML",
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to marshal Telegram payload: %v\n", err)
		}
		return
	}
	// Send notification
	req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to create Telegram request: %v\n", err)
		}
		return
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to send Telegram notification: %v\n", err)
		}
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		if config.Verbose {
			fmt.Println("Telegram notification sent successfully")
		}
	} else {
		// Non-200 from Telegram: only surface the body when Verbose.
		if config.Verbose {
			body, _ := io.ReadAll(resp.Body)
			fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body))
		}
	}
}
// testTelegramNotification sends a fixed test message to the configured
// Telegram chat so the user can verify the bot token and chat ID work.
// Returns an error when Telegram is unconfigured or the API call fails;
// on success it prints a confirmation (plus the raw response when Verbose).
func testTelegramNotification(config *Config) error {
	if config.TelegramBotToken == "" || config.TelegramChatID == "" {
		return fmt.Errorf("Telegram not configured. Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables")
	}
	fmt.Println("Testing Telegram notification...")

	message := "🧪 Dynamic DNS Test\n\n" +
		"This is a test notification from your CloudFlare Dynamic DNS tool.\n\n" +
		"✅ Telegram integration is working correctly!"

	endpoint := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
	reqBody, err := json.Marshal(map[string]interface{}{
		"chat_id":    config.TelegramChatID,
		"text":       message,
		"parse_mode": "HTML",
	})
	if err != nil {
		return fmt.Errorf("failed to marshal payload: %v", err)
	}

	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := config.Client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %v", err)
	}
	defer resp.Body.Close()

	respBody, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != 200 {
		return fmt.Errorf("failed to send notification (HTTP %d): %s", resp.StatusCode, string(respBody))
	}
	fmt.Println("✅ Test notification sent successfully!")
	if config.Verbose {
		fmt.Printf("Response: %s\n", string(respBody))
	}
	return nil
}
// sendBatchTelegramNotification sends one Telegram message summarizing all
// record changes from a run. A single change produces a detailed message
// (created / force-updated / IP-changed variants); multiple changes produce
// a summary with counts and a per-record bullet list. Best-effort: failures
// are only logged when Verbose is set, never propagated to the caller.
func sendBatchTelegramNotification(config *Config, notifications []NotificationInfo, isDryRun bool) {
	// Skip if Telegram is not configured
	if config.TelegramBotToken == "" || config.TelegramChatID == "" {
		return
	}
	if len(notifications) == 0 {
		return
	}
	var message string
	dryRunPrefix := ""
	if isDryRun {
		dryRunPrefix = "🧪 DRY RUN - "
	}
	if len(notifications) == 1 {
		// Single record notification
		notif := notifications[0]
		if notif.IsNew {
			message = fmt.Sprintf("%s🆕 DNS Record Created\n\n"+
				"Record: %s\n"+
				"New IP: %s\n"+
				"TTL: %d seconds",
				dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL)
		} else if notif.OldIP == notif.NewIP {
			// Same IP but still notified — must have been a forced update.
			message = fmt.Sprintf("%s🔄 DNS Record Force Updated\n\n"+
				"Record: %s\n"+
				"IP: %s (unchanged)\n"+
				"TTL: %d seconds\n"+
				"Note: Forced update requested",
				dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL)
		} else {
			message = fmt.Sprintf("%s🔄 IP Address Changed\n\n"+
				"Record: %s\n"+
				"Old IP: %s\n"+
				"New IP: %s\n"+
				"TTL: %d seconds",
				dryRunPrefix, notif.RecordName, notif.OldIP, notif.NewIP, config.TTL)
		}
	} else {
		// Multiple records notification
		var newCount, updatedCount int
		for _, notif := range notifications {
			if notif.IsNew {
				newCount++
			} else {
				updatedCount++
			}
		}
		message = fmt.Sprintf("%s📋 Multiple DNS Records Updated\n\n", dryRunPrefix)
		if newCount > 0 {
			message += fmt.Sprintf("🆕 Created: %d record(s)\n", newCount)
		}
		if updatedCount > 0 {
			message += fmt.Sprintf("🔄 Updated: %d record(s)\n", updatedCount)
		}
		// All records in one run share the same public IP, so report the
		// first notification's NewIP as the common value.
		message += fmt.Sprintf("\nNew IP: %s\nTTL: %d seconds\n\nRecords:", notifications[0].NewIP, config.TTL)
		for _, notif := range notifications {
			if notif.IsNew {
				message += fmt.Sprintf("\n• %s (new)", notif.RecordName)
			} else if notif.OldIP == notif.NewIP {
				message += fmt.Sprintf("\n• %s (forced)", notif.RecordName)
			} else {
				message += fmt.Sprintf("\n• %s (%s → %s)", notif.RecordName, notif.OldIP, notif.NewIP)
			}
		}
	}
	// Send the notification using the same logic as single notifications
	telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
	payload := map[string]interface{}{
		"chat_id":    config.TelegramChatID,
		"text":       message,
		"parse_mode": "HTML",
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to marshal Telegram payload: %v\n", err)
		}
		return
	}
	req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to create Telegram request: %v\n", err)
		}
		return
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := config.Client.Do(req)
	if err != nil {
		if config.Verbose {
			fmt.Printf("Failed to send Telegram notification: %v\n", err)
		}
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		if config.Verbose {
			fmt.Println("Telegram notification sent successfully")
		}
	} else {
		// Non-200 from Telegram: only surface the response body when Verbose.
		if config.Verbose {
			body, _ := io.ReadAll(resp.Body)
			fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body))
		}
	}
}

View File

@@ -1,328 +0,0 @@
package main
import (
"encoding/csv"
"flag"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
"time"
)
// Trip is one journey parsed from a single row of the (Dutch-language)
// trip-registration CSV export.
type Trip struct {
	StartTime    time.Time // trip start, parsed from a Dutch-month timestamp
	EndTime      time.Time // trip end
	StartAddr    string    // departure address (CSV column 3)
	EndAddr      string    // arrival address (CSV column 4)
	KMStart      float64   // odometer reading at departure — presumably km; confirm with export
	KMEnd        float64   // odometer reading at arrival
	Distance     float64   // distance driven for this trip
	License      string    // vehicle license plate (CSV column 8)
	BusinessCost float64   // business cost (CSV column 11) — presumably EUR; confirm
	Type         string    // trip type string, whitespace-trimmed (CSV column 12)
}
// MonthStats holds the aggregated driving statistics for one calendar month,
// as computed by calculateStats.
type MonthStats struct {
	TotalKM         float64       // sum of trip distances
	Trips           int           // number of trips
	Longest         float64       // longest single-trip distance
	Shortest        float64       // shortest non-degenerate single-trip distance
	TotalDuration   time.Duration // sum of trip durations
	LongestGap      time.Duration // longest idle gap between consecutive trips
	OdoAnomalies    int           // trips whose start odometer mismatches previous trip's end
	AvgSpeed        float64       // mean of per-trip average speeds (km/h)
	AvgTripDuration time.Duration // TotalDuration / Trips
	FuelCost        float64       // estimated fuel cost; only set when fuel flags given
}
// main parses a Dutch-language trip-registration CSV export and prints
// monthly driving statistics plus the most frequent start/end addresses.
//
// Flags:
//   -fuelprice   fuel price per liter (EUR); enables fuel-cost estimates
//   -efficiency  fuel efficiency in km per liter
//   -lper100km   fuel consumption in liters per 100 km (takes precedence
//                over -efficiency when > 0)
//
// The first positional argument is the CSV file to read.
func main() {
	fuelPrice := flag.Float64("fuelprice", 0, "Fuel price per liter (EUR)")
	fuelEfficiency := flag.Float64("efficiency", 0, "Fuel efficiency (km per liter)")
	lPer100km := flag.Float64("lper100km", 0, "Fuel consumption (liters per 100km)")
	flag.Parse()
	if len(flag.Args()) < 1 {
		fmt.Println("Usage: go run main.go -fuelprice <price> [-efficiency <km/l> | -lper100km <l/100km>] <filename.csv>")
		flag.PrintDefaults()
		return
	}
	// Convert l/100km to km/l if provided
	finalEfficiency := *fuelEfficiency
	if *lPer100km > 0 {
		finalEfficiency = 100.0 / *lPer100km
	}
	file, err := os.Open(flag.Arg(0))
	if err != nil {
		panic(err)
	}
	defer file.Close()
	reader := csv.NewReader(file)
	reader.Comma = ','
	records, err := reader.ReadAll()
	if err != nil {
		panic(err)
	}
	// BUGFIX: records[1:] panicked on an empty CSV; bail out early when
	// there is no data beyond the (expected) header row.
	if len(records) < 2 {
		fmt.Println("No trip rows found in CSV (file is empty or header-only)")
		return
	}
	// Dutch → English month names so time.Parse can handle the timestamps.
	dutchMonths := map[string]string{
		"januari": "January", "februari": "February", "maart": "March",
		"april": "April", "mei": "May", "juni": "June", "juli": "July",
		"augustus": "August", "september": "September", "oktober": "October",
		"november": "November", "december": "December",
	}
	tripsByMonth := make(map[string][]Trip)
	startAddrCount := make(map[string]int)
	endAddrCount := make(map[string]int)
	fuelEnabled := *fuelPrice > 0 && finalEfficiency > 0
	// Parse CSV rows (skipping the header); short or unparsable rows are skipped.
	for _, record := range records[1:] {
		if len(record) < 13 {
			continue
		}
		// Parse start time
		startTime, err := parseDutchTime(record[1], dutchMonths)
		if err != nil {
			continue
		}
		// Parse end time
		endTime, err := parseDutchTime(record[2], dutchMonths)
		if err != nil {
			continue
		}
		// Odometer/distance columns use "," as a thousands separator.
		kmStart, _ := strconv.ParseFloat(strings.ReplaceAll(record[5], ",", ""), 64)
		kmEnd, _ := strconv.ParseFloat(strings.ReplaceAll(record[6], ",", ""), 64)
		distance, _ := strconv.ParseFloat(strings.ReplaceAll(record[7], ",", ""), 64)
		trip := Trip{
			StartTime:    startTime,
			EndTime:      endTime,
			StartAddr:    record[3],
			EndAddr:      record[4],
			KMStart:      kmStart,
			KMEnd:        kmEnd,
			Distance:     distance,
			License:      record[8],
			BusinessCost: parseFloat(record[11]),
			Type:         strings.TrimSpace(record[12]),
		}
		// Group by "YYYY-MM" and tally address frequencies.
		monthKey := fmt.Sprintf("%d-%02d", startTime.Year(), startTime.Month())
		tripsByMonth[monthKey] = append(tripsByMonth[monthKey], trip)
		startAddrCount[trip.StartAddr]++
		endAddrCount[trip.EndAddr]++
	}
	// Calculate stats
	months := sortedKeys(tripsByMonth)
	statsByMonth := calculateStats(tripsByMonth, fuelEnabled, *fuelPrice, finalEfficiency)
	// Print results
	printMainTable(statsByMonth, months, fuelEnabled, tripsByMonth)
	printTopAddresses(startAddrCount, endAddrCount)
}
func parseDutchTime(datetime string, monthMap map[string]string) (time.Time, error) {
parts := strings.Split(datetime, " ")
if len(parts) < 4 {
return time.Time{}, fmt.Errorf("invalid time format")
}
engMonth, ok := monthMap[strings.ToLower(parts[1])]
if !ok {
return time.Time{}, fmt.Errorf("unknown month")
}
timeStr := fmt.Sprintf("%s %s %s %s", parts[0], engMonth, parts[2], parts[3])
return time.Parse("2 January 2006 15:04", timeStr)
}
// calculateStats aggregates per-month driving statistics from the grouped
// trips. When fuelEnabled is true, FuelCost is estimated as
// (TotalKM / fuelEfficiency) * fuelPrice.
//
// NOTE: each month's trip slice is sorted chronologically IN PLACE, so the
// slices inside tripsByMonth are reordered as a side effect.
func calculateStats(tripsByMonth map[string][]Trip, fuelEnabled bool, fuelPrice, fuelEfficiency float64) map[string]MonthStats {
	stats := make(map[string]MonthStats)
	for month, trips := range tripsByMonth {
		var s MonthStats
		var prevEnd time.Time
		var longestGap time.Duration
		sumSpeed := 0.0
		speedCount := 0
		// Chronological order is required for gap and odometer checks below.
		sort.Slice(trips, func(i, j int) bool {
			return trips[i].StartTime.Before(trips[j].StartTime)
		})
		for i, t := range trips {
			s.TotalKM += t.Distance
			s.Trips++
			duration := t.EndTime.Sub(t.StartTime)
			s.TotalDuration += duration
			// Average speed is the mean of per-trip speeds, not total
			// distance over total time; zero-duration trips are excluded.
			if duration.Hours() > 0 {
				sumSpeed += t.Distance / duration.Hours()
				speedCount++
			}
			if t.Distance > s.Longest {
				s.Longest = t.Distance
			}
			// Shortest starts at 0, so the "== 0" clause seeds it with the
			// first trip's distance.
			if t.Distance < s.Shortest || s.Shortest == 0 {
				s.Shortest = t.Distance
			}
			if i > 0 {
				// Idle gap between the previous trip's end and this start.
				gap := t.StartTime.Sub(prevEnd)
				if gap > longestGap {
					longestGap = gap
				}
				// Odometer anomaly: this trip's start reading should match
				// the previous trip's end reading (within rounding noise).
				if math.Abs(trips[i-1].KMEnd-t.KMStart) > 0.01 {
					s.OdoAnomalies++
				}
			}
			prevEnd = t.EndTime
		}
		s.LongestGap = longestGap
		if speedCount > 0 {
			s.AvgSpeed = sumSpeed / float64(speedCount)
		} else {
			s.AvgSpeed = 0
		}
		if s.Trips > 0 {
			s.AvgTripDuration = time.Duration(int64(s.TotalDuration) / int64(s.Trips))
		}
		if fuelEnabled {
			s.FuelCost = (s.TotalKM / fuelEfficiency) * fuelPrice
		}
		stats[month] = s
	}
	return stats
}
// printMainTable prints the per-month overview table to stdout, one row per
// month in the order given by months. The fuel-cost column is appended only
// when fuelEnabled is true. tripsByMonth is re-scanned per row to find the
// durations of the longest/shortest trips (by distance) for display.
func printMainTable(stats map[string]MonthStats, months []string, fuelEnabled bool, tripsByMonth map[string][]Trip) {
	fmt.Println("\n=== Monthly Driving Overview ===")
	headers := []string{"Month", "Total", "Trips", "AvgKM", "Longest", "Shortest",
		"DriveTime", "AvgTripDur", "OdoErr", "AvgSpeed"}
	// Column widths are fixed; header and rows share the same format string.
	format := "%-10s | %-16s | %-7s | %-14s | %-24s | %-26s | %-18s | %-18s | %-10s | %-18s"
	if fuelEnabled {
		headers = append(headers, "Fuel Cost (EUR)")
		format += " | %-18s"
	}
	fmt.Printf(format+"\n", toInterfaceSlice(headers)...) // print header
	fmt.Println(strings.Repeat("-", 180))
	for _, month := range months {
		s := stats[month]
		trips := tripsByMonth[month]
		// Find longest and shortest trip durations
		var longestDur, shortestDur time.Duration
		var longestDist, shortestDist float64
		if len(trips) > 0 {
			// "i == 0" seeds both extremes with the first trip so empty
			// defaults never win the comparison.
			for i, t := range trips {
				dur := t.EndTime.Sub(t.StartTime)
				if t.Distance > longestDist || i == 0 {
					longestDist = t.Distance
					longestDur = dur
				}
				if t.Distance < shortestDist || i == 0 {
					shortestDist = t.Distance
					shortestDur = dur
				}
			}
		}
		row := []interface{}{
			month,
			fmt.Sprintf("%.2f Km", s.TotalKM),
			fmt.Sprintf("%d", s.Trips),
			fmt.Sprintf("%.2f Km", safeDiv(s.TotalKM, float64(s.Trips))),
			fmt.Sprintf("%.2f Km (%s)", longestDist, fmtDuration(longestDur)),
			fmt.Sprintf("%.2f Km (%s)", shortestDist, fmtDuration(shortestDur)),
			fmtDuration(s.TotalDuration),
			fmtDuration(s.AvgTripDuration),
			fmt.Sprintf("%d", s.OdoAnomalies),
			fmt.Sprintf("%.2f Km/h", s.AvgSpeed),
		}
		if fuelEnabled {
			row = append(row, fmt.Sprintf("%.2f EUR", s.FuelCost))
		}
		fmt.Printf(format+"\n", row...)
	}
}
// toInterfaceSlice converts a string slice into an []interface{} so it can
// be passed to variadic fmt formatting functions.
func toInterfaceSlice(strs []string) []interface{} {
	out := make([]interface{}, len(strs))
	for idx := range strs {
		out[idx] = strs[idx]
	}
	return out
}
// printTopAddresses prints the three most frequent start and end addresses
// to stdout under a "Frequent Locations" heading.
func printTopAddresses(start, end map[string]int) {
	sections := []struct {
		header string
		data   map[string]int
	}{
		{"Top 3 Start Addresses:", start},
		{"\nTop 3 End Addresses:", end},
	}
	fmt.Println("\n=== Frequent Locations ===")
	for _, sec := range sections {
		fmt.Println(sec.header)
		printTopN(sec.data, 3)
	}
}
// Helper functions (safeDiv, fmtDuration, printTopN) remain unchanged from previous version
// [Include the helper functions from previous script here]
// safeDiv returns a/b, or 0 when b is zero (avoids Inf/NaN in display code).
func safeDiv(a, b float64) float64 {
	if b != 0 {
		return a / b
	}
	return 0
}
func fmtDuration(d time.Duration) string {
h := int(d.Hours())
m := int(d.Minutes()) % 60
return fmt.Sprintf("%02dh%02dm", h, m)
}
// printTopN prints the n most frequent entries of counter as a numbered
// list in descending count order (tie order is unspecified, as map
// iteration order is random).
func printTopN(counter map[string]int, n int) {
	type entry struct {
		key   string
		count int
	}
	entries := make([]entry, 0, len(counter))
	for key, count := range counter {
		entries = append(entries, entry{key, count})
	}
	sort.Slice(entries, func(a, b int) bool { return entries[a].count > entries[b].count })
	limit := n
	if len(entries) < limit {
		limit = len(entries)
	}
	for i := 0; i < limit; i++ {
		fmt.Printf("%d. %s (%d)\n", i+1, entries[i].key, entries[i].count)
	}
}
// sortedKeys returns the keys of m in ascending lexical order, giving a
// deterministic month iteration order for reporting.
func sortedKeys(m map[string][]Trip) []string {
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	sort.Strings(out)
	return out
}
// parseFloat parses s as a float64 after stripping thousands-separator
// commas; any parse failure yields 0.
func parseFloat(s string) float64 {
	cleaned := strings.ReplaceAll(s, ",", "")
	value, err := strconv.ParseFloat(cleaned, 64)
	if err != nil {
		return 0
	}
	return value
}

View File

@@ -1,365 +0,0 @@
package main
import (
"fmt"
"math"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// ANSI color codes (SGR escape sequences) used throughout the banner.
// "grey" is bright-black (code 90); "reset" restores default attributes.
var colors = map[string]string{
	"black":  "\033[0;30m",
	"red":    "\033[0;31m",
	"green":  "\033[0;32m",
	"yellow": "\033[0;33m",
	"blue":   "\033[0;34m",
	"purple": "\033[0;35m",
	"cyan":   "\033[0;36m",
	"white":  "\033[0;37m",
	"grey":   "\033[0;90m",
	"reset":  "\033[0m",
}
// DistroIcon represents a distro icon and color pairing for the banner.
type DistroIcon struct {
	Icon  string // icon glyph (private-use codepoint — presumably a Nerd Font glyph; confirm)
	Color string // ANSI escape sequence used to render the glyph
}
// DotfilesStatus represents the git status of the dotfiles repository, as
// collected by checkDotfilesStatus.
type DotfilesStatus struct {
	IsDirty    bool   // true when `git status --porcelain` reports any entries
	Untracked  int    // entries with "??" status
	Modified   int    // entries with " M" or "MM" status
	Staged     int    // entries with "M " or "A " status
	CommitHash string // short hash of HEAD
	Unpushed   int    // commits ahead of the upstream branch (0 if no upstream)
}
// main is the entry point: it renders the shell welcome banner.
func main() {
	welcome()
}
// rainbowColor returns text with each non-whitespace rune wrapped in a
// 24-bit ANSI foreground color escape. The R/G/B channels follow three sine
// waves phase-shifted by 120°, so colors cycle smoothly along the string.
// freq controls how fast the hue changes per byte position; offset shifts
// the starting hue.
func rainbowColor(text string, freq float64, offset float64) string {
	var out strings.Builder
	for idx, r := range text {
		if strings.TrimSpace(string(r)) == "" {
			// Leave whitespace uncolored so alignment is preserved.
			out.WriteRune(r)
			continue
		}
		angle := freq*float64(idx) + offset
		red := int(127*math.Sin(angle+0) + 128)
		green := int(127*math.Sin(angle+2*math.Pi/3) + 128)
		blue := int(127*math.Sin(angle+4*math.Pi/3) + 128)
		out.WriteString(fmt.Sprintf("\033[38;2;%d;%d;%dm%c\033[0m", red, green, blue, r))
	}
	return out.String()
}
// printLogo prints the ASCII-art banner line by line through rainbowColor,
// leaving blank lines uncolored, followed by one trailing blank line.
func printLogo() {
	logo := ` __ ___ _ ____ __ _____ __
 / |/ /__ ____ ____ ____ ( )_____ / __ \____ / /_/ __(_) /__ _____
 / /|_/ / _ \/ __ \/ __ \/ __ \|// ___/ / / / / __ \/ __/ /_/ / / _ \/ ___/
 / / / / __/ / / / / / / /_/ / (__ ) / /_/ / /_/ / /_/ __/ / / __(__ )
/_/ /_/\___/_/ /_/_/ /_/\____/ /____/ /_____/\____/\__/_/ /_/_/\___/____/`
	for _, row := range strings.Split(logo, "\n") {
		if strings.TrimSpace(row) == "" {
			fmt.Println()
			continue
		}
		fmt.Println(rainbowColor(row, 0.1, 0))
	}
	fmt.Println()
}
// getLastSSHLogin returns a colored one-line summary of the current user's
// last SSH login, as reported by `lastlog` (falling back to `lastlog2`).
// Returns "" when the user cannot be determined, neither tool is available,
// or the last login was not over SSH.
//
// NOTE(review): the field parsing below (parts[1] = tty, parts[2] = host,
// parts[3:] = timestamp) assumes the default lastlog column layout —
// confirm against the systems this runs on.
func getLastSSHLogin() string {
	user := os.Getenv("USER")
	if user == "" {
		user = os.Getenv("USERNAME")
	}
	if user == "" {
		return ""
	}
	// Try lastlog first
	cmd := exec.Command("lastlog", "-u", user)
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Try lastlog2
		cmd = exec.Command("lastlog2", user)
		output, err = cmd.CombinedOutput()
		if err != nil {
			return ""
		}
	}
	// First output line is the column header; the second is the user's entry.
	lines := strings.Split(strings.TrimSpace(string(output)), "\n")
	if len(lines) >= 2 {
		parts := strings.Fields(lines[1])
		// Only report logins whose tty field mentions "ssh".
		if len(parts) >= 7 && strings.Contains(parts[1], "ssh") {
			ip := parts[2]
			timeStr := strings.Join(parts[3:], " ")
			return fmt.Sprintf("%sLast SSH login%s%s %s%s from%s %s",
				colors["cyan"], colors["reset"], colors["yellow"], timeStr, colors["cyan"], colors["yellow"], ip)
		}
	}
	return ""
}
// checkDotfilesStatus inspects the dotfiles git repository (located via
// $DOTFILES_PATH, defaulting to ~/.dotfiles) and returns its working-tree
// status, HEAD short hash, and unpushed-commit count. Returns nil when the
// directory has no .git entry. All git failures degrade gracefully to
// zero/empty fields.
func checkDotfilesStatus() *DotfilesStatus {
	dotfilesPath := os.Getenv("DOTFILES_PATH")
	if dotfilesPath == "" {
		homeDir, _ := os.UserHomeDir()
		dotfilesPath = filepath.Join(homeDir, ".dotfiles")
	}
	gitPath := filepath.Join(dotfilesPath, ".git")
	if _, err := os.Stat(gitPath); os.IsNotExist(err) {
		return nil
	}
	status := &DotfilesStatus{}
	// Check git status
	cmd := exec.Command("git", "status", "--porcelain")
	cmd.Dir = dotfilesPath
	output, err := cmd.Output()
	if err == nil && strings.TrimSpace(string(output)) != "" {
		status.IsDirty = true
		lines := strings.Split(strings.TrimSpace(string(output)), "\n")
		// NOTE(review): only a subset of porcelain status codes is counted
		// here (e.g. "AM", "D ", renames are ignored) — confirm this is
		// intentional.
		for _, line := range lines {
			if strings.HasPrefix(line, "??") {
				status.Untracked++
			}
			if strings.HasPrefix(line, " M") || strings.HasPrefix(line, "MM") {
				status.Modified++
			}
			if strings.HasPrefix(line, "M ") || strings.HasPrefix(line, "A ") {
				status.Staged++
			}
		}
	}
	// Get commit hash
	cmd = exec.Command("git", "rev-parse", "--short", "HEAD")
	cmd.Dir = dotfilesPath
	output, err = cmd.Output()
	if err == nil {
		status.CommitHash = strings.TrimSpace(string(output))
	}
	// Count unpushed commits. "@{u}.." requires an upstream branch; when
	// none is configured the command fails and Unpushed stays 0.
	cmd = exec.Command("git", "log", "--oneline", "@{u}..")
	cmd.Dir = dotfilesPath
	output, err = cmd.Output()
	if err == nil {
		lines := strings.Split(strings.TrimSpace(string(output)), "\n")
		if len(lines) > 0 && lines[0] != "" {
			status.Unpushed = len(lines)
		}
	}
	return status
}
// getCondensedStatus builds the one-line status summary shown in the banner.
// It returns (statusLine, hashInfo): statusLine joins trash and dotfiles
// warnings with " - "; hashInfo carries the colored HEAD hash separately,
// but only when the repo is clean (when dirty, the hash is folded into
// statusLine instead and hashInfo is "").
func getCondensedStatus() (string, string) {
	var statusParts []string
	var hashInfo string
	// Check trash status: count entries in the XDG trash directory.
	homeDir, _ := os.UserHomeDir()
	trashPath := filepath.Join(homeDir, ".local", "share", "Trash", "files")
	if entries, err := os.ReadDir(trashPath); err == nil {
		count := len(entries)
		if count > 0 {
			statusParts = append(statusParts, fmt.Sprintf("[!] %d file(s) in trash", count))
		}
	}
	// Check dotfiles status
	dotfilesStatus := checkDotfilesStatus()
	if dotfilesStatus != nil {
		if dotfilesStatus.IsDirty {
			statusParts = append(statusParts, fmt.Sprintf("%sdotfiles is dirty%s", colors["yellow"], colors["reset"]))
			statusParts = append(statusParts, fmt.Sprintf("%s[%d] untracked%s", colors["red"], dotfilesStatus.Untracked, colors["reset"]))
			statusParts = append(statusParts, fmt.Sprintf("%s[%d] modified%s", colors["yellow"], dotfilesStatus.Modified, colors["reset"]))
			statusParts = append(statusParts, fmt.Sprintf("%s[%d] staged%s", colors["green"], dotfilesStatus.Staged, colors["reset"]))
		}
		if dotfilesStatus.CommitHash != "" {
			hashInfo = fmt.Sprintf("%s[%s%s%s]%s", colors["white"], colors["blue"], dotfilesStatus.CommitHash, colors["white"], colors["reset"])
			// When dirty, the hash belongs in the status line itself and
			// the separate return value is cleared.
			if dotfilesStatus.IsDirty {
				statusParts = append(statusParts, hashInfo)
				hashInfo = ""
			}
		}
		if dotfilesStatus.Unpushed > 0 {
			statusParts = append(statusParts, fmt.Sprintf("%s[!] You have %d commit(s) to push%s", colors["yellow"], dotfilesStatus.Unpushed, colors["reset"]))
		}
	} else {
		statusParts = append(statusParts, "Unable to check dotfiles status")
	}
	statusLine := ""
	if len(statusParts) > 0 {
		statusLine = strings.Join(statusParts, " - ")
	}
	return statusLine, hashInfo
}
// runDotfilesCommand executes the external `dotfiles` CLI with the given
// arguments and returns its stdout with surrounding whitespace trimmed.
func runDotfilesCommand(args ...string) (string, error) {
	out, err := exec.Command("dotfiles", args...).Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
// getDistroIcon resolves the (icon, color) pair for the current distro.
// The distro name comes from `dotfiles variables get Platform.Distro`,
// falling back to runtime.GOOS when that command fails. Lookup is exact
// first, then substring match; ("", "") means no icon is known.
func getDistroIcon() (string, string) {
	distroIcons := map[string]DistroIcon{
		"windows": {"\uf17a", colors["blue"]},   // blue
		"linux":   {"\uf17c", colors["yellow"]}, // yellow
		"ubuntu":  {"\uf31b", "\033[38;5;208m"}, // orange (ANSI 208)
		"debian":  {"\uf306", colors["red"]},    // red
		"arch":    {"\uf303", colors["cyan"]},   // cyan
		"fedora":  {"\uf30a", colors["blue"]},   // blue
		"alpine":  {"\uf300", colors["cyan"]},   // cyan
		"macos":   {"\uf179", colors["white"]},  // white
		"darwin":  {"\uf179", colors["white"]},  // white
		"osx":     {"\uf179", colors["white"]},  // white
	}
	distro, err := runDotfilesCommand("variables", "get", "Platform.Distro", "--format", "raw")
	if err != nil {
		distro = runtime.GOOS
	}
	distro = strings.ToLower(distro)

	if entry, ok := distroIcons[distro]; ok {
		return entry.Icon, entry.Color
	}
	// Fall back to substring matching (e.g. "ubuntu 22.04" → "ubuntu").
	for key, entry := range distroIcons {
		if strings.Contains(distro, key) {
			return entry.Icon, entry.Color
		}
	}
	return "", ""
}
func detectShell() string {
// Check for PowerShell profile
if os.Getenv("PROFILE") != "" || os.Getenv("PW_SH_PROFILE") != "" || os.Getenv("PSModulePath") != "" {
return "powershell"
}
if shell := os.Getenv("SHELL"); shell != "" {
return filepath.Base(shell)
}
if comspec := os.Getenv("COMSPEC"); comspec != "" {
if strings.HasSuffix(strings.ToLower(comspec), "cmd.exe") {
if os.Getenv("PROFILE") != "" {
return "Powershell"
}
return "CMD"
}
return filepath.Base(comspec)
}
return "unknown"
}
// welcome prints the login banner: the logo, user@host with a distro icon,
// shell/architecture, detected package managers, the dotfiles commit hash
// (same line, when clean), the last SSH login line, and a condensed dotfiles
// status line when there is anything to report.
func welcome() {
	printLogo()
	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown-host"
	}
	// Get distro icon
	distroIcon, iconColor := getDistroIcon()
	// Get username (USER on POSIX; USERNAME as the Windows fallback)
	username := os.Getenv("USER")
	if username == "" {
		username = os.Getenv("USERNAME")
	}
	if username == "" {
		username = "user"
	}
	// Get SSH login info
	sshLogin := getLastSSHLogin()
	// Get shell and arch
	shell := detectShell()
	arch := runtime.GOARCH
	// Capitalize shell and arch for display
	// NOTE(review): strings.Title is deprecated since Go 1.18; it is fine for
	// single ASCII words like shell names, but consider a manual upper-case
	// of the first rune instead.
	shellDisp := strings.Title(shell)
	archDisp := strings.ToUpper(arch)
	// Get package managers (best effort: empty string when the tool fails)
	pkgMgrs, err := runDotfilesCommand("variables", "get", "Platform.AvailablePackageManagers", "--format", "raw")
	if err != nil {
		pkgMgrs = ""
	}
	// Compact single line: user@hostname with icon, shell, arch
	fmt.Printf("%s%s%s@%s%s", colors["green"], username, colors["cyan"], colors["yellow"], hostname)
	if distroIcon != "" {
		fmt.Printf(" %s%s", iconColor, distroIcon)
	}
	fmt.Printf("%s running %s%s%s/%s%s", colors["cyan"], colors["blue"], shellDisp, colors["cyan"], colors["purple"], archDisp)
	if pkgMgrs != "" {
		// Parse and color package managers: strip surrounding brackets and
		// commas from the raw list, then cycle colors per entry.
		pkgMgrs = strings.Trim(pkgMgrs, "[]")
		pmList := strings.Fields(strings.ReplaceAll(pkgMgrs, ",", ""))
		pmColors := []string{colors["yellow"], colors["green"], colors["cyan"], colors["red"], colors["blue"]}
		var coloredPMs []string
		for i, pm := range pmList {
			color := pmColors[i%len(pmColors)] // cycle through the palette
			coloredPMs = append(coloredPMs, fmt.Sprintf("%s%s", color, pm))
		}
		fmt.Printf("%s [%s%s]", colors["cyan"], strings.Join(coloredPMs, colors["cyan"]+"/"), colors["reset"])
	} else {
		fmt.Printf("%s", colors["reset"])
	}
	// Get status info
	condensedStatus, hashInfo := getCondensedStatus()
	// Add hash to same line if dotfiles is clean
	if hashInfo != "" {
		fmt.Printf(" %s", hashInfo)
	}
	fmt.Println()
	// Display last SSH login info if available
	if sshLogin != "" {
		fmt.Printf("%s%s\n", sshLogin, colors["reset"])
	}
	// Display condensed status line only if there are issues
	if condensedStatus != "" {
		fmt.Printf("%s%s%s\n", colors["yellow"], condensedStatus, colors["reset"])
	}
}

View File

@@ -1,748 +0,0 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
)
// Color constants for terminal output: raw ANSI escape sequences used in
// every Printf below. NC resets all attributes.
const (
	Red    = "\033[0;31m"
	Green  = "\033[0;32m"
	Yellow = "\033[1;33m"
	Blue   = "\033[0;34m"
	Cyan   = "\033[0;36m"
	Bold   = "\033[1m"
	NC     = "\033[0m" // No Color
)
// ProcessInfo holds information about a process using a port.
type ProcessInfo struct {
	PID         int    // process id that owns the listening socket
	ProcessName string // executable name (may be truncated, e.g. "docker-pr")
	Protocol    string // protocol column from the probing tool ("tcp"/"udp"/...)
	DockerInfo  string // optional pre-formatted Docker annotation, "" if none
}

// DockerContainer represents a Docker container.
type DockerContainer struct {
	Name    string        // container name as shown by `docker ps`
	Image   string        // image reference from `docker inspect`
	Ports   []PortMapping // published port mappings (empty for host networking)
	Network string        // "host" when using host networking, otherwise unset
}

// PortMapping represents one published port mapping.
type PortMapping struct {
	ContainerPort int    // port inside the container
	HostPort      int    // port bound on the host
	Protocol      string // "tcp" or "udp"
	IPv6          bool   // true when the binding is on [::]
}
// main dispatches on the first CLI argument: help, list of Docker services,
// or a numeric port to inspect. Exits 1 on missing/invalid arguments.
func main() {
	if len(os.Args) < 2 {
		showUsage()
		os.Exit(1)
	}
	switch arg := os.Args[1]; arg {
	case "--help", "-h":
		showHelp()
	case "--list", "-l":
		listDockerServices()
	default:
		portNum, convErr := strconv.Atoi(arg)
		if convErr != nil || portNum < 1 || portNum > 65535 {
			fmt.Printf("%sError:%s Invalid port number. Must be between 1 and 65535.\n", Red, NC)
			os.Exit(1)
		}
		checkPort(portNum)
	}
}
// showUsage prints the short usage summary (shown on missing/invalid args).
func showUsage() {
	fmt.Printf("%sUsage:%s inuse <port_number>\n", Red, NC)
	fmt.Printf("%s inuse --list%s\n", Yellow, NC)
	fmt.Printf("%s inuse --help%s\n", Yellow, NC)
	fmt.Printf("%sExample:%s inuse 80\n", Yellow, NC)
	fmt.Printf("%s inuse --list%s\n", Yellow, NC)
}
// showHelp prints the full help text: usage, examples, description, and the
// meaning of the ✓/✗/⚠ result markers.
func showHelp() {
	fmt.Printf("%s%sinuse - Check if a port is in use%s\n\n", Cyan, Bold, NC)
	fmt.Printf("%sUSAGE:%s\n", Bold, NC)
	fmt.Printf(" inuse <port_number> Check if a specific port is in use\n")
	fmt.Printf(" inuse --list, -l List all Docker services with listening ports\n")
	fmt.Printf(" inuse --help, -h Show this help message\n\n")
	fmt.Printf("%sEXAMPLES:%s\n", Bold, NC)
	fmt.Printf(" %sinuse 80%s Check if port 80 is in use\n", Green, NC)
	fmt.Printf(" %sinuse 3000%s Check if port 3000 is in use\n", Green, NC)
	fmt.Printf(" %sinuse --list%s Show all Docker services with ports\n\n", Green, NC)
	fmt.Printf("%sDESCRIPTION:%s\n", Bold, NC)
	fmt.Printf(" The inuse function checks if a specific port is in use and identifies\n")
	fmt.Printf(" the process using it. It can detect regular processes, Docker containers\n")
	fmt.Printf(" with published ports, and containers using host networking.\n\n")
	fmt.Printf("%sOUTPUT:%s\n", Bold, NC)
	fmt.Printf(" %s✓%s Port is in use - shows process name, PID, and Docker info if applicable\n", Green, NC)
	fmt.Printf(" %s✗%s Port is free\n", Red, NC)
	fmt.Printf(" %s⚠%s Port is in use but process cannot be identified\n", Yellow, NC)
}
// listDockerServices prints every running container with published ports,
// then any containers using host networking. Exits 1 when the docker CLI is
// not installed.
func listDockerServices() {
	if !isDockerAvailable() {
		fmt.Printf("%sError:%s Docker is not available\n", Red, NC)
		os.Exit(1)
	}
	fmt.Printf("%s%sDocker Services with Listening Ports:%s\n\n", Cyan, Bold, NC)
	containers := getRunningContainers()
	if len(containers) == 0 {
		fmt.Printf("%sNo running Docker containers found%s\n", Yellow, NC)
		return
	}
	foundServices := false
	for _, container := range containers {
		// Only containers that actually publish ports are listed here.
		if len(container.Ports) > 0 {
			cleanImage := cleanImageName(container.Image)
			fmt.Printf("%s📦 %s%s%s %s(%s)%s\n", Green, Bold, container.Name, NC, Cyan, cleanImage, NC)
			for _, port := range container.Ports {
				ipv6Marker := ""
				if port.IPv6 {
					ipv6Marker = " [IPv6]"
				}
				fmt.Printf("%s ├─ Port %s%d%s%s → %d (%s)%s%s\n",
					Cyan, Bold, port.HostPort, NC, Cyan, port.ContainerPort, port.Protocol, ipv6Marker, NC)
			}
			fmt.Println()
			foundServices = true
		}
	}
	// Check for host networking containers (they publish nothing explicitly)
	hostContainers := getHostNetworkingContainers()
	if len(hostContainers) > 0 {
		fmt.Printf("%s%sHost Networking Containers:%s\n", Yellow, Bold, NC)
		for _, container := range hostContainers {
			cleanImage := cleanImageName(container.Image)
			fmt.Printf("%s🌐 %s%s%s %s(%s)%s %s- uses host networking%s\n",
				Yellow, Bold, container.Name, NC, Cyan, cleanImage, NC, Yellow, NC)
		}
		fmt.Println()
		foundServices = true
	}
	if !foundServices {
		fmt.Printf("%sNo Docker services with exposed ports found%s\n", Yellow, NC)
	}
}
// checkPort reports how the given port is being used. Outcomes, in order:
// FREE (exits 1), owned by an identifiable process (with optional Docker
// annotation), published by a Docker container, likely owned by a
// host-networking container, or in use by an unidentifiable process (with
// diagnostic hints).
func checkPort(port int) {
	// Check if port is in use first
	if !isPortInUse(port) {
		fmt.Printf("%s✗ Port %d is FREE%s\n", Red, port, NC)
		os.Exit(1)
	}
	// Port is in use, now find what's using it
	process := findProcessUsingPort(port)
	if process != nil {
		dockerInfo := ""
		if process.DockerInfo != "" {
			dockerInfo = " " + process.DockerInfo
		}
		fmt.Printf("%s✓ Port %d (%s) in use by %s%s%s %sas PID %s%d%s%s\n",
			Green, port, process.Protocol, Bold, process.ProcessName, NC, Green, Bold, process.PID, NC, dockerInfo)
		return
	}
	// Check if it's a Docker container
	containerInfo := findDockerContainerUsingPort(port)
	if containerInfo != "" {
		fmt.Printf("%s✓ Port %d in use by Docker container %s\n", Green, port, containerInfo)
		return
	}
	// If we still haven't found the process, check for host networking containers more thoroughly
	hostNetworkProcess := findHostNetworkingProcess(port)
	if hostNetworkProcess != "" {
		fmt.Printf("%s✓ Port %d likely in use by %s\n", Green, port, hostNetworkProcess)
		return
	}
	// If we still haven't found the process
	fmt.Printf("%s⚠ Port %d is in use but unable to identify the process%s\n", Yellow, port, NC)
	if isDockerAvailable() {
		hostContainers := getHostNetworkingContainers()
		if len(hostContainers) > 0 {
			fmt.Printf("%s Note: Found Docker containers using host networking:%s\n", Cyan, NC)
			for _, container := range hostContainers {
				cleanImage := cleanImageName(container.Image)
				fmt.Printf("%s - %s (%s)%s\n", Cyan, container.Name, cleanImage, NC)
			}
			fmt.Printf("%s These containers share the host's network, so one of them might be using this port%s\n", Cyan, NC)
		} else {
			fmt.Printf("%s This might be due to insufficient permissions or the process being in a different namespace%s\n", Cyan, NC)
		}
	} else {
		fmt.Printf("%s This might be due to insufficient permissions or the process being in a different namespace%s\n", Cyan, NC)
	}
}
// isPortInUse reports whether any line of `ss -tulpn` (or, as a fallback,
// `netstat -tulpn`) mentions ":<port> ". Returns false when neither tool is
// available or both fail.
func isPortInUse(port int) bool {
	needle := fmt.Sprintf(":%d ", port)
	for _, tool := range []string{"ss", "netstat"} {
		if !isCommandAvailable(tool) {
			continue
		}
		out, err := exec.Command(tool, "-tulpn").Output()
		if err != nil {
			continue // tool present but failed; try the next one
		}
		return strings.Contains(string(out), needle)
	}
	return false
}
// findProcessUsingPort tries each probing strategy in order of preference
// (netstat, ss, lsof, fuser) and returns the first hit, or nil when none of
// them can identify the owner of the port.
func findProcessUsingPort(port int) *ProcessInfo {
	probes := []func(int) *ProcessInfo{
		tryNetstat,
		trySS,
		tryLsof,
		tryFuser,
	}
	for _, probe := range probes {
		if info := probe(port); info != nil {
			return info
		}
	}
	return nil
}
// tryNetstat scans `netstat -tulpn` for a line containing ":<port> " and
// parses the 7th column ("PID/name"). When the unprivileged run fails it
// retries once with sudo. Returns nil when netstat is unavailable, both runs
// fail, or no matching line parses cleanly.
func tryNetstat(port int) *ProcessInfo {
	if !isCommandAvailable("netstat") {
		return nil
	}
	cmd := exec.Command("netstat", "-tulpn")
	output, err := cmd.Output()
	if err != nil {
		// Try with sudo if available
		if isCommandAvailable("sudo") {
			cmd = exec.Command("sudo", "netstat", "-tulpn")
			output, err = cmd.Output()
			if err != nil {
				return nil
			}
		} else {
			return nil
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	portPattern := fmt.Sprintf(":%d ", port)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, portPattern) {
			fields := strings.Fields(line)
			if len(fields) >= 7 {
				// fields[6] is "PID/process" (e.g. "1234/nginx")
				pidProcess := fields[6]
				parts := strings.Split(pidProcess, "/")
				if len(parts) >= 2 {
					if pid, err := strconv.Atoi(parts[0]); err == nil {
						processName := parts[1]
						protocol := fields[0]
						dockerInfo := getDockerInfo(pid, processName, port)
						return &ProcessInfo{
							PID:         pid,
							ProcessName: processName,
							Protocol:    protocol,
							DockerInfo:  dockerInfo,
						}
					}
				}
			}
		}
	}
	return nil
}
// trySS scans `ss -tulpn` for a line containing ":<port> ", extracts the PID
// from the "pid=NNN" token, and resolves the process name via ps. Retries
// once with sudo when the unprivileged run fails. Returns nil when ss is
// unavailable or no match yields a resolvable process name.
func trySS(port int) *ProcessInfo {
	if !isCommandAvailable("ss") {
		return nil
	}
	cmd := exec.Command("ss", "-tulpn")
	output, err := cmd.Output()
	if err != nil {
		// Try with sudo if available
		if isCommandAvailable("sudo") {
			cmd = exec.Command("sudo", "ss", "-tulpn")
			output, err = cmd.Output()
			if err != nil {
				return nil
			}
		} else {
			return nil
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	portPattern := fmt.Sprintf(":%d ", port)
	pidRegex := regexp.MustCompile(`pid=(\d+)`)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, portPattern) {
			matches := pidRegex.FindStringSubmatch(line)
			if len(matches) >= 2 {
				if pid, err := strconv.Atoi(matches[1]); err == nil {
					processName := getProcessName(pid)
					if processName != "" {
						fields := strings.Fields(line)
						protocol := ""
						if len(fields) > 0 {
							protocol = fields[0]
						}
						dockerInfo := getDockerInfo(pid, processName, port)
						return &ProcessInfo{
							PID:         pid,
							ProcessName: processName,
							Protocol:    protocol,
							DockerInfo:  dockerInfo,
						}
					}
				}
			}
		}
	}
	return nil
}
// tryLsof runs `lsof -i :<port> -n -P` and parses the first line marked
// LISTEN (command name in column 1, PID in column 2). Retries once with
// sudo on failure. The protocol is reported as "tcp" because only LISTEN
// sockets are matched here.
func tryLsof(port int) *ProcessInfo {
	if !isCommandAvailable("lsof") {
		return nil
	}
	cmd := exec.Command("lsof", "-i", fmt.Sprintf(":%d", port), "-n", "-P")
	output, err := cmd.Output()
	if err != nil {
		// Try with sudo if available
		if isCommandAvailable("sudo") {
			cmd = exec.Command("sudo", "lsof", "-i", fmt.Sprintf(":%d", port), "-n", "-P")
			output, err = cmd.Output()
			if err != nil {
				return nil
			}
		} else {
			return nil
		}
	}
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "LISTEN") {
			fields := strings.Fields(line)
			if len(fields) >= 2 {
				processName := fields[0]
				if pid, err := strconv.Atoi(fields[1]); err == nil {
					dockerInfo := getDockerInfo(pid, processName, port)
					return &ProcessInfo{
						PID:         pid,
						ProcessName: processName,
						Protocol:    "tcp",
						DockerInfo:  dockerInfo,
					}
				}
			}
		}
	}
	return nil
}
// tryFuser runs `fuser <port>/tcp` and resolves the first returned PID to a
// process name via ps. TCP only, no sudo retry, and no Docker annotation —
// this is the last-resort probe.
func tryFuser(port int) *ProcessInfo {
	if !isCommandAvailable("fuser") {
		return nil
	}
	cmd := exec.Command("fuser", fmt.Sprintf("%d/tcp", port))
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	pids := strings.Fields(string(output))
	for _, pidStr := range pids {
		if pid, err := strconv.Atoi(strings.TrimSpace(pidStr)); err == nil {
			processName := getProcessName(pid)
			if processName != "" {
				return &ProcessInfo{
					PID:         pid,
					ProcessName: processName,
					Protocol:    "tcp",
					DockerInfo:  "",
				}
			}
		}
	}
	return nil
}
// getProcessName returns the command name for pid via `ps -p <pid> -o comm=`,
// or "" when ps fails (e.g. the PID does not exist).
func getProcessName(pid int) string {
	raw, err := exec.Command("ps", "-p", strconv.Itoa(pid), "-o", "comm=").Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(raw))
}
// getDockerInfo returns a pre-colored annotation describing the Docker
// context of (pid, processName) on the given port, or "" when the process is
// not Docker-related or docker is unavailable. It recognizes docker-proxy
// processes, containerized PIDs (via cgroup), and host-networking containers.
func getDockerInfo(pid int, processName string, port int) string {
	if !isDockerAvailable() {
		return ""
	}
	// Check if it's docker-proxy (handle truncated names like "docker-pr")
	if processName == "docker-proxy" || strings.HasPrefix(processName, "docker-pr") {
		containerName := getContainerByPublishedPort(port)
		if containerName != "" {
			image := getContainerImage(containerName)
			cleanImage := cleanImageName(image)
			return fmt.Sprintf("%s(Docker: %s, image: %s)%s", Cyan, containerName, cleanImage, NC)
		}
		return fmt.Sprintf("%s(Docker proxy)%s", Cyan, NC)
	}
	// Check if process is in a Docker container using cgroup
	containerInfo := getContainerByPID(pid)
	if containerInfo != "" {
		return fmt.Sprintf("%s(Docker: %s)%s", Cyan, containerInfo, NC)
	}
	// Check if this process might be in a host networking container
	hostContainer := checkHostNetworkingContainer(pid, processName)
	if hostContainer != "" {
		return fmt.Sprintf("%s(Docker host network: %s)%s", Cyan, hostContainer, NC)
	}
	return ""
}
// getContainerByPID inspects /proc/<pid>/cgroup for a docker line containing
// a 64-hex container ID. Returns the container's name when resolvable, else
// the 12-char short ID, else "" (file unreadable or no docker cgroup).
func getContainerByPID(pid int) string {
	cgroupPath := fmt.Sprintf("/proc/%d/cgroup", pid)
	file, err := os.Open(cgroupPath)
	if err != nil {
		return ""
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	// Full container IDs are 64 lowercase hex characters.
	containerIDRegex := regexp.MustCompile(`[a-f0-9]{64}`)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "docker") {
			matches := containerIDRegex.FindStringSubmatch(line)
			if len(matches) > 0 {
				containerID := matches[0]
				containerName := getContainerNameByID(containerID)
				if containerName != "" {
					return containerName
				}
				return containerID[:12]
			}
		}
	}
	return ""
}
// findDockerContainerUsingPort returns a colored description of the container
// publishing the given port, or "" when docker is unavailable, the query
// fails, or nothing matches.
func findDockerContainerUsingPort(port int) string {
	if !isDockerAvailable() {
		return ""
	}
	// Check for containers with published ports
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", fmt.Sprintf("publish=%d", port))
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	// Bug fix: `docker ps` prints one name per line and several containers
	// can match the filter; the old TrimSpace kept interior newlines and
	// corrupted the formatted message. Keep only the first name.
	containerName := strings.TrimSpace(string(output))
	if idx := strings.IndexByte(containerName, '\n'); idx >= 0 {
		containerName = strings.TrimSpace(containerName[:idx])
	}
	if containerName != "" {
		image := getContainerImage(containerName)
		cleanImage := cleanImageName(image)
		return fmt.Sprintf("%s%s%s %s(published port, image: %s)%s", Bold, containerName, NC, Cyan, cleanImage, NC)
	}
	return ""
}
func isDockerAvailable() bool {
return isCommandAvailable("docker")
}
// isCommandAvailable reports whether the named executable exists on PATH.
func isCommandAvailable(command string) bool {
	if _, err := exec.LookPath(command); err != nil {
		return false
	}
	return true
}
// getRunningContainers lists all running containers (one `docker ps` call)
// and enriches each with its image and published ports via further docker
// calls. Returns nil when docker is unavailable or the listing fails.
func getRunningContainers() []DockerContainer {
	if !isDockerAvailable() {
		return nil
	}
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}")
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	var containers []DockerContainer
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		containerName := strings.TrimSpace(scanner.Text())
		if containerName != "" {
			container := DockerContainer{
				Name:  containerName,
				Image: getContainerImage(containerName),
				Ports: getContainerPorts(containerName),
			}
			containers = append(containers, container)
		}
	}
	return containers
}
// getHostNetworkingContainers lists running containers attached to the host
// network (filter network=host). Ports are left empty since such containers
// publish nothing explicitly. Returns nil on docker absence or failure.
func getHostNetworkingContainers() []DockerContainer {
	if !isDockerAvailable() {
		return nil
	}
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", "network=host")
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	var containers []DockerContainer
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	for scanner.Scan() {
		containerName := strings.TrimSpace(scanner.Text())
		if containerName != "" {
			container := DockerContainer{
				Name:    containerName,
				Image:   getContainerImage(containerName),
				Network: "host",
			}
			containers = append(containers, container)
		}
	}
	return containers
}
// getContainerImage returns the image reference of the named container from
// `docker inspect`, or "" when docker fails, the JSON is malformed, or the
// Config/Image fields are missing.
func getContainerImage(containerName string) string {
	cmd := exec.Command("docker", "inspect", containerName)
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	var inspectData []map[string]interface{}
	if err := json.Unmarshal(output, &inspectData); err != nil {
		return ""
	}
	if len(inspectData) == 0 {
		return ""
	}
	// Bug fix: the old unchecked `.(map[string]interface{})` assertion
	// panicked when "Config" was absent or null; guard both assertions.
	config, ok := inspectData[0]["Config"].(map[string]interface{})
	if !ok {
		return ""
	}
	if image, ok := config["Image"].(string); ok {
		return image
	}
	return ""
}
// getContainerPorts parses `docker port <name>` output into PortMappings.
// Only wildcard bindings are matched: "N/proto -> 0.0.0.0:M" (IPv4) and
// "N/proto -> [::]:M" (IPv6); bindings to specific addresses are skipped.
func getContainerPorts(containerName string) []PortMapping {
	cmd := exec.Command("docker", "port", containerName)
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	var ports []PortMapping
	scanner := bufio.NewScanner(strings.NewReader(string(output)))
	portRegex := regexp.MustCompile(`(\d+)/(tcp|udp) -> 0\.0\.0\.0:(\d+)`)
	ipv6PortRegex := regexp.MustCompile(`(\d+)/(tcp|udp) -> \[::\]:(\d+)`)
	for scanner.Scan() {
		line := scanner.Text()
		// Check for IPv4
		if matches := portRegex.FindStringSubmatch(line); len(matches) >= 4 {
			containerPort, _ := strconv.Atoi(matches[1])
			protocol := matches[2]
			hostPort, _ := strconv.Atoi(matches[3])
			ports = append(ports, PortMapping{
				ContainerPort: containerPort,
				HostPort:      hostPort,
				Protocol:      protocol,
				IPv6:          false,
			})
		}
		// Check for IPv6
		if matches := ipv6PortRegex.FindStringSubmatch(line); len(matches) >= 4 {
			containerPort, _ := strconv.Atoi(matches[1])
			protocol := matches[2]
			hostPort, _ := strconv.Atoi(matches[3])
			ports = append(ports, PortMapping{
				ContainerPort: containerPort,
				HostPort:      hostPort,
				Protocol:      protocol,
				IPv6:          true,
			})
		}
	}
	return ports
}
// getContainerByPublishedPort returns the name of a container publishing the
// given port, or "" when docker fails or nothing matches.
func getContainerByPublishedPort(port int) string {
	cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", fmt.Sprintf("publish=%d", port))
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	// Bug fix: several containers can match the filter (one name per line);
	// return only the first so callers never receive an embedded newline.
	names := strings.TrimSpace(string(output))
	if idx := strings.IndexByte(names, '\n'); idx >= 0 {
		names = strings.TrimSpace(names[:idx])
	}
	return names
}
// getContainerNameByID resolves a container ID (full or short) to its name
// via `docker inspect`, stripping the leading "/" docker puts on names.
// Returns "" on any failure.
func getContainerNameByID(containerID string) string {
	cmd := exec.Command("docker", "inspect", containerID)
	output, err := cmd.Output()
	if err != nil {
		return ""
	}
	var inspectData []map[string]interface{}
	if err := json.Unmarshal(output, &inspectData); err != nil {
		return ""
	}
	if len(inspectData) > 0 {
		if name, ok := inspectData[0]["Name"].(string); ok {
			return strings.TrimPrefix(name, "/")
		}
	}
	return ""
}
// cleanImageName shortens an image reference for display: any sha256 digest
// is collapsed to "[image-hash]" and registry/namespace prefixes are dropped,
// keeping only the final path segment.
func cleanImageName(image string) string {
	// Remove SHA256 hashes
	digestPattern := regexp.MustCompile(`sha256:[a-f0-9]*`)
	shortened := digestPattern.ReplaceAllString(image, "[image-hash]")
	// Remove registry prefixes, keep only the last path segment
	if slash := strings.LastIndex(shortened, "/"); slash >= 0 {
		return shortened[slash+1:]
	}
	return shortened
}
// findHostNetworkingProcess checks each host-networking container for a
// listener on the given port (by exec-ing netstat/ss inside it) and returns
// a colored description of the first match, or "" when docker is absent or
// no container appears to use the port.
func findHostNetworkingProcess(port int) string {
	if !isDockerAvailable() {
		return ""
	}
	// Get all host networking containers
	hostContainers := getHostNetworkingContainers()
	for _, container := range hostContainers {
		// Check if this container might be using the port
		if isContainerUsingPort(container.Name, port) {
			cleanImage := cleanImageName(container.Image)
			return fmt.Sprintf("%s%s%s %s(Docker host network: %s)%s", Bold, container.Name, NC, Cyan, cleanImage, NC)
		}
	}
	return ""
}
// isContainerUsingPort execs netstat (or ss as fallback) inside the container
// and reports whether any listening socket on ":<port> " is printed. Returns
// false when the exec fails — including when the container image ships
// neither tool, so a false result is not conclusive.
func isContainerUsingPort(containerName string, port int) bool {
	// Try to execute netstat inside the container to see if it's listening on the port
	cmd := exec.Command("docker", "exec", containerName, "sh", "-c",
		fmt.Sprintf("netstat -tlnp 2>/dev/null | grep ':%d ' || ss -tlnp 2>/dev/null | grep ':%d '", port, port))
	output, err := cmd.Output()
	if err != nil {
		return false
	}
	return len(output) > 0
}
// checkHostNetworkingContainer tries to match (pid, processName) to one of
// the host-networking containers by grepping the container's process list.
// Returns "name (image)" for the first container whose grep succeeds, "" on
// no match or when docker is unavailable.
// NOTE(review): the grep alternation '%d\|%s' matches either the PID *or*
// the process name anywhere in `ps` output, so false positives are possible
// (e.g. a different process with the same name inside the container) —
// confirm this heuristic is acceptable.
func checkHostNetworkingContainer(pid int, processName string) string {
	if !isDockerAvailable() {
		return ""
	}
	// Get all host networking containers and check if any match this process
	hostContainers := getHostNetworkingContainers()
	for _, container := range hostContainers {
		// Try to find this process inside the container
		cmd := exec.Command("docker", "exec", container.Name, "sh", "-c",
			fmt.Sprintf("ps -o pid,comm | grep '%s' | grep -q '%d\\|%s'", processName, pid, processName))
		err := cmd.Run()
		if err == nil {
			cleanImage := cleanImageName(container.Image)
			return fmt.Sprintf("%s (%s)", container.Name, cleanImage)
		}
	}
	return ""
}

View File

@@ -1,249 +0,0 @@
#!/usr/bin/env python3
import os
import subprocess
import argparse
import requests
def get_physical_interfaces():
    """
    Return the names of physical network interfaces.

    An interface under /sys/class/net/ is considered physical when its
    'device' entry is a symlink (i.e. it is backed by real hardware).

    Returns:
        list: Physical network interface names.
    """
    base = '/sys/class/net/'
    return [
        name
        for name in os.listdir(base)
        if os.path.islink(os.path.join(base, name, 'device'))
    ]
def get_virtual_interfaces():
    """
    Return the names of virtual network interfaces.

    Everything under /sys/class/net/ that is neither backed by a hardware
    'device' symlink nor the loopback interface 'lo' is treated as virtual.

    Returns:
        list: Virtual network interface names.
    """
    base = '/sys/class/net/'
    return [
        name
        for name in os.listdir(base)
        if name != 'lo' and not os.path.islink(os.path.join(base, name, 'device'))
    ]
def get_up_interfaces(interfaces):
    """
    Filters the given list of interfaces to include only those that are up or unknown.
    Args:
        interfaces (list): A list of interface names.
    Returns:
        list: A list of interface names that are up or treated as up (e.g., UNKNOWN).
    """
    up_interfaces = []
    for interface in interfaces:
        try:
            result = subprocess.run(['ip', 'link', 'show', interface],
                                    capture_output=True, text=True, check=True)
            # "state UNKNOWN" is treated as up (common for some virtual links).
            if "state UP" in result.stdout or "state UNKNOWN" in result.stdout:
                up_interfaces.append(interface)
        except Exception:
            # Interface vanished or `ip` failed — silently skip it.
            continue
    return up_interfaces
def get_interface_state(interface):
    """
    Retrieve the state and MAC address of a network interface.
    Args:
        interface (str): The name of the network interface.
    Returns:
        tuple: (state, mac) as strings; ("UNKNOWN", "N/A") on any failure.
    """
    try:
        result = subprocess.run(['ip', 'link', 'show', interface],
                                capture_output=True, text=True, check=True)
        lines = result.stdout.splitlines()
        state = "UNKNOWN"
        mac = "N/A"
        # Line 0 carries the flags/state; the state keyword is matched textually.
        if len(lines) > 0:
            if "state UP" in lines[0]:
                state = "UP"
            elif "state DOWN" in lines[0]:
                state = "DOWN"
            elif "state UNKNOWN" in lines[0]:
                state = "UP"  # Treat UNKNOWN as UP
        # Assumes line 1 is the link line with the MAC as its second token
        # (e.g. "link/ether aa:bb:..."). TODO confirm for all link types.
        if len(lines) > 1:
            mac = lines[1].strip().split()[1] if len(lines[1].strip().split()) > 1 else "N/A"
        return state, mac
    except Exception:
        return "UNKNOWN", "N/A"
def get_external_ips():
    """
    Fetch both IPv4 and IPv6 external IP addresses of the machine.

    Each service in `services` is tried in order until both addresses are
    known. When a service returns an IPv6 address, an IPv4 address is
    additionally requested via `curl -4` against the same service.

    Returns:
        tuple: (ipv4, ipv6) as strings; either is "Unavailable" when it
        could not be fetched from any service.
    """
    services = ["https://ip.mvl.sh", "https://ifconfig.co", "https://api.ipify.org", "https://myexternalip.com/raw", "https://ifconfig.io", "https://ifconfig.me"]
    headers = {"User-Agent": "curl"}  # many of these services return plain text for curl-like agents
    ipv4, ipv6 = "Unavailable", "Unavailable"
    for service in services:
        try:
            # NOTE(review): 0.2s is a very aggressive timeout; on slow links
            # every service will fail and both results stay "Unavailable".
            response = requests.get(service, headers=headers, timeout=0.2)
            if response.status_code == 200:
                ip = response.text.strip()
                if ":" in ip:  # IPv6 address
                    ipv6 = ip
                    # Try to fetch IPv4 explicitly
                    ipv4_response = subprocess.run(
                        ["curl", "-4", "--silent", service],
                        capture_output=True,
                        text=True,
                        timeout=0.2,
                        check=True
                    )
                    # NOTE(review): redundant — check=True already raises
                    # CalledProcessError on non-zero exit, so returncode is
                    # always 0 here; also CalledProcessError is NOT in the
                    # except clause below and would propagate. TODO confirm.
                    if ipv4_response.returncode == 0:
                        ipv4 = ipv4_response.stdout.strip()
                else:  # IPv4 address
                    ipv4 = ip
            if ipv4 != "Unavailable" and ipv6 != "Unavailable":
                break
        except (requests.RequestException, subprocess.TimeoutExpired):
            continue
    return ipv4, ipv6
def display_interface_details(show_physical=False, show_virtual=False, show_all=False, show_external_ip=False, show_ipv6=False):
    """
    Display details of network interfaces based on the specified flags.
    Args:
        show_physical (bool): Show physical interfaces (UP by default unless combined with show_all).
        show_virtual (bool): Show virtual interfaces (UP by default unless combined with show_all).
        show_all (bool): Include all interfaces (UP, DOWN, UNKNOWN).
        show_external_ip (bool): Fetch and display the external IP address.
        show_ipv6 (bool): Include IPv6 addresses in the output.
    Notes:
        - By default, only IPv4 addresses are shown unless `-6` is specified.
        - IPv6 addresses are displayed in a separate column if `-6` is specified.
    """
    if show_external_ip:
        ipv4, ipv6 = get_external_ips()
        print(f"External IPv4: {ipv4}")
        print(f"External IPv6: {ipv6}")
        print("-" * 70)
    # Build the interface list according to the flag combination.
    interfaces = []
    if show_all:
        if show_physical or not show_virtual:  # Default to physical if no `-v`
            interfaces.extend(get_physical_interfaces())
        if show_virtual:
            interfaces.extend(get_virtual_interfaces())
    else:
        if show_physical or not show_virtual:  # Default to physical if no `-v`
            interfaces.extend(get_up_interfaces(get_physical_interfaces()))
        if show_virtual or not show_physical:  # Default to virtual if no `-p`
            interfaces.extend(get_up_interfaces(get_virtual_interfaces()))
    interfaces.sort()
    # Define column widths based on expected maximum content length
    col_widths = {
        'interface': 15,
        'ipv4': 18,
        'ipv6': 40 if show_ipv6 else 0,  # Hide IPv6 column if not showing IPv6
        'subnet': 10,
        'state': 10,
        'mac': 18
    }
    # Print header with proper formatting
    header = f"{'Interface':<{col_widths['interface']}} {'IPv4 Address':<{col_widths['ipv4']}}"
    if show_ipv6:
        header += f" {'IPv6 Address':<{col_widths['ipv6']}}"
    header += f" {'Subnet':<{col_widths['subnet']}} {'State':<{col_widths['state']}} {'MAC Address':<{col_widths['mac']}}"
    print(header)
    print("-" * (col_widths['interface'] + col_widths['ipv4'] + (col_widths['ipv6'] if show_ipv6 else 0) + col_widths['subnet'] + col_widths['state'] + col_widths['mac']))
    for interface in interfaces:
        try:
            # `ip -br addr show` prints one compact line per address family.
            result = subprocess.run(['ip', '-br', 'addr', 'show', interface],
                                    capture_output=True, text=True, check=True)
            state, mac = get_interface_state(interface)
            if result.returncode == 0:
                lines = result.stdout.strip().splitlines()
                ipv4 = "N/A"
                ipv6 = "N/A"
                subnet = ""
                for line in lines:
                    parts = line.split()
                    if len(parts) >= 3:
                        ip_with_mask = parts[2]
                        # Check if the address is IPv4 or IPv6
                        if ":" in ip_with_mask:  # IPv6
                            ipv6 = ip_with_mask.split('/')[0]
                        else:  # IPv4
                            ipv4 = ip_with_mask.split('/')[0]
                            subnet = ip_with_mask.split('/')[1] if '/' in ip_with_mask else ""
                row = f"{interface:<{col_widths['interface']}} {ipv4:<{col_widths['ipv4']}}"
                if show_ipv6:
                    row += f" {ipv6:<{col_widths['ipv6']}}"
                row += f" {subnet:<{col_widths['subnet']}} {state:<{col_widths['state']}} {mac:<{col_widths['mac']}}"
                print(row)
        except Exception as e:
            print(f"Error fetching details for {interface}: {e}")
if __name__ == "__main__":
    # CLI entry point: parse display flags and delegate to the formatter.
    parser = argparse.ArgumentParser(description='Display network interface information')
    parser.add_argument('-p', action='store_true', help='Show physical interfaces (UP by default)')
    parser.add_argument('-v', action='store_true', help='Show virtual interfaces (UP by default)')
    parser.add_argument('-a', action='store_true', help='Include all interfaces (UP, DOWN, UNKNOWN)')
    parser.add_argument('-e', action='store_true', help='Fetch and display the external IP address')
    parser.add_argument('--ipv6', '-6', action='store_true', help='Include IPv6 addresses in the output')
    args = parser.parse_args()
    # Default to showing both UP physical and virtual interfaces if no flags are specified
    display_interface_details(show_physical=args.p or not (args.p or args.v or args.a or args.e),
                              show_virtual=args.v or not (args.p or args.v or args.a or args.e),
                              show_all=args.a,
                              show_external_ip=args.e,
                              show_ipv6=args.ipv6)

View File

@@ -1,298 +0,0 @@
#!/bin/bash
# LLM services manager: starts, stops and attaches to SillyTavern and
# KoboldCPP running inside detached GNU screen sessions.

# Colors for output (ANSI escape sequences, used with `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Configuration
KOBOLD_PATH="/mnt/data/ai/llm/koboldcpp-linux-x64"  # KoboldCPP binary location
KOBOLD_MODEL="/mnt/data/ai/llm/Mistral-Small-24B-Instruct-2501-Q4_K_M.gguf" # Default model
SILLYTAVERN_SCREEN="sillytavern"  # screen session name for SillyTavern
KOBOLD_SCREEN="koboldcpp"         # screen session name for KoboldCPP
# Function to check if a screen session exists
check_screen() {
screen -ls | grep -q "\.${1}\s"
}
# Function to list available models
list_models() {
echo -e "${BLUE}Available models:${NC}"
ls -1 /mnt/data/ai/llm/*.gguf | nl -w2 -s'. '
}
# Function to select a model
select_model() {
list_models
echo
read -p "Select model number (or press Enter for default): " model_num
if [[ -z "$model_num" ]]; then
echo -e "${YELLOW}Using default model: $(basename "$KOBOLD_MODEL")${NC}"
else
selected_model=$(ls -1 /mnt/data/ai/llm/*.gguf | sed -n "${model_num}p")
if [[ -n "$selected_model" ]]; then
KOBOLD_MODEL="$selected_model"
echo -e "${GREEN}Selected model: $(basename "$KOBOLD_MODEL")${NC}"
else
echo -e "${RED}Invalid selection. Using default model.${NC}"
fi
fi
}
# Function to start SillyTavern
start_sillytavern() {
echo -e "${YELLOW}Starting SillyTavern in screen session '${SILLYTAVERN_SCREEN}'...${NC}"
screen -dmS "$SILLYTAVERN_SCREEN" bash -c "sillytavern --listen 0.0.0.0"
sleep 2
if check_screen "$SILLYTAVERN_SCREEN"; then
echo -e "${GREEN}✓ SillyTavern started successfully!${NC}"
echo -e "${BLUE} Access at: http://0.0.0.0:8000${NC}"
else
echo -e "${RED}✗ Failed to start SillyTavern${NC}"
fi
}
# Function to start KoboldCPP
start_koboldcpp() {
select_model
echo -e "${YELLOW}Starting KoboldCPP in screen session '${KOBOLD_SCREEN}'...${NC}"
screen -dmS "$KOBOLD_SCREEN" bash -c "cd /mnt/data/ai/llm && ./koboldcpp-linux-x64 --model '$KOBOLD_MODEL' --host 0.0.0.0 --port 5001 --contextsize 8192 --gpulayers 999"
sleep 2
if check_screen "$KOBOLD_SCREEN"; then
echo -e "${GREEN}✓ KoboldCPP started successfully!${NC}"
echo -e "${BLUE} Model: $(basename "$KOBOLD_MODEL")${NC}"
echo -e "${BLUE} Access at: http://0.0.0.0:5001${NC}"
else
echo -e "${RED}✗ Failed to start KoboldCPP${NC}"
fi
}
# Function to stop a service
stop_service() {
local service=$1
local screen_name=$2
echo -e "${YELLOW}Stopping ${service}...${NC}"
screen -S "$screen_name" -X quit
sleep 1
if ! check_screen "$screen_name"; then
echo -e "${GREEN}✓ ${service} stopped successfully${NC}"
else
echo -e "${RED}✗ Failed to stop ${service}${NC}"
fi
}
# Function to show service status
show_status() {
echo -e "${CYAN}╔═══════════════════════════════════════╗${NC}"
echo -e "${CYAN}║ Service Status Overview ║${NC}"
echo -e "${CYAN}╚═══════════════════════════════════════╝${NC}"
echo
local st_running=false
local kc_running=false
# Check SillyTavern
if check_screen "$SILLYTAVERN_SCREEN"; then
st_running=true
echo -e " ${GREEN}●${NC} SillyTavern: ${GREEN}Running${NC} (screen: ${SILLYTAVERN_SCREEN})"
echo -e " ${BLUE}→ http://0.0.0.0:8000${NC}"
else
echo -e " ${RED}●${NC} SillyTavern: ${RED}Not running${NC}"
fi
echo
# Check KoboldCPP
if check_screen "$KOBOLD_SCREEN"; then
kc_running=true
echo -e " ${GREEN}●${NC} KoboldCPP: ${GREEN}Running${NC} (screen: ${KOBOLD_SCREEN})"
echo -e " ${BLUE}→ http://0.0.0.0:5001${NC}"
else
echo -e " ${RED}●${NC} KoboldCPP: ${RED}Not running${NC}"
fi
echo
}
# Function to handle service management
manage_services() {
local st_running=$(check_screen "$SILLYTAVERN_SCREEN" && echo "true" || echo "false")
local kc_running=$(check_screen "$KOBOLD_SCREEN" && echo "true" || echo "false")
# If both services are running
if [[ "$st_running" == "true" ]] && [[ "$kc_running" == "true" ]]; then
echo -e "${GREEN}Both services are running.${NC}"
echo
echo "1) Attach to SillyTavern"
echo "2) Attach to KoboldCPP"
echo "3) Restart SillyTavern"
echo "4) Restart KoboldCPP"
echo "5) Stop all services"
echo "6) Exit"
read -p "Your choice (1-6): " choice
case $choice in
1)
echo -e "${BLUE}Attaching to SillyTavern... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$SILLYTAVERN_SCREEN"
;;
2)
echo -e "${BLUE}Attaching to KoboldCPP... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$KOBOLD_SCREEN"
;;
3)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
echo
start_sillytavern
;;
4)
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
echo
start_koboldcpp
;;
5)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
;;
6)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
# If only SillyTavern is running
elif [[ "$st_running" == "true" ]]; then
echo -e "${YELLOW}Only SillyTavern is running.${NC}"
echo
echo "1) Attach to SillyTavern"
echo "2) Start KoboldCPP"
echo "3) Restart SillyTavern"
echo "4) Stop SillyTavern"
echo "5) Exit"
read -p "Your choice (1-5): " choice
case $choice in
1)
echo -e "${BLUE}Attaching to SillyTavern... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$SILLYTAVERN_SCREEN"
;;
2)
start_koboldcpp
;;
3)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
echo
start_sillytavern
;;
4)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
;;
5)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
# If only KoboldCPP is running
elif [[ "$kc_running" == "true" ]]; then
echo -e "${YELLOW}Only KoboldCPP is running.${NC}"
echo
echo "1) Attach to KoboldCPP"
echo "2) Start SillyTavern"
echo "3) Restart KoboldCPP"
echo "4) Stop KoboldCPP"
echo "5) Exit"
read -p "Your choice (1-5): " choice
case $choice in
1)
echo -e "${BLUE}Attaching to KoboldCPP... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$KOBOLD_SCREEN"
;;
2)
start_sillytavern
;;
3)
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
echo
start_koboldcpp
;;
4)
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
;;
5)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
# If no services are running
else
echo -e "${YELLOW}No services are running.${NC}"
echo
echo "1) Start both services"
echo "2) Start SillyTavern only"
echo "3) Start KoboldCPP only"
echo "4) Exit"
read -p "Your choice (1-4): " choice
case $choice in
1)
start_sillytavern
echo
start_koboldcpp
;;
2)
start_sillytavern
;;
3)
start_koboldcpp
;;
4)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
fi
}
# Main script: print the banner, show current status, run the interactive
# menu once, then print a screen cheat-sheet.
echo -e "${BLUE}╔═══════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ LLM Services Manager ║${NC}"
echo -e "${BLUE}╚═══════════════════════════════════════╝${NC}"
echo
# Show status
show_status
# Show separator and manage services
echo -e "${CYAN}═══════════════════════════════════════${NC}"
manage_services
echo
# Quick reference for working with the detached screen sessions
echo -e "${BLUE}Quick reference:${NC}"
echo "• List sessions: screen -ls"
echo "• Attach: screen -r <name>"
echo "• Detach: Ctrl+A then D"

View File

@@ -1,119 +0,0 @@
# SSH Utility - Smart SSH Connection Manager
A transparent SSH wrapper that automatically chooses between local and remote connections based on network connectivity.
## What it does
This utility acts as a drop-in replacement for the `ssh` command that intelligently routes connections:
- When you type `ssh desktop`, it automatically checks if your local network is available
- If local: connects via `desktop-local` (faster local connection)
- If remote: connects via `desktop` (Tailscale/VPN connection)
- All other SSH usage passes through unchanged (`ssh --help`, `ssh user@host`, etc.)
## Installation
The utility is automatically compiled and installed to `~/.local/bin/ssh` via Ansible when you run your dotfiles setup.
## Configuration
1. Copy the example config:
```bash
mkdir -p ~/.config/ssh-util
cp ~/.dotfiles/config/ssh-util/config.yaml ~/.config/ssh-util/
```
2. Edit `~/.config/ssh-util/config.yaml` to match your setup:
```yaml
smart_aliases:
desktop:
primary: "desktop-local" # SSH config entry for local connection
fallback: "desktop" # SSH config entry for remote connection
check_host: "192.168.86.22" # IP to ping for connectivity test
timeout: "2s" # Ping timeout
```
3. Ensure your `~/.ssh/config` contains the referenced host entries:
```
Host desktop
HostName mennos-desktop
User menno
Port 400
ForwardAgent yes
AddKeysToAgent yes
Host desktop-local
HostName 192.168.86.22
User menno
Port 400
ForwardAgent yes
AddKeysToAgent yes
```
## Usage
Once configured, simply use SSH as normal:
```bash
# Smart connection - automatically chooses local vs remote
ssh desktop
# All other SSH usage works exactly the same
ssh --help
ssh --version
ssh user@example.com
ssh -L 8080:localhost:80 server
```
## How it works
1. When you run `ssh <alias>`, the utility checks if `<alias>` is defined in the smart_aliases config
2. If yes, it pings the `check_host` IP address
3. If ping succeeds: executes `ssh <primary>` instead
4. If ping fails: executes `ssh <fallback>` instead
5. If not a smart alias: passes through to real SSH unchanged
## Troubleshooting
### SSH utility not found
Make sure `~/.local/bin` is in your PATH:
```bash
echo $PATH | grep -o ~/.local/bin
```
### Config not loading
Check the config file exists and has correct syntax:
```bash
ls -la ~/.config/ssh-util/config.yaml
cat ~/.config/ssh-util/config.yaml
```
### Connectivity test failing
Test manually:
```bash
ping -c 1 -W 2 192.168.86.22
```
### Falls back to real SSH
If there are any errors loading config or parsing, the utility safely falls back to executing the real SSH binary at `/usr/bin/ssh`.
## Adding more aliases
To add more smart aliases, just extend the config:
```yaml
smart_aliases:
desktop:
primary: "desktop-local"
fallback: "desktop"
check_host: "192.168.86.22"
timeout: "2s"
server:
primary: "server-local"
fallback: "server-remote"
check_host: "192.168.1.100"
timeout: "1s"
```
Remember to create the corresponding entries in your `~/.ssh/config`.

View File

@@ -1,102 +0,0 @@
# SSH Utility Configuration
# This file defines smart aliases that automatically choose between local and remote connections
# Logging configuration
logging:
enabled: true
# Levels: debug, info, warn, error
level: "info"
# Formats: console, json
format: "console"
smart_aliases:
desktop:
primary: "desktop-local"
fallback: "desktop"
check_host: "192.168.1.250"
timeout: "2s"
server:
primary: "server-local"
fallback: "server"
check_host: "192.168.1.254"
timeout: "2s"
laptop:
primary: "laptop-local"
fallback: "laptop"
check_host: "192.168.1.253"
timeout: "2s"
rtlsdr:
primary: "rtlsdr-local"
fallback: "rtlsdr"
check_host: "192.168.1.252"
timeout: "2s"
# Background SSH Tunnel Definitions
tunnels:
# Example: Desktop database tunnel
desktop-database:
type: local
local_port: 5432
remote_host: database
remote_port: 5432
ssh_host: desktop # Uses smart alias logic (desktop-local/desktop)
# Example: Development API tunnel
dev-api:
type: local
local_port: 8080
remote_host: api
remote_port: 80
ssh_host: dev-server
# Example: SOCKS proxy tunnel
socks-proxy:
type: dynamic
local_port: 1080
ssh_host: bastion
# Modem web interface tunnel
modem-web:
type: local
local_port: 8443
remote_host: 192.168.1.1
remote_port: 443
ssh_host: desktop
# Tunnel Management Commands:
# ssh --tunnel --open desktop-database (or ssh -TO desktop-database)
# ssh --tunnel --close desktop-database (or ssh -TC desktop-database)
# ssh --tunnel --list (or ssh -TL)
#
# Ad-hoc tunnels (not in config):
# ssh -TO temp-api --local 8080:api:80 --via server
# Logging options:
# - enabled: true/false - whether to show any logs
# - level: debug (verbose), info (normal), warn (warnings only), error (errors only)
# - format: console (human readable), json (structured)
# Logs are written to stderr so they don't interfere with SSH output
# How it works:
# 1. When you run: ssh desktop
# 2. The utility pings that alias's configured check_host (192.168.1.250 above) with a 2s timeout
# 3. If ping succeeds: runs "ssh desktop-local" instead
# 4. If ping fails: runs "ssh desktop" instead
# 5. All other SSH usage (flags, user@host, etc.) passes through unchanged
# Your SSH config should contain the actual host definitions:
# Host desktop
# HostName mennos-desktop
# User menno
# Port 400
# ForwardAgent yes
# AddKeysToAgent yes
#
# Host desktop-local
# HostName 192.168.86.22
# User menno
# Port 400
# ForwardAgent yes
# AddKeysToAgent yes

View File

@@ -1,20 +0,0 @@
module ssh-util
go 1.21
require (
github.com/jedib0t/go-pretty/v6 v6.4.9
github.com/rs/zerolog v1.31.0
github.com/spf13/cobra v1.8.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/sys v0.12.0 // indirect
)

View File

@@ -1,46 +0,0 @@
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jedib0t/go-pretty/v6 v6.4.9 h1:vZ6bjGg2eBSrJn365qlxGcaWu09Id+LHtrfDWlB2Usc=
github.com/jedib0t/go-pretty/v6 v6.4.9/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.4 h1:wZRexSlwd7ZXfKINDLsO4r7WBt3gTKONc6K/VesHvHM=
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,51 +0,0 @@
---
- name: WSL2 1Password SSH Agent Bridge
block:
- name: Ensure required packages are installed for 1Password sock bridge
ansible.builtin.package:
name:
# 1Password (WSL2 required package for sock bridge)
- socat
state: present
become: true
- name: Ensure .1password directory exists in home
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.1password"
state: directory
mode: '0700'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
- name: Create .agent-bridge.sh in home directory
ansible.builtin.copy:
dest: "{{ ansible_env.HOME }}/.agent-bridge.sh"
mode: '0755'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
content: |
# Code extracted from https://stuartleeks.com/posts/wsl-ssh-key-forward-to-windows/
# (IMPORTANT) Create the folder on your root for the `agent.sock` (How mentioned by @rfay and @Lochnair in the comments)
mkdir -p ~/.1password
# Configure ssh forwarding
export SSH_AUTH_SOCK=$HOME/.1password/agent.sock
# need `ps -ww` to get non-truncated command for matching
# use square brackets to generate a regex match for the process we want but that doesn't match the grep command running it!
ALREADY_RUNNING=$(ps -auxww | grep -q "[n]piperelay.exe -ei -s //./pipe/openssh-ssh-agent"; echo $?)
if [[ $ALREADY_RUNNING != "0" ]]; then
if [[ -S $SSH_AUTH_SOCK ]]; then
# not expecting the socket to exist as the forwarding command isn't running (http://www.tldp.org/LDP/abs/html/fto.html)
echo "removing previous socket..."
rm $SSH_AUTH_SOCK
fi
echo "Starting SSH-Agent relay..."
# setsid to force new session to keep running
# set socat to listen on $SSH_AUTH_SOCK and forward to npiperelay which then forwards to openssh-ssh-agent on windows
(setsid socat UNIX-LISTEN:$SSH_AUTH_SOCK,fork EXEC:"npiperelay.exe -ei -s //./pipe/openssh-ssh-agent",nofork &) >/dev/null 2>&1
fi
tags:
- wsl
- wsl2

View File

@@ -1,93 +0,0 @@
---
- name: Borg Backup Installation and Configuration
block:
- name: Check if Borg is already installed
ansible.builtin.command: which borg
register: borg_check
ignore_errors: true
changed_when: false
- name: Ensure Borg is installed
ansible.builtin.package:
name: borg
state: present
become: true
when: borg_check.rc != 0
- name: Set Borg backup facts
ansible.builtin.set_fact:
borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
borg_backup_dir: "/mnt/services"
borg_repo_dir: "/mnt/object_storage/borg-repo"
- name: Create Borg directories
ansible.builtin.file:
path: "{{ borg_dir }}"
state: directory
mode: "0755"
loop:
- "{{ borg_config_dir }}"
- "/mnt/object_storage"
loop_control:
loop_var: borg_dir
become: true
- name: Check if Borg repository exists
ansible.builtin.stat:
path: "{{ borg_repo_dir }}/config"
register: borg_repo_check
become: true
- name: Initialize Borg repository
ansible.builtin.command: >
borg init --encryption=repokey {{ borg_repo_dir }}
environment:
BORG_PASSPHRASE: "{{ borg_passphrase }}"
become: true
when: not borg_repo_check.stat.exists
- name: Create Borg backup script
ansible.builtin.template:
src: templates/borg-backup.sh.j2
dest: "{{ borg_config_dir }}/backup.sh"
mode: "0755"
become: true
- name: Create Borg systemd service
ansible.builtin.template:
src: templates/borg-backup.service.j2
dest: /etc/systemd/system/borg-backup.service
mode: "0644"
become: true
register: borg_service
- name: Create Borg systemd timer
ansible.builtin.template:
src: templates/borg-backup.timer.j2
dest: /etc/systemd/system/borg-backup.timer
mode: "0644"
become: true
register: borg_timer
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
when: borg_service.changed or borg_timer.changed
- name: Enable and start Borg backup timer
ansible.builtin.systemd:
name: borg-backup.timer
enabled: true
state: started
become: true
- name: Display Borg backup status
ansible.builtin.debug:
msg: "Borg backup is configured and will run daily at 2 AM. Logs available at /var/log/borg-backup.log"
tags:
- borg-backup
- borg
- backup

View File

@@ -1,95 +0,0 @@
---
# Installs the Borg local-sync script plus the systemd service/timer that
# runs it, and configures log rotation for its log file.
# Consistency fix: sibling task files use fully qualified collection names
# (ansible.builtin.*) and `true`/`false` booleans; this file used short
# module names and `yes`. Normalized — behavior is unchanged.
- name: Borg Local Sync Installation and Configuration
  block:
    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg local sync script
      ansible.builtin.template:
        src: borg-local-sync.sh.j2
        dest: /usr/local/bin/borg-local-sync.sh
        mode: "0755"
        owner: root
        group: root
      become: true
      tags:
        - borg-local-sync

    - name: Create Borg local sync systemd service
      ansible.builtin.template:
        src: borg-local-sync.service.j2
        dest: /etc/systemd/system/borg-local-sync.service
        mode: "0644"
        owner: root
        group: root
      become: true
      notify:
        - reload systemd
      tags:
        - borg-local-sync

    - name: Create Borg local sync systemd timer
      ansible.builtin.template:
        src: borg-local-sync.timer.j2
        dest: /etc/systemd/system/borg-local-sync.timer
        mode: "0644"
        owner: root
        group: root
      become: true
      notify:
        - reload systemd
        - restart borg-local-sync-timer
      tags:
        - borg-local-sync

    - name: Create log file for Borg local sync
      ansible.builtin.file:
        path: /var/log/borg-local-sync.log
        state: touch
        owner: root
        group: root
        mode: "0644"
      become: true
      tags:
        - borg-local-sync

    - name: Enable and start Borg local sync timer
      ansible.builtin.systemd:
        name: borg-local-sync.timer
        enabled: true
        state: started
        daemon_reload: true
      become: true
      tags:
        - borg-local-sync

    - name: Add logrotate configuration for Borg local sync
      ansible.builtin.copy:
        content: |
          /var/log/borg-local-sync.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 root root
          }
        dest: /etc/logrotate.d/borg-local-sync
        mode: "0644"
        owner: root
        group: root
      become: true
      tags:
        - borg-local-sync
        - borg
        - backup
  tags:
    - borg-local-sync
    - borg
    - backup

View File

@@ -1,88 +0,0 @@
---
- name: Dynamic DNS setup
block:
- name: Create systemd environment file for dynamic DNS
ansible.builtin.template:
src: "{{ playbook_dir }}/templates/dynamic-dns-systemd.env.j2"
dest: "/etc/dynamic-dns-systemd.env"
mode: "0600"
owner: root
group: root
become: true
- name: Create dynamic DNS wrapper script
ansible.builtin.copy:
dest: "/usr/local/bin/dynamic-dns-update.sh"
mode: "0755"
content: |
#!/bin/bash
# Run dynamic DNS update (binary compiled by utils.yml)
{{ ansible_user_dir }}/.local/bin/dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl,sathub.de,sathub.nl" 2>&1 | logger -t dynamic-dns
become: true
- name: Create dynamic DNS systemd timer
ansible.builtin.copy:
dest: "/etc/systemd/system/dynamic-dns.timer"
mode: "0644"
content: |
[Unit]
Description=Dynamic DNS Update Timer
Requires=dynamic-dns.service
[Timer]
OnCalendar=*:0/15
Persistent=true
[Install]
WantedBy=timers.target
become: true
register: ddns_timer
- name: Create dynamic DNS systemd service
ansible.builtin.copy:
dest: "/etc/systemd/system/dynamic-dns.service"
mode: "0644"
content: |
[Unit]
Description=Dynamic DNS Update
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/dynamic-dns-update.sh
EnvironmentFile=/etc/dynamic-dns-systemd.env
User={{ ansible_user }}
Group={{ ansible_user }}
[Install]
WantedBy=multi-user.target
become: true
register: ddns_service
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
when: ddns_timer.changed or ddns_service.changed
- name: Enable and start dynamic DNS timer
ansible.builtin.systemd:
name: dynamic-dns.timer
enabled: true
state: started
become: true
- name: Display setup completion message
ansible.builtin.debug:
msg: |
Dynamic DNS setup complete!
- Systemd timer: sudo systemctl status dynamic-dns.timer
- Check logs: sudo journalctl -u dynamic-dns.service -f
- Manual run: sudo /usr/local/bin/dynamic-dns-update.sh
- Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl
when: inventory_hostname == 'mennos-server' or inventory_hostname == 'mennos-vps'
tags:
- dynamic-dns

View File

@@ -1,94 +0,0 @@
---
- name: JuiceFS Installation and Configuration
block:
- name: Check if JuiceFS is already installed
ansible.builtin.command: which juicefs
register: juicefs_check
ignore_errors: true
changed_when: false
- name: Install JuiceFS using the automatic installer
ansible.builtin.shell: curl -sSL https://d.juicefs.com/install | sh -
register: juicefs_installation
when: juicefs_check.rc != 0
become: true
- name: Verify JuiceFS installation
ansible.builtin.command: juicefs version
register: juicefs_version
changed_when: false
when: juicefs_check.rc != 0 or juicefs_installation.changed
- name: Create mount directory
ansible.builtin.file:
path: /mnt/object_storage
state: directory
mode: "0755"
become: true
- name: Create cache directory
ansible.builtin.file:
path: /var/jfsCache
state: directory
mode: "0755"
become: true
- name: Configure JuiceFS network performance optimizations
ansible.builtin.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
become: true
loop:
- { name: "net.core.rmem_max", value: "16777216" }
- { name: "net.core.wmem_max", value: "16777216" }
- { name: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" }
- { name: "net.ipv4.tcp_wmem", value: "4096 65536 16777216" }
- name: Set JuiceFS facts
ansible.builtin.set_fact:
hetzner_access_key: "{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_ACCESS_KEY_ID') }}"
hetzner_secret_key:
"{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_SECRET_ACCESS_KEY')
}}"
redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"
- name: Create JuiceFS systemd service file
ansible.builtin.template:
src: templates/juicefs.service.j2
dest: /etc/systemd/system/juicefs.service
owner: root
group: root
mode: "0644"
become: true
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
- name: Include JuiceFS Redis tasks
ansible.builtin.include_tasks: services/redis/redis.yml
when: inventory_hostname == 'mennos-server'
- name: Enable and start JuiceFS service
ansible.builtin.systemd:
name: juicefs.service
enabled: true
state: started
become: true
- name: Check if JuiceFS is mounted
ansible.builtin.shell: df -h | grep /mnt/object_storage
become: true
register: mount_check
ignore_errors: true
changed_when: false
- name: Display mount status
ansible.builtin.debug:
msg: "JuiceFS is successfully mounted at /mnt/object_storage"
when: mount_check.rc == 0
tags:
- juicefs

View File

@@ -1,165 +0,0 @@
---
- name: Server setup
block:
- name: Ensure openssh-server is installed on Arch-based systems
ansible.builtin.package:
name: openssh
state: present
when: ansible_pkg_mgr == 'pacman'
- name: Ensure openssh-server is installed on non-Arch systems
ansible.builtin.package:
name: openssh-server
state: present
when: ansible_pkg_mgr != 'pacman'
- name: Ensure Borg is installed on Arch-based systems
ansible.builtin.package:
name: borg
state: present
become: true
when: ansible_pkg_mgr == 'pacman'
- name: Ensure Borg is installed on Debian/Ubuntu systems
ansible.builtin.package:
name: borgbackup
state: present
become: true
when: ansible_pkg_mgr != 'pacman'
- name: Include JuiceFS tasks
ansible.builtin.include_tasks: juicefs.yml
tags:
- juicefs
- name: Include Dynamic DNS tasks
ansible.builtin.include_tasks: dynamic-dns.yml
tags:
- dynamic-dns
- name: Include Borg Backup tasks
ansible.builtin.include_tasks: borg-backup.yml
tags:
- borg-backup
- name: Include Borg Local Sync tasks
ansible.builtin.include_tasks: borg-local-sync.yml
tags:
- borg-local-sync
- name: System performance optimizations
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
become: true
loop:
- { name: "fs.file-max", value: "2097152" } # Max open files for the entire system
- { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have
- { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory
- { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency to reclaim memory for directory/inode caches
- { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket
- { name: "net.core.netdev_max_backlog", value: "65535" } # Max packets queued on network interface input
- { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state
- { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections
- name: Include service tasks
ansible.builtin.include_tasks: "services/{{ item.name }}/{{ item.name }}.yml"
loop: "{{ services | selectattr('enabled', 'equalto', true) | selectattr('hosts', 'contains', inventory_hostname) | list if specific_service is not defined else services | selectattr('name', 'equalto', specific_service) | selectattr('enabled', 'equalto', true) | selectattr('hosts', 'contains', inventory_hostname) | list }}"
loop_control:
label: "{{ item.name }}"
tags:
- services
- always
vars:
services:
- name: dashy
enabled: true
hosts:
- mennos-server
- name: gitea
enabled: true
hosts:
- mennos-server
- name: factorio
enabled: true
hosts:
- mennos-server
- name: dozzle
enabled: true
hosts:
- mennos-server
- name: beszel
enabled: true
hosts:
- mennos-server
- name: caddy
enabled: true
hosts:
- mennos-server
- name: golink
enabled: true
hosts:
- mennos-server
- name: immich
enabled: true
hosts:
- mennos-server
- name: plex
enabled: true
hosts:
- mennos-server
- name: tautulli
enabled: true
hosts:
- mennos-server
- name: downloaders
enabled: true
hosts:
- mennos-server
- name: wireguard
enabled: true
hosts:
- mennos-server
- name: nextcloud
enabled: true
hosts:
- mennos-server
- name: cloudreve
enabled: true
hosts:
- mennos-server
- name: echoip
enabled: true
hosts:
- mennos-server
- name: arr-stack
enabled: true
hosts:
- mennos-server
- name: home-assistant
enabled: true
hosts:
- mennos-server
- name: privatebin
enabled: true
hosts:
- mennos-server
- name: unifi-network-application
enabled: true
hosts:
- mennos-server
- name: avorion
enabled: false
hosts:
- mennos-server
- name: sathub
enabled: true
hosts:
- mennos-server
- name: necesse
enabled: true
hosts:
- mennos-server

View File

@@ -1,38 +0,0 @@
---
- name: Deploy ArrStack service
block:
- name: Set ArrStack directories
ansible.builtin.set_fact:
arr_stack_service_dir: "{{ ansible_env.HOME }}/.services/arr-stack"
arr_stack_data_dir: "/mnt/services/arr-stack"
- name: Create ArrStack directory
ansible.builtin.file:
path: "{{ arr_stack_service_dir }}"
state: directory
mode: "0755"
- name: Create ArrStack data directory
ansible.builtin.file:
path: "{{ arr_stack_data_dir }}"
state: directory
mode: "0755"
- name: Deploy ArrStack docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ arr_stack_service_dir }}/docker-compose.yml"
mode: "0644"
register: arr_stack_template_result
- name: Stop ArrStack service
ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" down --remove-orphans
when: arr_stack_template_result.changed
- name: Start ArrStack service
ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" up -d
when: arr_stack_template_result.changed
tags:
- services
- arr_stack
- arr-stack

View File

@@ -1,182 +0,0 @@
name: arr-stack
services:
radarr:
container_name: radarr
image: lscr.io/linuxserver/radarr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
ports:
- 7878:7878
extra_hosts:
- host.docker.internal:host-gateway
volumes:
- {{ arr_stack_data_dir }}/radarr-config:/config
- /mnt/data:/mnt/data
restart: "unless-stopped"
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 2G
sonarr:
image: linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ arr_stack_data_dir }}/sonarr-config:/config
- /mnt/data:/mnt/data
ports:
- 8989:8989
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 2G
bazarr:
image: ghcr.io/hotio/bazarr:latest
container_name: bazarr
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
ports:
- 6767:6767
extra_hosts:
- host.docker.internal:host-gateway
volumes:
- {{ arr_stack_data_dir }}/bazarr-config:/config
- /mnt/data:/mnt/data
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 512M
prowlarr:
container_name: prowlarr
image: linuxserver/prowlarr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ arr_stack_data_dir }}/prowlarr-config:/config
extra_hosts:
- host.docker.internal:host-gateway
ports:
- 9696:9696
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 512M
flaresolverr:
image: ghcr.io/flaresolverr/flaresolverr:latest
container_name: flaresolverr
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=Europe/Amsterdam
ports:
- "8191:8191"
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 1G
overseerr:
image: sctx/overseerr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ arr_stack_data_dir }}/overseerr-config:/app/config
ports:
- 5055:5055
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
networks:
- arr_stack_net
- caddy_network
deploy:
resources:
limits:
memory: 512M
tdarr:
image: ghcr.io/haveagitgat/tdarr:latest
container_name: tdarr
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
- serverIP=0.0.0.0
- serverPort=8266
- webUIPort=8265
- internalNode=true
- inContainer=true
- ffmpegVersion=7
- nodeName=MyInternalNode
- auth=false
- openBrowser=true
- maxLogSizeMB=10
- cronPluginUpdate=
- NVIDIA_DRIVER_CAPABILITIES=all
- NVIDIA_VISIBLE_DEVICES=all
volumes:
- {{ arr_stack_data_dir }}/tdarr-server:/app/server
- {{ arr_stack_data_dir }}/tdarr-config:/app/configs
- {{ arr_stack_data_dir }}/tdarr-logs:/app/logs
- /mnt/data:/media
- {{ arr_stack_data_dir }}/tdarr-cache:/temp
ports:
- 8265:8265
- 8266:8266
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
runtime: nvidia
devices:
- /dev/dri:/dev/dri
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 4G
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
networks:
arr_stack_net:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,37 +0,0 @@
---
- name: Deploy Avorion service
block:
- name: Set Avorion directories
ansible.builtin.set_fact:
avorion_service_dir: "{{ ansible_env.HOME }}/.services/avorion"
avorion_data_dir: "/mnt/services/avorion"
- name: Create Avorion directory
ansible.builtin.file:
path: "{{ avorion_service_dir }}"
state: directory
mode: "0755"
- name: Create Avorion data directory
ansible.builtin.file:
path: "{{ avorion_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Avorion docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ avorion_service_dir }}/docker-compose.yml"
mode: "0644"
register: avorion_compose
- name: Stop Avorion service
ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" down --remove-orphans
when: avorion_compose.changed
- name: Start Avorion service
ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" up -d
when: avorion_compose.changed
tags:
- services
- avorion

View File

@@ -1,15 +0,0 @@
# Avorion dedicated game server.
services:
  avorion:
    image: rfvgyhn/avorion:latest
    # Added: every sibling service in this repo defines a restart policy;
    # without one the game server stays down after a host reboot or crash.
    restart: unless-stopped
    volumes:
      # Galaxy save data lives on the service data mount.
      - {{ avorion_data_dir }}:/home/steam/.avorion/galaxies/avorion_galaxy
    ports:
      - 27000:27000
      - 27000:27000/udp
      - 27003:27003/udp
      - 27020:27020/udp
      - 27021:27021/udp
    deploy:
      resources:
        limits:
          memory: 4G

View File

@@ -1,37 +0,0 @@
---
- name: Deploy Beszel service
block:
- name: Set Beszel directories
ansible.builtin.set_fact:
beszel_service_dir: "{{ ansible_env.HOME }}/.services/beszel"
beszel_data_dir: "/mnt/services/beszel"
- name: Create Beszel directory
ansible.builtin.file:
path: "{{ beszel_service_dir }}"
state: directory
mode: "0755"
- name: Create Beszel data directory
ansible.builtin.file:
path: "{{ beszel_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Beszel docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ beszel_service_dir }}/docker-compose.yml"
mode: "0644"
register: beszel_compose
- name: Stop Beszel service
ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" down --remove-orphans
when: beszel_compose.changed
- name: Start Beszel service
ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" up -d
when: beszel_compose.changed
tags:
- services
- beszel

View File

@@ -1,37 +0,0 @@
services:
beszel:
image: 'henrygd/beszel'
restart: unless-stopped
ports:
- '8090:8090'
volumes:
- {{beszel_data_dir}}/data:/beszel_data
- {{beszel_data_dir}}/socket:/beszel_socket
networks:
- beszel-net
- caddy_network
deploy:
resources:
limits:
memory: 256M
beszel-agent:
image: henrygd/beszel-agent:latest
restart: unless-stopped
network_mode: host
volumes:
- {{beszel_data_dir}}/socket:/beszel_socket
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
LISTEN: /beszel_socket/beszel.sock
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKkSIQDh1vS8lG+2Uw/9dK1eOgCHVCgQfP+Bfk4XPkdn'
deploy:
resources:
limits:
memory: 128M
networks:
beszel-net:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,285 +0,0 @@
# Global configuration for country blocking
{
servers {
protocols h1 h2 h3
}
}
# Country blocking snippet using MaxMind GeoLocation - reusable across all sites
{% if enable_country_blocking | default(false) and allowed_countries_codes | default([]) | length > 0 %}
(country_block) {
@allowed_local {
remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
}
@not_allowed_countries {
not remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
not {
maxmind_geolocation {
db_path "/etc/caddy/geoip/GeoLite2-Country.mmdb"
allow_countries {{ allowed_countries_codes | join(' ') }}
}
}
}
respond @not_allowed_countries "Access denied" 403
}
{% else %}
(country_block) {
# Country blocking disabled
}
{% endif %}
{% if inventory_hostname == 'mennos-server' %}
git.mvl.sh {
import country_block
reverse_proxy gitea:3000
tls {{ caddy_email }}
}
git.vleeuwen.me {
import country_block
redir https://git.mvl.sh{uri}
tls {{ caddy_email }}
}
df.mvl.sh {
import country_block
redir / https://git.mvl.sh/vleeuwenmenno/dotfiles/raw/branch/master/setup.sh
tls {{ caddy_email }}
}
fsm.mvl.sh {
import country_block
reverse_proxy factorio-server-manager:80
tls {{ caddy_email }}
}
fsm.vleeuwen.me {
import country_block
redir https://fsm.mvl.sh{uri}
tls {{ caddy_email }}
}
beszel.mvl.sh {
import country_block
reverse_proxy beszel:8090
tls {{ caddy_email }}
}
beszel.vleeuwen.me {
import country_block
redir https://beszel.mvl.sh{uri}
tls {{ caddy_email }}
}
sathub.de {
import country_block
handle {
reverse_proxy sathub-frontend:4173
}
# Enable compression
encode gzip
# Security headers
header {
X-Frame-Options "SAMEORIGIN"
X-Content-Type-Options "nosniff"
X-XSS-Protection "1; mode=block"
Referrer-Policy "strict-origin-when-cross-origin"
Strict-Transport-Security "max-age=31536000; includeSubDomains"
}
tls {{ caddy_email }}
}
api.sathub.de {
import country_block
reverse_proxy sathub-backend:4001
tls {{ caddy_email }}
}
sathub.nl {
import country_block
redir https://sathub.de{uri}
tls {{ caddy_email }}
}
photos.mvl.sh {
import country_block
reverse_proxy immich:2283
tls {{ caddy_email }}
}
photos.vleeuwen.me {
import country_block
redir https://photos.mvl.sh{uri}
tls {{ caddy_email }}
}
home.mvl.sh {
import country_block
reverse_proxy host.docker.internal:8123 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
home.vleeuwen.me {
import country_block
reverse_proxy host.docker.internal:8123 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
unifi.mvl.sh {
reverse_proxy unifi-controller:8443 {
transport http {
tls_insecure_skip_verify
}
header_up Host {host}
}
tls {{ caddy_email }}
}
hotspot.mvl.sh {
reverse_proxy unifi-controller:8843 {
transport http {
tls_insecure_skip_verify
}
header_up Host {host}
}
tls {{ caddy_email }}
}
hotspot.mvl.sh:80 {
redir https://hotspot.mvl.sh{uri} permanent
}
bin.mvl.sh {
import country_block
reverse_proxy privatebin:8080
tls {{ caddy_email }}
}
ip.mvl.sh ip.vleeuwen.me {
import country_block
reverse_proxy echoip:8080 {
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
http://ip.mvl.sh http://ip.vleeuwen.me {
import country_block
reverse_proxy echoip:8080 {
header_up X-Real-IP {http.request.remote.host}
}
}
overseerr.mvl.sh {
import country_block
reverse_proxy overseerr:5055
tls {{ caddy_email }}
}
overseerr.vleeuwen.me {
import country_block
redir https://overseerr.mvl.sh{uri}
tls {{ caddy_email }}
}
plex.mvl.sh {
import country_block
reverse_proxy host.docker.internal:32400 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
plex.vleeuwen.me {
import country_block
redir https://plex.mvl.sh{uri}
tls {{ caddy_email }}
}
tautulli.mvl.sh {
import country_block
reverse_proxy host.docker.internal:8181 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
tautulli.vleeuwen.me {
import country_block
redir https://tautulli.mvl.sh{uri}
tls {{ caddy_email }}
}
cloud.mvl.sh {
import country_block
reverse_proxy cloudreve:5212 {
header_up Host {host}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
cloud.vleeuwen.me {
import country_block
redir https://cloud.mvl.sh{uri}
tls {{ caddy_email }}
}
collabora.mvl.sh {
import country_block
reverse_proxy collabora:9980 {
header_up Host {host}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
drive.mvl.sh drive.vleeuwen.me {
import country_block
# CalDAV and CardDAV redirects
redir /.well-known/carddav /remote.php/dav/ 301
redir /.well-known/caldav /remote.php/dav/ 301
# Handle other .well-known requests
handle /.well-known/* {
reverse_proxy nextcloud:80 {
header_up Host {host}
header_up X-Real-IP {http.request.remote.host}
}
}
# Main reverse proxy configuration with proper headers
reverse_proxy nextcloud:80 {
header_up Host {host}
header_up X-Real-IP {http.request.remote.host}
}
# Security headers
header {
# HSTS header for enhanced security (required by Nextcloud)
Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
# Additional security headers recommended for Nextcloud
X-Content-Type-Options "nosniff"
X-Frame-Options "SAMEORIGIN"
Referrer-Policy "no-referrer"
X-XSS-Protection "1; mode=block"
X-Permitted-Cross-Domain-Policies "none"
X-Robots-Tag "noindex, nofollow"
}
tls {{ caddy_email }}
}
{% endif %}

View File

@@ -1,15 +0,0 @@
FROM caddy:2.9.1-builder AS builder
RUN xcaddy build \
--with github.com/porech/caddy-maxmind-geolocation
FROM caddy:2.9.1-alpine
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
# Create directory for MaxMind databases and logs
RUN mkdir -p /etc/caddy/geoip /var/log/caddy
EXPOSE 80 443
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]

View File

@@ -1,59 +0,0 @@
---
- name: Deploy Caddy service
block:
- name: Set Caddy directories
ansible.builtin.set_fact:
caddy_service_dir: "{{ ansible_env.HOME }}/.services/caddy"
caddy_data_dir: "/mnt/services/caddy"
geoip_db_path: "/mnt/services/echoip"
caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}"
- name: Create Caddy directory
ansible.builtin.file:
path: "{{ caddy_service_dir }}"
state: directory
mode: "0755"
- name: Setup country blocking
ansible.builtin.include_tasks: country-blocking.yml
- name: Copy Dockerfile for custom Caddy build
ansible.builtin.copy:
src: Dockerfile
dest: "{{ caddy_service_dir }}/Dockerfile"
mode: "0644"
register: caddy_dockerfile
- name: Create Caddy network
ansible.builtin.command: docker network create caddy_default
register: create_caddy_network
failed_when:
- create_caddy_network.rc != 0
- "'already exists' not in create_caddy_network.stderr"
changed_when: create_caddy_network.rc == 0
- name: Deploy Caddy docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ caddy_service_dir }}/docker-compose.yml"
mode: "0644"
register: caddy_compose
- name: Deploy Caddy Caddyfile
ansible.builtin.template:
src: Caddyfile.j2
dest: "{{ caddy_service_dir }}/Caddyfile"
mode: "0644"
register: caddy_file
- name: Stop Caddy service
ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans
when: caddy_compose.changed or caddy_file.changed
- name: Start Caddy service
ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d
when: caddy_compose.changed or caddy_file.changed
tags:
- caddy
- services
- reverse-proxy

View File

@@ -1,50 +0,0 @@
---
- name: Country blocking setup for Caddy with MaxMind GeoLocation
block:
- name: Copy Dockerfile for custom Caddy build with GeoIP
ansible.builtin.copy:
src: Dockerfile
dest: "{{ caddy_service_dir }}/Dockerfile"
mode: "0644"
when: enable_country_blocking | default(false)
- name: Check if MaxMind Country database is available
ansible.builtin.stat:
path: "{{ geoip_db_path }}/GeoLite2-Country.mmdb"
register: maxmind_country_db
when: enable_country_blocking | default(false)
- name: Ensure log directory exists for Caddy
ansible.builtin.file:
path: "{{ caddy_data_dir }}/logs"
state: directory
mode: "0755"
become: true
when: enable_country_blocking | default(false)
- name: Display country blocking configuration
ansible.builtin.debug:
msg:
- "✅ Country blocking enabled: {{ enable_country_blocking | default(false) }}"
- "🛡️ Countries to allow: {{ allowed_countries_codes | default([]) | join(', ') }}"
- "📍 Using MaxMind GeoLocation plugin"
- "💾 Database path: /etc/caddy/geoip/GeoLite2-Country.mmdb"
- "📊 Database available: {{ maxmind_country_db.stat.exists | default(false) }}"
when: enable_country_blocking | default(false)
- name: Warn if MaxMind database not found
ansible.builtin.debug:
msg:
- "⚠️ WARNING: MaxMind Country database not found!"
- "Expected location: {{ geoip_db_path }}/GeoLite2-Country.mmdb"
- "Country blocking will not work until EchoIP service is deployed"
- "Run: dotf update --ansible --tags echoip"
when:
- enable_country_blocking | default(false)
- not maxmind_country_db.stat.exists | default(false)
tags:
- caddy
- security
- country-blocking
- geoip

View File

@@ -1,32 +0,0 @@
services:
caddy:
build:
context: .
dockerfile: Dockerfile
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- {{ caddy_data_dir }}/data:/data
- {{ caddy_data_dir }}/config:/config
- {{ caddy_service_dir }}/Caddyfile:/etc/caddy/Caddyfile
- {{ geoip_db_path }}:/etc/caddy/geoip:ro
- {{ caddy_data_dir }}/logs:/var/log/caddy
environment:
- TZ=Europe/Amsterdam
- PUID=1000
- PGID=100
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- caddy_network
deploy:
resources:
limits:
memory: 512M
networks:
caddy_network:
name: caddy_default
enable_ipv6: true

View File

@@ -1,32 +0,0 @@
- name: Deploy Cloudreve service
tags:
- services
- cloudreve
block:
- name: Set Cloudreve directories
ansible.builtin.set_fact:
cloudreve_service_dir: "{{ ansible_env.HOME }}/.services/cloudreve"
cloudreve_data_dir: "/mnt/services/cloudreve"
- name: Create Cloudreve directory
ansible.builtin.file:
path: "{{ cloudreve_service_dir }}"
state: directory
mode: "0755"
- name: Deploy Cloudreve docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ cloudreve_service_dir }}/docker-compose.yml"
mode: "0644"
register: cloudreve_compose
- name: Stop Cloudreve service
ansible.builtin.command: docker compose -f "{{ cloudreve_service_dir }}/docker-compose.yml" down --remove-orphans
changed_when: false
when: cloudreve_compose.changed
- name: Start Cloudreve service
ansible.builtin.command: docker compose -f "{{ cloudreve_service_dir }}/docker-compose.yml" up -d
changed_when: false
when: cloudreve_compose.changed

View File

@@ -1,67 +0,0 @@
# Cloudreve file hosting with PostgreSQL, Redis and a Collabora CODE
# office-document editor, all joined on a private 'cloudreve' network.
services:
  cloudreve:
    image: cloudreve/cloudreve:latest
    depends_on:
      - postgresql
      - redis
    restart: always
    ports:
      - 5212:5212
    networks:
      - caddy_network
      - cloudreve
    environment:
      - CR_CONF_Database.Type=postgres
      - CR_CONF_Database.Host=postgresql
      - CR_CONF_Database.User=cloudreve
      - CR_CONF_Database.Name=cloudreve
      - CR_CONF_Database.Port=5432
      - CR_CONF_Redis.Server=redis:6379
    volumes:
      - {{ cloudreve_data_dir }}/data:/cloudreve/data
  postgresql:
    image: postgres:17
    environment:
      - POSTGRES_USER=cloudreve
      - POSTGRES_DB=cloudreve
      # NOTE(review): trust auth is acceptable only because this database sits
      # on the internal 'cloudreve' network — confirm nothing else joins it.
      - POSTGRES_HOST_AUTH_METHOD=trust
    networks:
      - cloudreve
    volumes:
      - {{ cloudreve_data_dir }}/postgres:/var/lib/postgresql/data
  collabora:
    image: collabora/code
    restart: unless-stopped
    ports:
      - 9980:9980
    environment:
      # 'domain' is a regular expression: dots must be escaped with a SINGLE
      # backslash. The previous '\\.' produced a literal backslash in the
      # pattern and never matched the real hostname collabora.mvl.sh.
      - domain=collabora\.mvl\.sh
      - username=admin
      # TODO(review): hardcoded admin password committed to the repo — move it
      # to a 1Password lookup like the Gluetun credentials in this repo use.
      - password=Dt3hgIJOPr3rgh
      - dictionaries=en_US
      - TZ=Europe/Amsterdam
      # TLS is terminated by Caddy in front of this container.
      - extra_params=--o:ssl.enable=false --o:ssl.termination=true
    networks:
      - cloudreve
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 1G
  redis:
    image: redis:latest
    networks:
      - cloudreve
    volumes:
      - {{ cloudreve_data_dir }}/redis:/data
networks:
  cloudreve:
    name: cloudreve
    driver: bridge
  caddy_network:
    name: caddy_default
    external: true

View File

@@ -1,308 +0,0 @@
pageInfo:
title: Menno's Home
navLinks: []
sections:
- name: Selfhosted
items:
- title: Plex
icon: http://mennos-server:4000/assets/plex.svg
url: https://plex.mvl.sh
statusCheckUrl: https://plex.mvl.sh/identity
statusCheck: true
id: 0_1035_plex
- title: Tautulli
icon: http://mennos-server:4000/assets/tautulli.svg
url: https://tautulli.mvl.sh
id: 1_1035_tautulli
statusCheck: true
- title: Overseerr
icon: http://mennos-server:4000/assets/overseerr.svg
url: https://overseerr.mvl.sh
id: 2_1035_overseerr
statusCheck: true
- title: Immich
icon: http://mennos-server:4000/assets/immich.svg
url: https://photos.mvl.sh
id: 3_1035_immich
statusCheck: true
- title: Nextcloud
icon: http://mennos-server:4000/assets/nextcloud.svg
url: https://drive.mvl.sh
id: 3_1035_nxtcld
statusCheck: true
- title: ComfyUI
icon: http://mennos-server:8188/assets/favicon.ico
url: http://mennos-server:8188
statusCheckUrl: http://host.docker.internal:8188/api/system_stats
id: 3_1035_comfyui
statusCheck: true
displayData:
sortBy: default
rows: 1
cols: 2
collapsed: false
hideForGuests: false
- name: Media Management
items:
- title: Sonarr
icon: http://mennos-server:4000/assets/sonarr.svg
url: http://go/sonarr
id: 0_1533_sonarr
- title: Radarr
icon: http://mennos-server:4000/assets/radarr.svg
url: http://go/radarr
id: 1_1533_radarr
- title: Prowlarr
icon: http://mennos-server:4000/assets/prowlarr.svg
url: http://go/prowlarr
id: 2_1533_prowlarr
- title: Tdarr
icon: http://mennos-server:4000/assets/tdarr.png
url: http://go/tdarr
id: 3_1533_tdarr
- name: Kagi
items:
- title: Kagi Search
icon: favicon
url: https://kagi.com/
id: 0_380_kagisearch
- title: Kagi Translate
icon: favicon
url: https://translate.kagi.com/
id: 1_380_kagitranslate
- title: Kagi Assistant
icon: favicon
url: https://kagi.com/assistant
id: 2_380_kagiassistant
- name: News
items:
- title: Nu.nl
icon: http://mennos-server:4000/assets/nunl.svg
url: https://www.nu.nl/
id: 0_380_nu
- title: Tweakers.net
icon: favicon
url: https://www.tweakers.net/
id: 1_380_tweakers
- title: NL Times
icon: favicon
url: https://www.nltimes.nl/
id: 2_380_nl_times
- name: Downloaders
items:
- title: qBittorrent
icon: http://mennos-server:4000/assets/qbittorrent.svg
url: http://go/qbit
id: 0_1154_qbittorrent
tags:
- download
- torrent
- yarr
- title: Sabnzbd
icon: http://mennos-server:4000/assets/sabnzbd.svg
url: http://go/sabnzbd
id: 1_1154_sabnzbd
tags:
- download
- nzb
- yarr
- name: Git
items:
- title: GitHub
icon: http://mennos-server:4000/assets/github.svg
url: https://github.com/vleeuwenmenno
id: 0_292_github
tags:
- repo
- git
- hub
- title: Gitea
icon: http://mennos-server:4000/assets/gitea.svg
url: http://git.mvl.sh/vleeuwenmenno
id: 1_292_gitea
tags:
- repo
- git
- tea
- name: Server Monitoring
items:
- title: Beszel
icon: http://mennos-server:4000/assets/beszel.svg
url: http://go/beszel
tags:
- monitoring
- logs
id: 0_1725_beszel
- title: Dozzle
icon: http://mennos-server:4000/assets/dozzle.svg
url: http://go/dozzle
id: 1_1725_dozzle
tags:
- monitoring
- logs
- title: UpDown.io Status
icon: far fa-signal
url: http://go/status
id: 2_1725_updowniostatus
tags:
- monitoring
- logs
- name: Tools
items:
- title: Home Assistant
icon: http://mennos-server:4000/assets/home-assistant.svg
url: http://go/homeassistant
id: 0_529_homeassistant
- title: Tailscale
icon: http://mennos-server:4000/assets/tailscale.svg
url: http://go/tailscale
id: 1_529_tailscale
- title: GliNet KVM
icon: http://mennos-server:4000/assets/glinet.svg
url: http://go/glkvm
id: 2_529_glinetkvm
- title: Unifi Network Controller
icon: http://mennos-server:4000/assets/unifi.svg
url: http://go/unifi
id: 3_529_unifinetworkcontroller
- title: Dashboard Icons
icon: favicon
url: https://dashboardicons.com/
id: 4_529_dashboardicons
- name: Weather
items:
- title: Buienradar
icon: favicon
url: https://www.buienradar.nl/weer/Beverwijk/NL/2758998
id: 0_529_buienradar
- title: ClearOutside
icon: favicon
url: https://clearoutside.com/forecast/52.49/4.66
id: 1_529_clearoutside
- title: Windy
icon: favicon
url: https://www.windy.com/
id: 2_529_windy
- title: Meteoblue
icon: favicon
url: https://www.meteoblue.com/en/country/weather/radar/the-netherlands_the-netherlands_2750405
id: 2_529_meteoblue
- name: DiscountOffice
displayData:
sortBy: default
rows: 1
cols: 3
collapsed: false
hideForGuests: false
items:
- title: DiscountOffice.nl
icon: favicon
url: https://discountoffice.nl/
id: 0_1429_discountofficenl
tags:
- do
- discount
- work
- title: DiscountOffice.be
icon: favicon
url: https://discountoffice.be/
id: 1_1429_discountofficebe
tags:
- do
- discount
- work
- title: Admin NL
icon: favicon
url: https://discountoffice.nl/administrator
id: 2_1429_adminnl
tags:
- do
- discount
- work
- title: Admin BE
icon: favicon
url: https://discountoffice.be/administrator
id: 3_1429_adminbe
tags:
- do
- discount
- work
- title: Subsites
icon: favicon
url: https://elastomappen.nl
id: 4_1429_subsites
tags:
- do
- discount
- work
- title: Proxmox
icon: http://mennos-server:4000/assets/proxmox.svg
url: https://www.transip.nl/cp/vps/prm/350680/
id: 5_1429_proxmox
tags:
- do
- discount
- work
- title: Transip
icon: favicon
url: https://www.transip.nl/cp/vps/prm/350680/
id: 6_1429_transip
tags:
- do
- discount
- work
- title: Kibana
icon: http://mennos-server:4000/assets/kibana.svg
url: http://go/kibana
id: 7_1429_kibana
tags:
- do
- discount
- work
appConfig:
layout: auto
iconSize: large
theme: nord
startingView: default
defaultOpeningMethod: sametab
statusCheck: false
statusCheckInterval: 0
routingMode: history
enableMultiTasking: false
widgetsAlwaysUseProxy: false
webSearch:
disableWebSearch: false
searchEngine: https://kagi.com/search?q=
openingMethod: newtab
searchBangs: {}
enableFontAwesome: true
enableMaterialDesignIcons: false
hideComponents:
hideHeading: false
hideNav: true
hideSearch: false
hideSettings: true
hideFooter: false
auth:
enableGuestAccess: false
users: []
enableOidc: false
oidc:
adminRole: "false"
adminGroup: "false"
enableHeaderAuth: false
headerAuth:
userHeader: REMOTE_USER
proxyWhitelist: []
enableKeycloak: false
showSplashScreen: false
preventWriteToDisk: false
preventLocalSave: false
disableConfiguration: false
disableConfigurationForNonAdmin: false
allowConfigEdit: true
enableServiceWorker: false
disableContextMenu: false
disableUpdateChecks: false
disableSmartSort: false
enableErrorReporting: false

View File

@@ -1,44 +0,0 @@
---
- name: Deploy Dashy service
block:
- name: Set Dashy directories
ansible.builtin.set_fact:
dashy_service_dir: "{{ ansible_env.HOME }}/.services/dashy"
dashy_data_dir: "/mnt/services/dashy"
- name: Create Dashy directory
ansible.builtin.file:
path: "{{ dashy_service_dir }}"
state: directory
mode: "0755"
- name: Create Dashy data directory
ansible.builtin.file:
path: "{{ dashy_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Dashy docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ dashy_service_dir }}/docker-compose.yml"
mode: "0644"
register: dashy_compose
- name: Deploy Dashy config.yml
ansible.builtin.template:
src: conf.yml.j2
dest: "{{ dashy_data_dir }}/conf.yml"
mode: "0644"
register: dashy_config
- name: Stop Dashy service
ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" down --remove-orphans
when: dashy_compose.changed
- name: Start Dashy service
ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" up -d
when: dashy_compose.changed
tags:
- services
- dashy

View File

@@ -1,21 +0,0 @@
services:
dashy:
image: lissy93/dashy:latest
restart: unless-stopped
ports:
- 4000:8080
volumes:
- {{dashy_data_dir}}/:/app/user-data
networks:
- caddy_network
extra_hosts:
- host.docker.internal:host-gateway
deploy:
resources:
limits:
memory: 2G
networks:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,71 +0,0 @@
# Download stack: gluetun VPN gateway, SABnzbd, and qBittorrent routed
# through gluetun's network namespace.
name: downloaders
services:
  gluetun:
    image: qmcgaw/gluetun:latest
    # Removed 'privileged: true': gluetun's documented requirements are the
    # NET_ADMIN capability plus the tun device, both granted below —
    # privileged mode handed the container every capability unnecessarily.
    cap_add:
      - NET_ADMIN
    networks:
      - arr_stack_net
    ports:
      - 6881:6881
      - 6881:6881/udp
      - 8085:8085 # Qbittorrent
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - {{ downloaders_data_dir }}/gluetun-config:/gluetun
    environment:
      - PUID=1000
      - PGID=100
      # VPN credentials come from 1Password at template render time.
      - VPN_SERVICE_PROVIDER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='VPN_SERVICE_PROVIDER') }}
      - OPENVPN_USER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_USER') }}
      - OPENVPN_PASSWORD={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_PASSWORD') }}
      - SERVER_COUNTRIES={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='SERVER_COUNTRIES') }}
    restart: always
    deploy:
      resources:
        limits:
          memory: 512M
  sabnzbd:
    image: lscr.io/linuxserver/sabnzbd:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ downloaders_data_dir }}/sabnzbd-config:/config
      - {{ local_data_dir }}:{{ local_data_dir }}
    restart: unless-stopped
    ports:
      - 7788:8080
    deploy:
      resources:
        limits:
          memory: 1G
  qbittorrent:
    image: lscr.io/linuxserver/qbittorrent
    # All qBittorrent traffic (and its web UI port 8085) goes through gluetun.
    network_mode: "service:gluetun"
    environment:
      - PUID=1000
      - PGID=100
      - WEBUI_PORT=8085
      - TZ=Europe/Amsterdam
    volumes:
      - {{ downloaders_data_dir }}/qbit-config:/config
      - {{ local_data_dir }}:{{ local_data_dir }}
    depends_on:
      gluetun:
        condition: service_healthy
    restart: always
    deploy:
      resources:
        limits:
          memory: 1G
networks:
  arr_stack_net:
    external: true
    name: arr_stack_net

View File

@@ -1,47 +0,0 @@
---
- name: Deploy Downloaders service
block:
- name: Set Downloaders directories
ansible.builtin.set_fact:
local_data_dir: "/mnt/data"
downloaders_service_dir: "{{ ansible_env.HOME }}/.services/downloaders"
downloaders_data_dir: "/mnt/services/downloaders"
- name: Create Downloaders directory
ansible.builtin.file:
path: "{{ downloaders_data_dir }}"
state: directory
mode: "0755"
- name: Create Downloaders service directory
ansible.builtin.file:
path: "{{ downloaders_service_dir }}"
state: directory
mode: "0755"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: true
- name: Deploy Downloaders docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ downloaders_service_dir }}/docker-compose.yml"
mode: "0644"
register: downloaders_compose
- name: Ensure arr_stack_net Docker network exists
community.docker.docker_network:
name: arr_stack_net
driver: bridge
state: present
- name: Stop Downloaders service
ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" down --remove-orphans
when: downloaders_compose.changed
- name: Start Downloaders service
ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" up -d
when: downloaders_compose.changed
tags:
- services
- downloaders

View File

@@ -1,23 +0,0 @@
services:
dozzle:
image: amir20/dozzle:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8800:8080
environment:
- DOZZLE_NO_ANALYTICS=true
restart: unless-stopped
networks:
- dozzle-net
- caddy_network
deploy:
resources:
limits:
memory: 256M
networks:
dozzle-net:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,37 +0,0 @@
---
- name: Deploy Dozzle service
block:
- name: Set Dozzle directories
ansible.builtin.set_fact:
dozzle_service_dir: "{{ ansible_env.HOME }}/.services/dozzle"
dozzle_data_dir: "/mnt/services/dozzle"
- name: Create Dozzle directory
ansible.builtin.file:
path: "{{ dozzle_service_dir }}"
state: directory
mode: "0755"
- name: Create Dozzle data directory
ansible.builtin.file:
path: "{{ dozzle_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Dozzle docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ dozzle_service_dir }}/docker-compose.yml"
mode: "0644"
register: dozzle_compose
- name: Stop Dozzle service
ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" down --remove-orphans
when: dozzle_compose.changed
- name: Start Dozzle service
ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" up -d
when: dozzle_compose.changed
tags:
- services
- dozzle

View File

@@ -1,27 +0,0 @@
services:
echoip:
container_name: 'echoip'
image: 'mpolden/echoip:latest'
restart: unless-stopped
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- caddy_network
volumes:
- {{echoip_data_dir}}/GeoLite2-ASN.mmdb:/opt/echoip/GeoLite2-ASN.mmdb:ro
- {{echoip_data_dir}}/GeoLite2-City.mmdb:/opt/echoip/GeoLite2-City.mmdb:ro
- {{echoip_data_dir}}/GeoLite2-Country.mmdb:/opt/echoip/GeoLite2-Country.mmdb:ro
command: >
-p -r -H "X-Forwarded-For" -l ":8080"
-a /opt/echoip/GeoLite2-ASN.mmdb
-c /opt/echoip/GeoLite2-City.mmdb
-f /opt/echoip/GeoLite2-Country.mmdb
deploy:
resources:
limits:
memory: 128M
networks:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,169 +0,0 @@
---
# Deploys the EchoIP service and refreshes its MaxMind GeoLite2 databases.
# Downloads are throttled to at most once per 24h via a marker file whose
# mtime records the last attempt.
- name: Deploy EchoIP service
  block:
    - name: Set EchoIP directories
      ansible.builtin.set_fact:
        echoip_service_dir: "{{ ansible_env.HOME }}/.services/echoip"
        echoip_data_dir: "/mnt/services/echoip"
        # Credentials come from 1Password; stray whitespace is stripped defensively.
        maxmind_account_id:
          "{{ lookup('community.general.onepassword', 'MaxMind',
          vault='Dotfiles', field='account_id') | regex_replace('\\s+', '') }}"
        maxmind_license_key:
          "{{ lookup('community.general.onepassword', 'MaxMind',
          vault='Dotfiles', field='license_key') | regex_replace('\\s+', '') }}"
    # Requires: gather_facts: true in playbook (ansible_date_time is used below)
    - name: Check last update marker file
      ansible.builtin.stat:
        path: "{{ echoip_data_dir }}/.last_update"
      register: echoip_update_marker
    - name: Determine if update is needed (older than 24h or missing)
      ansible.builtin.set_fact:
        # Fact is namespaced ("echoip_") so it cannot collide with a generic
        # "update_needed" fact set by another role in the same play.
        echoip_update_needed: "{{ (not echoip_update_marker.stat.exists) or ((ansible_date_time.epoch | int) - (echoip_update_marker.stat.mtime | default(0) | int) > 86400) }}"
    - name: Create EchoIP directory
      ansible.builtin.file:
        path: "{{ echoip_service_dir }}"
        state: directory
        mode: "0755"
    - name: Create EchoIP data directory
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}"
        state: directory
        mode: "0755"
    # Only update databases if needed (max once per 24h)
    - name: Refresh GeoLite2 databases
      when: echoip_update_needed
      block:
        # Touch the marker BEFORE downloading so a failed download does not
        # cause a retry on every subsequent run within the 24h window.
        - name: Update last update marker file (pre-download)
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/.last_update"
            state: touch
        - name: Create directory for ASN database extraction
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-ASN"
            state: directory
            mode: "0755"
        - name: Create directory for City database extraction
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-City"
            state: directory
            mode: "0755"
        - name: Create directory for Country database extraction
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-Country"
            state: directory
            mode: "0755"
        - name: Download GeoLite2 ASN database
          ansible.builtin.get_url:
            url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key={{ maxmind_license_key }}&suffix=tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
            mode: "0644"
        - name: Download GeoLite2 City database
          ansible.builtin.get_url:
            url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={{ maxmind_license_key }}&suffix=tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
            mode: "0644"
        - name: Download GeoLite2 Country database
          ansible.builtin.get_url:
            url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key={{ maxmind_license_key }}&suffix=tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
            mode: "0644"
        # The tarballs contain a versioned top-level folder, hence the
        # extract-then-find-and-move sequence below.
        - name: Extract GeoLite2 ASN database
          ansible.builtin.unarchive:
            src: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-ASN"
            remote_src: true
          register: asn_extracted
        - name: Extract GeoLite2 City database
          ansible.builtin.unarchive:
            src: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-City"
            remote_src: true
          register: city_extracted
        - name: Extract GeoLite2 Country database
          ansible.builtin.unarchive:
            src: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
            dest: "{{ echoip_data_dir }}/GeoLite2-Country"
            remote_src: true
          register: country_extracted
        - name: Move ASN database to correct location
          ansible.builtin.command:
            cmd: "find {{ echoip_data_dir }}/GeoLite2-ASN -name GeoLite2-ASN.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-ASN.mmdb \\;"
          when: asn_extracted.changed
        - name: Move City database to correct location
          ansible.builtin.command:
            cmd: "find {{ echoip_data_dir }}/GeoLite2-City -name GeoLite2-City.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-City.mmdb \\;"
          when: city_extracted.changed
        - name: Move Country database to correct location
          ansible.builtin.command:
            cmd: "find {{ echoip_data_dir }}/GeoLite2-Country -name GeoLite2-Country.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-Country.mmdb \\;"
          when: country_extracted.changed
        # Cleanup: use the file module (state: absent removes directories
        # recursively) instead of shelling out to `rm -rf` via command.
        - name: Remove downloaded ASN tar.gz file
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
            state: absent
        - name: Remove extracted ASN folder
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-ASN"
            state: absent
        - name: Remove downloaded City tar.gz file
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
            state: absent
        - name: Remove extracted City folder
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-City"
            state: absent
        - name: Remove downloaded Country tar.gz file
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
            state: absent
        - name: Remove extracted Country folder
          ansible.builtin.file:
            path: "{{ echoip_data_dir }}/GeoLite2-Country"
            state: absent
    # Deploy and restart the EchoIP service only when the compose file changed.
    - name: Deploy EchoIP docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ echoip_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: echoip_compose
    - name: Stop EchoIP service
      ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" down --remove-orphans
      when: echoip_compose.changed
    - name: Start EchoIP service
      ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" up -d
      when: echoip_compose.changed
  tags:
    - services
    - echoip

View File

@@ -1,31 +0,0 @@
# docker-compose template for Factorio Server Manager (ofsm).
services:
  factorio-server-manager:
    image: "ofsm/ofsm:latest"
    restart: "unless-stopped"
    environment:
      - PUID=1000
      - PGID=100
      - "FACTORIO_VERSION=stable"
      # NOTE(review): hardcoded RCON password; other services in this repo pull
      # secrets via the community.general.onepassword lookup — consider doing
      # the same here.
      - "RCON_PASS=458fc84534"
    ports:
      - "5080:80"  # FSM web UI
      - "34197:34197/udp"  # Factorio game traffic
    volumes:
      - {{ factorio_data_dir }}/fsm-data:/opt/fsm-data
      - {{ factorio_data_dir }}/factorio-data/saves:/opt/factorio/saves
      - {{ factorio_data_dir }}/factorio-data/mods:/opt/factorio/mods
      - {{ factorio_data_dir }}/factorio-data/config:/opt/factorio/config
      - {{ factorio_data_dir }}/factorio-data/mod_packs:/opt/fsm/mod_packs
    networks:
      - factorio
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 2G
networks:
  factorio:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -1,31 +0,0 @@
---
# Deploys the Factorio Server Manager stack via docker-compose.
- name: Deploy Factorio service
  block:
    - name: Set Factorio directories
      ansible.builtin.set_fact:
        factorio_service_dir: "{{ ansible_env.HOME }}/.services/factorio"
        factorio_data_dir: "/mnt/services/factorio"  # consumed by the compose template
    - name: Create Factorio directory
      ansible.builtin.file:
        path: "{{ factorio_service_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Factorio docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ factorio_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: factorio_compose
    # Recreate the stack only when the rendered compose file changed.
    - name: Stop Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" down --remove-orphans
      when: factorio_compose.changed
    - name: Start Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" up -d
      when: factorio_compose.changed
  tags:
    - services
    - factorio

View File

@@ -1,98 +0,0 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent: /tmp/act_runner

View File

@@ -1,66 +0,0 @@
# docker-compose template for Gitea with PostgreSQL and a Gitea Actions runner.
services:
  gitea:
    image: gitea/gitea:latest
    restart: always
    environment:
      - PUID=1000
      - PGID=100
    volumes:
      - {{gitea_data_dir}}/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3030:3000"  # web UI (reverse-proxied by Caddy)
      # NOTE(review): binds host port 22 for git-over-SSH — conflicts with a
      # host sshd listening on 22; confirm the host's sshd uses another port.
      - "22:22"
    networks:
      - gitea
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 1G
  postgres:
    image: postgres:15-alpine
    restart: always
    environment:
      - PUID=1000
      - PGID=100
      - POSTGRES_USER=gitea
      # Secret resolved from 1Password at template time.
      - POSTGRES_PASSWORD={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='POSTGRES_PASSWORD') }}
      - POSTGRES_DB=gitea
    volumes:
      - {{gitea_data_dir}}/postgres:/var/lib/postgresql/data
    networks:
      - gitea  # internal-only; not attached to the Caddy network
    deploy:
      resources:
        limits:
          memory: 1G
  act_runner:
    image: gitea/act_runner:latest
    volumes:
      - {{gitea_service_dir}}/act-runner-config.yaml:/config.yaml
      - /var/run/docker.sock:/var/run/docker.sock  # runner launches job containers on the host daemon
      - /tmp/act_runner:/tmp/act_runner
    environment:
      - PUID=1000
      - PGID=100
      - GITEA_INSTANCE_URL=https://git.mvl.sh
      - GITEA_RUNNER_REGISTRATION_TOKEN={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='GITEA_RUNNER_REGISTRATION_TOKEN') }}
      - GITEA_RUNNER_NAME=act-worker
      - CONFIG_FILE=/config.yaml
    restart: always
    networks:
      - gitea
    deploy:
      resources:
        limits:
          memory: 2G
networks:
  gitea:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -1,44 +0,0 @@
---
# Deploys the Gitea stack (Gitea + Postgres + act_runner) via docker-compose.
- name: Deploy Gitea service
  block:
    - name: Set Gitea directories
      ansible.builtin.set_fact:
        gitea_data_dir: "/mnt/services/gitea"
        gitea_service_dir: "{{ ansible_env.HOME }}/.services/gitea"
    - name: Create Gitea directories
      ansible.builtin.file:
        path: "{{ gitea_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ gitea_data_dir }}"
        - "{{ gitea_service_dir }}"
      loop_control:
        loop_var: gitea_dir  # custom loop var avoids clashing with the default "item"
    - name: Deploy Gitea docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ gitea_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: gitea_compose
    - name: Deploy Gitea act-runner-config.yaml
      ansible.builtin.template:
        src: act-runner-config.yaml.j2
        dest: "{{ gitea_service_dir }}/act-runner-config.yaml"
        mode: "0644"
      register: gitea_act_runner_config
    # Restart the whole stack when either rendered file changed.
    - name: Stop Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" down --remove-orphans
      when: gitea_compose.changed or gitea_act_runner_config.changed
    - name: Start Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" up -d
      when: gitea_compose.changed or gitea_act_runner_config.changed
  tags:
    - services
    - gitea

View File

@@ -1,14 +0,0 @@
# docker-compose template for Tailscale golink (private short-link service).
name: golink
services:
  server:
    image: ghcr.io/tailscale/golink:main
    user: root  # NOTE(review): overrides the image's default user — confirm root is required for the bind mount
    environment:
      # Tailscale auth key resolved from 1Password at template time.
      - TS_AUTHKEY={{ lookup('community.general.onepassword', 'GoLink', vault='Dotfiles', field='TS_AUTHKEY') }}
    volumes:
      - {{ golink_data_dir }}:/home/nonroot  # persists golink state across restarts
    restart: "unless-stopped"
    deploy:
      resources:
        limits:
          memory: 256M

View File

@@ -1,36 +0,0 @@
---
# Deploys the Tailscale golink short-link service via docker-compose.
- name: Deploy GoLink service
  block:
    - name: Set GoLink directories
      ansible.builtin.set_fact:
        golink_data_dir: "/mnt/services/golink"
        golink_service_dir: "{{ ansible_env.HOME }}/.services/golink"
    - name: Create GoLink directories
      ansible.builtin.file:
        path: "{{ golink_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ golink_data_dir }}"
        - "{{ golink_service_dir }}"
      loop_control:
        loop_var: golink_dir  # custom loop var avoids clashing with the default "item"
    - name: Deploy GoLink docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ golink_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: golink_compose
    # Recreate the stack only when the rendered compose file changed.
    - name: Stop GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" down --remove-orphans
      when: golink_compose.changed
    - name: Start GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" up -d
      when: golink_compose.changed
  tags:
    - services
    - golink

View File

@@ -1,21 +0,0 @@
# docker-compose template for Home Assistant.
# Runs privileged with host networking (required for device discovery and
# many integrations).
services:
  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    volumes:
      - "/var/run/dbus:/run/dbus:ro"  # D-Bus access (e.g. Bluetooth)
      - {{ homeassistant_data_dir }}:/config
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=1000
    restart: unless-stopped
    privileged: true
    network_mode: host
    devices:
      # NOTE(review): assumes a USB stick (e.g. Zigbee/Z-Wave radio) is present;
      # the container fails to start if /dev/ttyUSB0 is missing.
      - /dev/ttyUSB0:/dev/ttyUSB0
    deploy:
      resources:
        limits:
          memory: 2G

View File

@@ -1,36 +0,0 @@
---
# Deploys Home Assistant via docker-compose.
- name: Deploy Home Assistant service
  block:
    - name: Set Home Assistant directories
      ansible.builtin.set_fact:
        homeassistant_data_dir: "/mnt/services/homeassistant"
        homeassistant_service_dir: "{{ ansible_env.HOME }}/.services/homeassistant"
    - name: Create Home Assistant directories
      ansible.builtin.file:
        path: "{{ homeassistant_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ homeassistant_data_dir }}"
        - "{{ homeassistant_service_dir }}"
      loop_control:
        loop_var: homeassistant_dir  # custom loop var avoids clashing with the default "item"
    - name: Deploy Home Assistant docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ homeassistant_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: homeassistant_compose
    # Recreate the stack only when the rendered compose file changed.
    - name: Stop Home Assistant service
      ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" down --remove-orphans
      when: homeassistant_compose.changed
    - name: Start Home Assistant service
      ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" up -d
      when: homeassistant_compose.changed
  tags:
    - services
    - homeassistant

View File

@@ -1,123 +0,0 @@
# docker-compose template for Immich (server, CUDA machine-learning worker,
# Redis, and pgvecto.rs Postgres). Runtime settings come from the adjacent
# .env file; {{ immich_data_dir }} / {{ immich_database_dir }} are substituted
# by Ansible at template time.
services:
  immich:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    volumes:
      - {{ immich_data_dir }}:/usr/src/app/upload  # photo library on the data mount
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=100
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich
      - caddy_network
    runtime: nvidia  # GPU access for the server container
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
  machine-learning:
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
    volumes:
      - model-cache:/cache  # named volume caching downloaded ML models
    env_file:
      - .env
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich  # internal-only; only the server is reachable via Caddy
    runtime: nvidia
    deploy:
      resources:
        limits:
          memory: 8G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: unless-stopped
    networks:
      - immich
    deploy:
      resources:
        limits:
          memory: 1G
  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    environment:
      PUID: 1000
      # NOTE(review): PGID is 1000 here but 100 elsewhere in this repo — confirm intentional.
      PGID: 1000
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      - {{ immich_database_dir }}:/var/lib/postgresql/data
    healthcheck:
      # pg_isready plus a cumulative checksum-failure count check; fails the
      # healthcheck if any page checksum failures have been recorded.
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    restart: unless-stopped
    networks:
      - immich
    deploy:
      resources:
        limits:
          memory: 2G
volumes:
  model-cache:
networks:
  immich:
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -1,10 +0,0 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# Timezone used by Immich (TZ identifier list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List)
TZ=Europe/Amsterdam
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# NOTE(review): default postgres/postgres credentials — the database is only on
# the internal "immich" compose network, but consider a 1Password lookup like
# the other services in this repo.
DB_USERNAME=postgres
DB_PASSWORD=postgres
DB_DATABASE_NAME=immich

View File

@@ -1,44 +0,0 @@
---
# Deploys Immich (photo management) via docker-compose.
- name: Deploy Immich service
  block:
    - name: Set Immich directories
      ansible.builtin.set_fact:
        immich_data_dir: "/mnt/data/photos/immich-library"
        immich_database_dir: "/mnt/services/immich/postgres"
        immich_service_dir: "{{ ansible_env.HOME }}/.services/immich"
    - name: Create Immich directories
      ansible.builtin.file:
        path: "{{ immich_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ immich_data_dir }}"
        # Pre-create the database dir so the bind mount is not created
        # root-owned by the Docker daemon on first start.
        - "{{ immich_database_dir }}"
        - "{{ immich_service_dir }}"
      loop_control:
        loop_var: immich_dir
    - name: Deploy Immich docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ immich_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: immich_compose
    - name: Deploy Immich .env
      ansible.builtin.template:
        src: dotenv.j2
        dest: "{{ immich_service_dir }}/.env"
        mode: "0644"
      # BUG FIX: previously re-registered "immich_compose", overwriting the
      # compose template's result — a compose-only change could be masked and
      # the restart silently skipped. Register under a distinct name.
      register: immich_env
    # Restart the stack when either rendered file changed.
    - name: Stop Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" down --remove-orphans
      when: immich_compose.changed or immich_env.changed
    - name: Start Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" up -d
      when: immich_compose.changed or immich_env.changed
  tags:
    - services
    - immich

View File

@@ -1,15 +0,0 @@
# docker-compose template for a Necesse dedicated game server.
services:
  necesse:
    image: brammys/necesse-server
    container_name: necesse
    restart: unless-stopped
    ports:
      - "14159:14159/udp"  # game traffic (UDP only)
    environment:
      - MOTD=StarDebris' Server!
      - PASSWORD=2142  # server join password
      - SLOTS=4  # max concurrent players
      - PAUSE=1  # NOTE(review): presumably pauses the world when empty — confirm against image docs
    volumes:
      - {{ necesse_data_dir }}/saves:/necesse/saves
      - {{ necesse_data_dir }}/logs:/necesse/logs

View File

@@ -1,41 +0,0 @@
---
# Deploys the Necesse dedicated game server via docker-compose.
- name: Deploy Necesse service
  block:
    - name: Set Necesse directories
      ansible.builtin.set_fact:
        necesse_service_dir: "{{ ansible_env.HOME }}/.services/necesse"
        necesse_data_dir: "/mnt/services/necesse"
    - name: Create Necesse service directory
      ansible.builtin.file:
        path: "{{ necesse_service_dir }}"
        state: directory
        mode: "0755"
    - name: Create Necesse data directories
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ necesse_data_dir }}"
        - "{{ necesse_data_dir }}/saves"
        - "{{ necesse_data_dir }}/logs"
    - name: Deploy Necesse docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ necesse_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: necesse_compose
    # Recreate the stack only when the rendered compose file changed.
    - name: Stop Necesse service
      ansible.builtin.command: docker compose -f "{{ necesse_service_dir }}/docker-compose.yml" down --remove-orphans
      when: necesse_compose.changed
    - name: Start Necesse service
      ansible.builtin.command: docker compose -f "{{ necesse_service_dir }}/docker-compose.yml" up -d
      when: necesse_compose.changed
  tags:
    - services
    - necesse

View File

@@ -1,73 +0,0 @@
# docker-compose template for Nextcloud with MariaDB and Redis.
services:
  nextcloud:
    image: nextcloud
    container_name: nextcloud
    restart: unless-stopped
    networks:
      - nextcloud
      - caddy_network
    depends_on:
      - nextclouddb
      - redis
    ports:
      - 8081:80  # reverse-proxied by Caddy
    volumes:
      - {{ nextcloud_data_dir }}/nextcloud/html:/var/www/html
      - {{ nextcloud_data_dir }}/nextcloud/custom_apps:/var/www/html/custom_apps
      - {{ nextcloud_data_dir }}/nextcloud/config:/var/www/html/config
      - {{ nextcloud_data_dir }}/nextcloud/data:/var/www/html/data
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      # DB password resolved from 1Password at template time (same secret as the db container).
      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
      - MYSQL_HOST=nextclouddb
      - REDIS_HOST=redis
    deploy:
      resources:
        limits:
          memory: 2G
  nextclouddb:
    image: mariadb:11.4.7
    container_name: nextcloud-db
    restart: unless-stopped
    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
    networks:
      - nextcloud  # internal-only; not exposed through Caddy
    volumes:
      - {{ nextcloud_data_dir }}/database:/var/lib/mysql
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - MYSQL_RANDOM_ROOT_PASSWORD=true
      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
    deploy:
      resources:
        limits:
          memory: 1G
  redis:
    image: redis:alpine
    # NOTE(review): generic container name — may collide with other stacks on this host.
    container_name: redis
    volumes:
      - {{ nextcloud_data_dir }}/redis:/data
    networks:
      - nextcloud
    deploy:
      resources:
        limits:
          memory: 512M
networks:
  nextcloud:
    name: nextcloud
    driver: bridge
  caddy_network:
    name: caddy_default
    external: true

View File

@@ -1,31 +0,0 @@
---
# Deploys the Nextcloud stack (Nextcloud + MariaDB + Redis) via docker-compose.
- name: Deploy Nextcloud service
  block:
    - name: Set Nextcloud directories
      ansible.builtin.set_fact:
        nextcloud_service_dir: "{{ ansible_env.HOME }}/.services/nextcloud"
        nextcloud_data_dir: "/mnt/services/nextcloud"  # consumed by the compose template
    - name: Create Nextcloud directory
      ansible.builtin.file:
        path: "{{ nextcloud_service_dir }}"
        state: directory
        mode: "0755"
    - name: Deploy Nextcloud docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ nextcloud_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: nextcloud_compose
    # Recreate the stack only when the rendered compose file changed.
    - name: Stop Nextcloud service
      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" down --remove-orphans
      when: nextcloud_compose.changed
    - name: Start Nextcloud service
      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" up -d
      when: nextcloud_compose.changed
  tags:
    - services
    - nextcloud

View File

@@ -1,29 +0,0 @@
# docker-compose template for Plex Media Server with NVIDIA hardware transcoding.
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    network_mode: host  # host networking for local discovery / DLNA
    restart: unless-stopped
    runtime: nvidia
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - VERSION=docker
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    volumes:
      - {{ plex_data_dir }}/config:/config
      - {{ plex_data_dir }}/transcode:/transcode
      - /mnt/data/movies:/movies
      - /mnt/data/tvshows:/tvshows
      - /mnt/object_storage/tvshows:/tvshows_slow  # slower object-storage tier
      - /mnt/data/music:/music
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

View File

@@ -1,36 +0,0 @@
---
# Deploys Plex Media Server via docker-compose.
- name: Deploy Plex service
  block:
    - name: Set Plex directories
      ansible.builtin.set_fact:
        plex_data_dir: "/mnt/services/plex"
        plex_service_dir: "{{ ansible_env.HOME }}/.services/plex"
    - name: Create Plex directories
      ansible.builtin.file:
        path: "{{ plex_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ plex_data_dir }}"
        - "{{ plex_service_dir }}"
      loop_control:
        loop_var: plex_dir  # custom loop var avoids clashing with the default "item"
    - name: Deploy Plex docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ plex_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: plex_compose
    # Recreate the stack only when the rendered compose file changed.
    - name: Stop Plex service
      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" down --remove-orphans
      when: plex_compose.changed
    - name: Start Plex service
      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" up -d
      when: plex_compose.changed
  tags:
    - services
    - plex

View File

@@ -1,300 +0,0 @@
;<?php http_response_code(403); /*
; config file for PrivateBin
;
; An explanation of each setting can be found online at https://github.com/PrivateBin/PrivateBin/wiki/Configuration.
[main]
; (optional) set a project name to be displayed on the website
; name = "PrivateBin"
; The full URL, with the domain name and directories that point to the
; PrivateBin files, including an ending slash (/). This URL is essential to
; allow Opengraph images to be displayed on social networks.
basepath = "https://bin.mvl.sh/"
; enable or disable the discussion feature, defaults to true
discussion = false
; preselect the discussion feature, defaults to false
opendiscussion = false
; enable or disable the display of dates & times in the comments, defaults to true
; Note that internally the creation time will still get tracked in order to sort
; the comments by creation time, but you can choose not to display them.
; discussiondatedisplay = false
; enable or disable the password feature, defaults to true
password = true
; enable or disable the file upload feature, defaults to false
fileupload = false
; preselect the burn-after-reading feature, defaults to false
burnafterreadingselected = false
; which display mode to preselect by default, defaults to "plaintext"
; make sure the value exists in [formatter_options]
defaultformatter = "plaintext"
; (optional) set a syntax highlighting theme, as found in css/prettify/
; syntaxhighlightingtheme = "sons-of-obsidian"
; size limit per paste or comment in bytes, defaults to 10 Mebibytes
sizelimit = 10485760
; by default PrivateBin use "bootstrap" template (tpl/bootstrap.php).
; Optionally you can enable the template selection menu, which uses
; a session cookie to store the choice until the browser is closed.
templateselection = false
; List of available for selection templates when "templateselection" option is enabled
availabletemplates[] = "bootstrap5"
availabletemplates[] = "bootstrap"
availabletemplates[] = "bootstrap-page"
availabletemplates[] = "bootstrap-dark"
availabletemplates[] = "bootstrap-dark-page"
availabletemplates[] = "bootstrap-compact"
availabletemplates[] = "bootstrap-compact-page"
; set the template your installs defaults to, defaults to "bootstrap" (tpl/bootstrap.php), also
; bootstrap variants: "bootstrap-dark", "bootstrap-compact", "bootstrap-page",
; which can be combined with "-dark" and "-compact" for "bootstrap-dark-page",
; "bootstrap-compact-page" and finally "bootstrap5" (tpl/bootstrap5.php) - previews at:
; https://privatebin.info/screenshots.html
; template = "bootstrap"
; (optional) info text to display
; use single, instead of double quotes for HTML attributes
;info = "More information on the <a href='https://privatebin.info/'>project page</a>."
; (optional) notice to display
; notice = "Note: This is a test service: Data may be deleted anytime. Kittens will die if you abuse this service."
; by default PrivateBin will guess the visitors language based on the browsers
; settings. Optionally you can enable the language selection menu, which uses
; a session cookie to store the choice until the browser is closed.
languageselection = false
; set the language your installs defaults to, defaults to English
; if this is set and language selection is disabled, this will be the only language
; languagedefault = "en"
; (optional) URL shortener address to offer after a new paste is created.
; It is suggested to only use this with self-hosted shorteners as this will leak
; the pastes encryption key.
; urlshortener = "https://shortener.example.com/api?link="
; (optional) Let users create a QR code for sharing the paste URL with one click.
; It works both when a new paste is created and when you view a paste.
qrcode = true
; (optional) Let users send an email sharing the paste URL with one click.
; It works both when a new paste is created and when you view a paste.
; email = true
; (optional) IP based icons are a weak mechanism to detect if a comment was from
; a different user when the same username was used in a comment. It might get
; used to get the IP of a comment poster if the server salt is leaked and a
; SHA512 HMAC rainbow table is generated for all (relevant) IPs.
; Can be set to one these values:
; "none" / "identicon" / "jdenticon" (default) / "vizhash".
; icon = "none"
; Content Security Policy headers allow a website to restrict what sources are
; allowed to be accessed in its context. You need to change this if you added
; custom scripts from third-party domains to your templates, e.g. tracking
; scripts or run your site behind certain DDoS-protection services.
; Check the documentation at https://content-security-policy.com/
; Notes:
; - If you use the bootstrap5 theme, you must change default-src to 'self' to
; enable display of the svg icons
; - By default this disallows to load images from third-party servers, e.g. when
; they are embedded in pastes. If you wish to allow that, you can adjust the
; policy here. See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-not-it-load-embedded-images
; for details.
; - The 'wasm-unsafe-eval' is used to enable webassembly support (used for zlib
; compression). You can remove it if compression doesn't need to be supported.
; - The 'unsafe-inline' style-src is used by Chrome when displaying PDF previews
; and can be omitted if attachment upload is disabled (which is the default).
; See https://issues.chromium.org/issues/343754409
; - To allow displaying PDF previews in Firefox or Chrome, sandboxing must also
; get turned off. The following CSP allows PDF previews:
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'wasm-unsafe-eval'; style-src 'self' 'unsafe-inline'; font-src 'self'; frame-ancestors 'none'; frame-src blob:; img-src 'self' data: blob:; media-src blob:; object-src blob:"
;
; The recommended and default used CSP is:
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'wasm-unsafe-eval'; style-src 'self'; font-src 'self'; frame-ancestors 'none'; frame-src blob:; img-src 'self' data: blob:; media-src blob:; object-src blob:; sandbox allow-same-origin allow-scripts allow-forms allow-modals allow-downloads"
; stay compatible with PrivateBin Alpha 0.19, less secure
; if enabled will use base64.js version 1.7 instead of 2.1.9 and sha1 instead of
; sha256 in HMAC for the deletion token
; zerobincompatibility = false
; Enable or disable the warning message when the site is served over an insecure
; connection (insecure HTTP instead of HTTPS), defaults to true.
; Secure transport methods like Tor and I2P domains are automatically whitelisted.
; It is **strongly discouraged** to disable this.
; See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-it-show-me-an-error-about-an-insecure-connection for more information.
; httpwarning = true
; Pick compression algorithm or disable it. Only applies to pastes/comments
; created after changing the setting.
; Can be set to one these values: "none" / "zlib" (default).
; compression = "zlib"
[expire]
; expire value that is selected per default
; make sure the value exists in [expire_options]
default = "1week"
[expire_options]
; Set each one of these to the number of seconds in the expiration period,
; or 0 if it should never expire
5min = 300
10min = 600
1hour = 3600
1day = 86400
1week = 604800
; Well this is not *exactly* one month, it's 30 days:
1month = 2592000
1year = 31536000
never = 0
[formatter_options]
; Set available formatters, their order and their labels
plaintext = "Plain Text"
syntaxhighlighting = "Source Code"
markdown = "Markdown"
[traffic]
; time limit between calls from the same IP address in seconds
; Set this to 0 to disable rate limiting.
limit = 10
; (optional) Set IPs addresses (v4 or v6) or subnets (CIDR) which are exempted
; from the rate-limit. Invalid IPs will be ignored. If multiple values are to
; be exempted, the list needs to be comma separated. Leave unset to disable
; exemptions.
; exempted = "1.2.3.4,10.10.10/24"
; (optional) If you want only some source IP addresses (v4 or v6) or subnets
; (CIDR) to be allowed to create pastes, set these here. Invalid IPs will be
; ignored. If multiple values are to be exempted, the list needs to be comma
; separated. Leave unset to allow anyone to create pastes.
; creators = "1.2.3.4,10.10.10/24"
; (optional) if your website runs behind a reverse proxy or load balancer,
; set the HTTP header containing the visitors IP address, i.e. X_FORWARDED_FOR
; header = "X_FORWARDED_FOR"
[purge]
; minimum time limit between two purgings of expired pastes, it is only
; triggered when pastes are created
; Set this to 0 to run a purge every time a paste is created.
limit = 300
; maximum amount of expired pastes to delete in one purge
; Set this to 0 to disable purging. Set it higher, if you are running a large
; site
batchsize = 10
[model]
; name of data model class to load and directory for storage
; the default model "Filesystem" stores everything in the filesystem
class = Filesystem
[model_options]
dir = PATH "data"
;[model]
; example of a Google Cloud Storage configuration
;class = GoogleCloudStorage
;[model_options]
;bucket = "my-private-bin"
;prefix = "pastes"
;uniformacl = false
;[model]
; example of DB configuration for MySQL
;class = Database
;[model_options]
;dsn = "mysql:host=localhost;dbname=privatebin;charset=UTF8"
;tbl = "privatebin_" ; table prefix
;usr = "privatebin"
;pwd = "Z3r0P4ss"
;opt[12] = true ; PDO::ATTR_PERSISTENT
;[model]
; example of DB configuration for SQLite
;class = Database
;[model_options]
;dsn = "sqlite:" PATH "data/db.sq3"
;usr = null
;pwd = null
;opt[12] = true ; PDO::ATTR_PERSISTENT
;[model]
; example of DB configuration for PostgreSQL
;class = Database
;[model_options]
;dsn = "pgsql:host=localhost;dbname=privatebin"
;tbl = "privatebin_" ; table prefix
;usr = "privatebin"
;pwd = "Z3r0P4ss"
;opt[12] = true ; PDO::ATTR_PERSISTENT
;[model]
; example of S3 configuration for Rados gateway / CEPH
;class = S3Storage
;[model_options]
;region = ""
;version = "2006-03-01"
;endpoint = "https://s3.my-ceph.invalid"
;use_path_style_endpoint = true
;bucket = "my-bucket"
;accesskey = "my-rados-user"
;secretkey = "my-rados-pass"
;[model]
; example of S3 configuration for AWS
;class = S3Storage
;[model_options]
;region = "eu-central-1"
;version = "latest"
;bucket = "my-bucket"
;accesskey = "access key id"
;secretkey = "secret access key"
;[model]
; example of S3 configuration for AWS using its SDK default credential provider chain
; if relying on environment variables, the AWS SDK will look for the following:
; - AWS_ACCESS_KEY_ID
; - AWS_SECRET_ACCESS_KEY
; - AWS_SESSION_TOKEN (if needed)
; for more details, see https://docs.aws.amazon.com/sdk-for-php/v3/developer-guide/guide_credentials.html#default-credential-chain
;class = S3Storage
;[model_options]
;region = "eu-central-1"
;version = "latest"
;bucket = "my-bucket"
;[yourls]
; When using YOURLS as a "urlshortener" config item:
; - By default, "urlshortener" will point to the YOURLS API URL, with or without
; credentials, and will be visible in public on the PrivateBin web page.
; Only use this if you allow short URL creation without credentials.
; - Alternatively, using the parameters in this section ("signature" and
; "apiurl"), "urlshortener" needs to point to the base URL of your PrivateBin
; instance with "?shortenviayourls&link=" appended. For example:
; urlshortener = "${basepath}?shortenviayourls&link="
; This URL will in turn call YOURLS on the server side, using the URL from
; "apiurl" and the "access signature" from the "signature" parameters below.
; (optional) the "signature" (access key) issued by YOURLS for the using account
; signature = ""
; (optional) the URL of the YOURLS API, called to shorten a PrivateBin URL
; apiurl = "https://yourls.example.com/yourls-api.php"
;[sri]
; Subresource integrity (SRI) hashes used in template files. Uncomment and set
; these for all js files used. See:
; https://github.com/PrivateBin/PrivateBin/wiki/FAQ#user-content-how-to-make-privatebin-work-when-i-have-changed-some-javascript-files
;js/privatebin.js = "sha512-[…]"

View File

@@ -1,33 +0,0 @@
services:
privatebin:
image: privatebin/nginx-fpm-alpine:latest
container_name: privatebin
restart: always
read_only: true
user: "1000:1000"
ports:
- "8585:8080"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Amsterdam
volumes:
- {{ privatebin_data_dir }}:/srv/data
- {{ privatebin_service_dir }}/conf.php:/srv/cfg/conf.php:ro
healthcheck:
test: ["CMD-SHELL", "nc -z 127.0.0.1 8080 || exit 1"]
interval: 10s
timeout: 5s
retries: 3
start_period: 90s
networks:
- caddy_network
deploy:
resources:
limits:
memory: 256M
networks:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,42 +0,0 @@
---
- name: Deploy PrivateBin service
block:
- name: Set PrivateBin directories
ansible.builtin.set_fact:
privatebin_data_dir: "/mnt/services/privatebin"
privatebin_service_dir: "{{ ansible_env.HOME }}/.services/privatebin"
- name: Create PrivateBin directories
ansible.builtin.file:
path: "{{ privatebin_dir }}"
state: directory
mode: "0755"
loop:
- "{{ privatebin_data_dir }}"
- "{{ privatebin_service_dir }}"
loop_control:
loop_var: privatebin_dir
- name: Deploy PrivateBin docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ privatebin_service_dir }}/docker-compose.yml"
mode: "0644"
register: privatebin_compose
- name: Deploy PrivateBin conf.php
ansible.builtin.template:
src: conf.php.j2
dest: "{{ privatebin_service_dir }}/conf.php"
mode: "0644"
- name: Stop PrivateBin service
ansible.builtin.command: docker compose -f "{{ privatebin_service_dir }}/docker-compose.yml" down --remove-orphans
when: privatebin_compose.changed
- name: Start PrivateBin service
ansible.builtin.command: docker compose -f "{{ privatebin_service_dir }}/docker-compose.yml" up -d
when: privatebin_compose.changed
tags:
- services
- privatebin

View File

@@ -1,17 +0,0 @@
services:
  qdrant:
    image: qdrant/qdrant:latest
    restart: always
    ports:
      # 6333 is Qdrant's REST API port, 6334 its gRPC port (per Qdrant's
      # documented defaults) — both published on the host here.
      - 6333:6333
      - 6334:6334
    expose:
      # NOTE(review): "expose" is redundant for ports already published via
      # "ports" above; 6335 appears to be Qdrant's internal cluster port —
      # confirm it is actually needed for a single-node deployment.
      - 6333
      - 6334
      - 6335
    volumes:
      # Persistent vector/collection storage on the host.
      - /mnt/services/qdrant:/qdrant/storage
    deploy:
      resources:
        limits:
          memory: 2G

View File

@@ -1,32 +0,0 @@
- name: Deploy Qdrant service
tags:
- services
- qdrant
block:
- name: Set Qdrant directories
ansible.builtin.set_fact:
qdrant_service_dir: "{{ ansible_env.HOME }}/.services/qdrant"
qdrant_data_dir: "/mnt/services/qdrant"
- name: Create Qdrant directory
ansible.builtin.file:
path: "{{ qdrant_service_dir }}"
state: directory
mode: "0755"
- name: Deploy Qdrant docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ qdrant_service_dir }}/docker-compose.yml"
mode: "0644"
notify: restart_qdrant
- name: Stop Qdrant service
ansible.builtin.command: docker compose -f "{{ qdrant_service_dir }}/docker-compose.yml" down --remove-orphans
changed_when: false
listen: restart_qdrant
- name: Start Qdrant service
ansible.builtin.command: docker compose -f "{{ qdrant_service_dir }}/docker-compose.yml" up -d
changed_when: false
listen: restart_qdrant

View File

@@ -1,26 +0,0 @@
services:
  # Redis instance serving as the JuiceFS metadata store.
  juicefs-redis:
    image: redis:latest
    restart: always
    ports:
      - "6379:6379"
    volumes:
      # AOF persistence data lives here (see --appendonly yes below).
      - /mnt/services/redis:/data
    # NOTE(review): the password is templated into the command line and the
    # healthcheck, so it is visible via `docker inspect` and process listings.
    # Consider moving it to a mounted redis.conf or the REDISCLI_AUTH env var.
    command: ["redis-server", "--appendonly", "yes", "--requirepass", "{{ REDIS_PASSWORD }}"]
    environment:
      - TZ=Europe/Amsterdam
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "{{ REDIS_PASSWORD }}", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 5s
    networks:
      - juicefs-network
    deploy:
      resources:
        limits:
          memory: 256M
networks:
  juicefs-network:

View File

@@ -1,82 +0,0 @@
---
- name: Deploy Redis for JuiceFS
block:
- name: Set Redis facts
ansible.builtin.set_fact:
redis_service_dir: "{{ ansible_env.HOME }}/.services/juicefs-redis"
redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"
- name: Create Redis service directory
ansible.builtin.file:
path: "{{ redis_service_dir }}"
state: directory
mode: "0755"
- name: Deploy Redis docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ redis_service_dir }}/docker-compose.yml"
mode: "0644"
register: redis_compose
vars:
REDIS_PASSWORD: "{{ redis_password }}"
- name: Check if juicefs.service exists
ansible.builtin.stat:
path: /etc/systemd/system/juicefs.service
register: juicefs_service_stat
- name: Stop juicefs.service to umount JuiceFS
ansible.builtin.systemd:
name: juicefs.service
state: stopped
enabled: false
register: juicefs_stop
changed_when: juicefs_stop.changed
when: redis_compose.changed and juicefs_service_stat.stat.exists
become: true
- name: List containers that are running
ansible.builtin.command: docker ps -q
register: docker_ps
changed_when: docker_ps.rc == 0
when: redis_compose.changed
- name: Stop all docker containers
ansible.builtin.command: docker stop {{ item }}
loop: "{{ docker_ps.stdout_lines }}"
register: docker_stop
changed_when: docker_stop.rc == 0
when: redis_compose.changed
ignore_errors: true
- name: Start Redis service
ansible.builtin.command: docker compose -f "{{ redis_service_dir }}/docker-compose.yml" up -d
register: redis_start
changed_when: redis_start.rc == 0
- name: Wait for Redis to be ready
ansible.builtin.wait_for:
host: localhost
port: 6379
timeout: 30
- name: Start juicefs.service to mount JuiceFS
ansible.builtin.systemd:
name: juicefs.service
state: started
enabled: true
register: juicefs_start
changed_when: juicefs_start.changed
when: juicefs_service_stat.stat.exists
become: true
- name: Restart containers that were stopped
ansible.builtin.command: docker start {{ item }}
loop: "{{ docker_stop.results | map(attribute='item') | list }}"
register: docker_restart
changed_when: docker_restart.rc == 0
when: redis_compose.changed
tags:
- services
- redis

View File

@@ -1,53 +0,0 @@
# Production Environment Variables
# Copy this to .env and fill in your values
# Database configuration (PostgreSQL)
DB_TYPE=postgres
DB_HOST=postgres
DB_PORT=5432
DB_USER=sathub
DB_PASSWORD={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='DB_PASSWORD') }}
DB_NAME=sathub
# Required: JWT secret for token signing
JWT_SECRET={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='JWT_SECRET') }}
# Required: Two-factor authentication encryption key
TWO_FA_ENCRYPTION_KEY={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='TWO_FA_ENCRYPTION_KEY') }}
# Email configuration (required for password resets)
SMTP_HOST={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='SMTP_HOST') }}
SMTP_PORT={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='SMTP_PORT') }}
SMTP_USERNAME={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='SMTP_USERNAME') }}
SMTP_PASSWORD={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='SMTP_PASSWORD') }}
SMTP_FROM_EMAIL={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='SMTP_FROM_EMAIL') }}
# MinIO Object Storage configuration
MINIO_ROOT_USER={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='MINIO_ROOT_USER') }}
MINIO_ROOT_PASSWORD={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='MINIO_ROOT_PASSWORD') }}
# Basically the same as the above
MINIO_ACCESS_KEY={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='MINIO_ROOT_USER') }}
MINIO_SECRET_KEY={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='MINIO_ROOT_PASSWORD') }}
# GitHub credentials for Watchtower (auto-updates)
GITHUB_USER={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='GITHUB_USER') }}
GITHUB_PAT={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='GITHUB_PAT') }}
REPO_USER={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='GITHUB_USER') }}
REPO_PASS={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='GITHUB_PAT') }}
# Optional: Override defaults if needed
# GIN_MODE=release (set automatically)
FRONTEND_URL=https://sathub.de
# CORS configuration (optional - additional allowed origins)
CORS_ALLOWED_ORIGINS=https://sathub.de,https://sathub.nl,https://api.sathub.de
# Frontend configuration (optional - defaults are provided)
VITE_API_BASE_URL=https://api.sathub.de
VITE_ALLOWED_HOSTS=sathub.de,sathub.nl
# Discord related messaging
DISCORD_CLIENT_ID={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='DISCORD_CLIENT_ID') }}
DISCORD_CLIENT_SECRET={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='DISCORD_CLIENT_SECRET') }}
DISCORD_REDIRECT_URI={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='DISCORD_REDIRECT_URL') }}
DISCORD_WEBHOOK_URL={{ lookup('community.general.onepassword', 'sathub', vault='Dotfiles', field='DISCORD_WEBHOOK_URL') }}

View File

@@ -1,182 +0,0 @@
services:
# Migration service - runs once on stack startup
migrate:
image: ghcr.io/vleeuwenmenno/sathub-backend/backend:latest
container_name: sathub-migrate
restart: "no"
command: ["./main", "auto-migrate"]
environment:
- GIN_MODE=release
# Database settings
- DB_TYPE=postgres
- DB_HOST=postgres
- DB_PORT=5432
- DB_USER=${DB_USER:-sathub}
- DB_PASSWORD=${DB_PASSWORD}
- DB_NAME=${DB_NAME:-sathub}
# MinIO settings
- MINIO_ENDPOINT=http://minio:9000
- MINIO_BUCKET=sathub-images
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY}
- MINIO_EXTERNAL_URL=https://obj.sathub.de
networks:
- sathub
depends_on:
- postgres
backend:
image: ghcr.io/vleeuwenmenno/sathub-backend/backend:latest
container_name: sathub-backend
restart: unless-stopped
command: ["./main", "api"]
environment:
- GIN_MODE=release
- FRONTEND_URL=${FRONTEND_URL:-https://sathub.de}
- CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS:-https://sathub.de}
# Database settings
- DB_TYPE=postgres
- DB_HOST=postgres
- DB_PORT=5432
- DB_USER=${DB_USER:-sathub}
- DB_PASSWORD=${DB_PASSWORD}
- DB_NAME=${DB_NAME:-sathub}
# Security settings
- JWT_SECRET=${JWT_SECRET}
- TWO_FA_ENCRYPTION_KEY=${TWO_FA_ENCRYPTION_KEY}
# SMTP settings
- SMTP_HOST=${SMTP_HOST}
- SMTP_PORT=${SMTP_PORT}
- SMTP_USERNAME=${SMTP_USERNAME}
- SMTP_PASSWORD=${SMTP_PASSWORD}
- SMTP_FROM_EMAIL=${SMTP_FROM_EMAIL}
# MinIO settings
- MINIO_ENDPOINT=http://minio:9000
- MINIO_BUCKET=sathub-images
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY}
- MINIO_EXTERNAL_URL=https://obj.sathub.de
# Discord settings
- DISCORD_CLIENT_ID=${DISCORD_CLIENT_ID}
- DISCORD_CLIENT_SECRET=${DISCORD_CLIENT_SECRET}
- DISCORD_REDIRECT_URI=${DISCORD_REDIRECT_URI}
- DISCORD_WEBHOOK_URL=${DISCORD_WEBHOOK_URL}
networks:
- sathub
- caddy_network
depends_on:
migrate:
condition: service_completed_successfully
worker:
image: ghcr.io/vleeuwenmenno/sathub-backend/backend:latest
container_name: sathub-worker
restart: unless-stopped
command: ["./main", "worker"]
environment:
- GIN_MODE=release
# Database settings
- DB_TYPE=postgres
- DB_HOST=postgres
- DB_PORT=5432
- DB_USER=${DB_USER:-sathub}
- DB_PASSWORD=${DB_PASSWORD}
- DB_NAME=${DB_NAME:-sathub}
# SMTP settings (needed for notifications)
- SMTP_HOST=${SMTP_HOST}
- SMTP_PORT=${SMTP_PORT}
- SMTP_USERNAME=${SMTP_USERNAME}
- SMTP_PASSWORD=${SMTP_PASSWORD}
- SMTP_FROM_EMAIL=${SMTP_FROM_EMAIL}
# MinIO settings
- MINIO_ENDPOINT=http://minio:9000
- MINIO_BUCKET=sathub-images
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY}
- MINIO_EXTERNAL_URL=https://obj.sathub.de
# Discord settings
- DISCORD_CLIENT_ID=${DISCORD_CLIENT_ID}
- DISCORD_CLIENT_SECRET=${DISCORD_CLIENT_SECRET}
- DISCORD_REDIRECT_URI=${DISCORD_REDIRECT_URI}
- DISCORD_WEBHOOK_URL=${DISCORD_WEBHOOK_URL}
networks:
- sathub
depends_on:
migrate:
condition: service_completed_successfully
postgres:
image: postgres:15-alpine
container_name: sathub-postgres
restart: unless-stopped
environment:
- POSTGRES_USER=${DB_USER:-sathub}
- POSTGRES_PASSWORD=${DB_PASSWORD}
- POSTGRES_DB=${DB_NAME:-sathub}
volumes:
- {{ sathub_data_dir }}/postgres_data:/var/lib/postgresql/data
networks:
- sathub
frontend:
image: ghcr.io/vleeuwenmenno/sathub-frontend/frontend:latest
container_name: sathub-frontend
restart: unless-stopped
environment:
- VITE_API_BASE_URL=${VITE_API_BASE_URL:-https://api.sathub.de}
- VITE_ALLOWED_HOSTS=${VITE_ALLOWED_HOSTS:-sathub.de,sathub.nl}
networks:
- sathub
- caddy_network
minio:
image: minio/minio
container_name: sathub-minio
restart: unless-stopped
environment:
- MINIO_ROOT_USER=${MINIO_ROOT_USER}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}
volumes:
- {{ sathub_data_dir }}/minio_data:/data
command: server /data --console-address :9001
networks:
- sathub
depends_on:
- postgres
watchtower:
image: containrrr/watchtower:latest
container_name: sathub-watchtower
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_INCLUDE_STOPPED=false
- REPO_USER=${REPO_USER}
- REPO_PASS=${REPO_PASS}
command: --interval 30 --cleanup --include-stopped=false sathub-backend sathub-worker sathub-frontend
networks:
- sathub
networks:
sathub:
driver: bridge
# We assume you're running a Caddy instance in a separate compose file with this network
# If not, you can remove this network and the related depends_on in the services above
# But the stack is designed to run behind a Caddy reverse proxy for SSL termination and routing
caddy_network:
external: true
name: caddy_default

View File

@@ -1,50 +0,0 @@
---
- name: Deploy SatHub service
block:
- name: Set SatHub directories
ansible.builtin.set_fact:
sathub_service_dir: "{{ ansible_env.HOME }}/.services/sathub"
sathub_data_dir: "/mnt/services/sathub"
- name: Set SatHub frontend configuration
ansible.builtin.set_fact:
frontend_api_base_url: "https://api.sathub.de"
frontend_allowed_hosts: "sathub.de,sathub.nl"
cors_allowed_origins: "https://sathub.nl,https://api.sathub.de,https://obj.sathub.de"
- name: Create SatHub directory
ansible.builtin.file:
path: "{{ sathub_service_dir }}"
state: directory
mode: "0755"
- name: Create SatHub data directory
ansible.builtin.file:
path: "{{ sathub_data_dir }}"
state: directory
mode: "0755"
- name: Deploy SatHub .env
ansible.builtin.template:
src: .env.j2
dest: "{{ sathub_service_dir }}/.env"
mode: "0644"
register: sathub_env
- name: Deploy SatHub docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ sathub_service_dir }}/docker-compose.yml"
mode: "0644"
register: sathub_compose
- name: Stop SatHub service
ansible.builtin.command: docker compose -f "{{ sathub_service_dir }}/docker-compose.yml" down --remove-orphans
when: sathub_compose.changed or sathub_env.changed
- name: Start SatHub service
ansible.builtin.command: docker compose -f "{{ sathub_service_dir }}/docker-compose.yml" up -d
when: sathub_compose.changed or sathub_env.changed
tags:
- services
- sathub

View File

@@ -1,43 +0,0 @@
---
- name: Cleanup disabled services
block:
- name: Prepare cleanup list
ansible.builtin.set_fact:
services_to_cleanup: "{{ services | selectattr('enabled', 'equalto', false) | list }}"
- name: Check service directories existence for disabled services
ansible.builtin.stat:
path: "{{ ansible_env.HOME }}/.services/{{ item.name }}"
register: service_dir_results
loop: "{{ services_to_cleanup }}"
loop_control:
label: "{{ item.name }}"
- name: Filter services with existing directories
ansible.builtin.set_fact:
services_with_dirs: "{{ service_dir_results.results | selectattr('stat.exists', 'equalto', true) | map(attribute='item') | list }}"
- name: Check if docker-compose file exists for services to cleanup
ansible.builtin.stat:
path: "{{ ansible_env.HOME }}/.services/{{ item.name }}/docker-compose.yml"
register: compose_file_results
loop: "{{ services_with_dirs }}"
loop_control:
label: "{{ item.name }}"
- name: Stop disabled services with docker-compose files
ansible.builtin.command: docker compose -f "{{ ansible_env.HOME }}/.services/{{ item.item.name }}/docker-compose.yml" down --remove-orphans
loop: "{{ compose_file_results.results | selectattr('stat.exists', 'equalto', true) }}"
loop_control:
label: "{{ item.item.name }}"
register: service_stop_results
become: false
failed_when: false # Continue even if the command fails
- name: Remove service directories for disabled services
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.services/{{ item.name }}"
state: absent
loop: "{{ services_with_dirs }}"
loop_control:
label: "{{ item.name }}"

View File

@@ -1,41 +0,0 @@
services:
stash:
image: stashapp/stash:latest
container_name: stash
restart: unless-stopped
ports:
- "9999:9999"
environment:
- PUID=1000
- PGID=1000
- STASH_STASH=/data/
- STASH_GENERATED=/generated/
- STASH_METADATA=/metadata/
- STASH_CACHE=/cache/
- STASH_PORT=9999
volumes:
- /etc/localtime:/etc/localtime:ro
## Point this at your collection.
- {{ stash_data_dir }}:/data
## Keep configs, scrapers, and plugins here.
- {{ stash_config_dir }}/config:/root/.stash
## This is where your stash's metadata lives
- {{ stash_config_dir }}/metadata:/metadata
## Any other cache content.
- {{ stash_config_dir }}/cache:/cache
## Where to store binary blob data (scene covers, images)
- {{ stash_config_dir }}/blobs:/blobs
## Where to store generated content (screenshots,previews,transcodes,sprites)
- {{ stash_config_dir }}/generated:/generated
networks:
- caddy_network
deploy:
resources:
limits:
memory: 2G
networks:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,25 +0,0 @@
---
services:
  tautulli:
    image: lscr.io/linuxserver/tautulli:latest
    container_name: tautulli
    environment:
      - PUID=1000
      - PGID=100
      # Fixed: "Etc/Amsterdam" is not a valid IANA timezone identifier, so
      # the container would fall back to UTC. Every other service in this
      # repo uses Europe/Amsterdam.
      - TZ=Europe/Amsterdam
    volumes:
      # Tautulli's configuration/database directory (templated by Ansible).
      - {{ tautulli_data_dir }}:/config
    ports:
      - 8181:8181
    restart: unless-stopped
    networks:
      - caddy_network
    deploy:
      resources:
        limits:
          memory: 512M
networks:
  # Shared external network so the Caddy reverse proxy can reach Tautulli.
  caddy_network:
    external: true
    name: caddy_default

View File

@@ -1,36 +0,0 @@
---
- name: Deploy Tautulli service
block:
- name: Set Tautulli directories
  ansible.builtin.set_fact:
    # Plain string: the previous "{{ '/mnt/services/tautulli' }}" Jinja
    # literal wrapper added nothing and was inconsistent with how every
    # other service playbook declares its data directory.
    tautulli_data_dir: "/mnt/services/tautulli"
    tautulli_service_dir: "{{ ansible_env.HOME }}/.services/tautulli"
- name: Create Tautulli directories
ansible.builtin.file:
path: "{{ tautulli_dir }}"
state: directory
mode: "0755"
loop:
- "{{ tautulli_data_dir }}"
- "{{ tautulli_service_dir }}"
loop_control:
loop_var: tautulli_dir
- name: Deploy Tautulli docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ tautulli_service_dir }}/docker-compose.yml"
mode: "0644"
register: tautulli_compose
- name: Stop Tautulli service
ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" down --remove-orphans
when: tautulli_compose.changed
- name: Start Tautulli service
ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" up -d
when: tautulli_compose.changed
tags:
- services
- tautulli

View File

@@ -1,65 +0,0 @@
services:
unifi-controller:
image: linuxserver/unifi-network-application:latest
restart: unless-stopped
ports:
- "8080:8080" # Device communication
- "8443:8443" # Controller GUI / API
- "3478:3478/udp" # STUN
- "10001:10001/udp" # AP discovery
- "8880:8880" # HTTP portal redirect (guest hotspot)
- "8843:8843" # HTTPS portal redirect (guest hotspot)
- "6789:6789" # Mobile speed test (optional)
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
- MONGO_USER=unifi
- MONGO_PASS=unifi
- MONGO_HOST=unifi-db
- MONGO_PORT=27017
- MONGO_DBNAME=unifi
- MONGO_AUTHSOURCE=admin
volumes:
- {{ unifi_network_application_data_dir }}/data:/config
depends_on:
- unifi-db
networks:
- unifi-network
- caddy_network
sysctls:
- net.ipv6.conf.all.disable_ipv6=1
deploy:
resources:
limits:
memory: 1G
unifi-db:
image: mongo:6.0
restart: unless-stopped
volumes:
- {{ unifi_network_application_data_dir }}/db:/data/db
- {{ unifi_network_application_data_dir }}/init-mongo.sh:/docker-entrypoint-initdb.d/init-mongo.sh:ro
environment:
- MONGO_INITDB_ROOT_USERNAME=root
- MONGO_INITDB_ROOT_PASSWORD=root
- MONGO_INITDB_DATABASE=unifi
- MONGO_USER=unifi
- MONGO_PASS=unifi
- MONGO_DBNAME=unifi
- MONGO_AUTHSOURCE=admin
networks:
- unifi-network
sysctls:
- net.ipv6.conf.all.disable_ipv6=1
deploy:
resources:
limits:
memory: 1G
networks:
unifi-network:
driver: bridge
caddy_network:
external: true
name: caddy_default

View File

@@ -1,78 +0,0 @@
---
# Deploys the UniFi Network Application (plus a dedicated MongoDB container)
# via docker compose. Data lives under /mnt/services, the rendered compose
# file under ~/.services.
- name: Deploy Unifi Network App service
  block:
    - name: Set Unifi Network App directories
      ansible.builtin.set_fact:
        unifi_network_application_data_dir: "/mnt/services/unifi_network_application"
        unifi_network_application_service_dir: "{{ ansible_env.HOME }}/.services/unifi_network_application"

    - name: Create Unifi Network App directories
      ansible.builtin.file:
        path: "{{ unifi_network_application_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ unifi_network_application_data_dir }}"
        - "{{ unifi_network_application_data_dir }}/data"
        - "{{ unifi_network_application_data_dir }}/db"
        - "{{ unifi_network_application_service_dir }}"
      loop_control:
        # Custom loop var avoids clobbering `item` in any surrounding loop.
        loop_var: unifi_network_application_dir

    # Mounted read-only into the mongo container's docker-entrypoint-initdb.d;
    # creates the unifi user with dbOwner on the app/stat/audit databases.
    # The ${...} vars are expanded at container start from the db service env,
    # not by Ansible.
    - name: Create MongoDB initialization script
      ansible.builtin.copy:
        content: |
          #!/bin/bash
          if which mongosh > /dev/null 2>&1; then
            mongo_init_bin='mongosh'
          else
            mongo_init_bin='mongo'
          fi
          "${mongo_init_bin}" <<EOF
          use ${MONGO_AUTHSOURCE}
          db.auth("${MONGO_INITDB_ROOT_USERNAME}", "${MONGO_INITDB_ROOT_PASSWORD}")
          db.createUser({
            user: "${MONGO_USER}",
            pwd: "${MONGO_PASS}",
            roles: [
              { db: "${MONGO_DBNAME}", role: "dbOwner" },
              { db: "${MONGO_DBNAME}_stat", role: "dbOwner" },
              { db: "${MONGO_DBNAME}_audit", role: "dbOwner" }
            ]
          })
          EOF
        dest: "{{ unifi_network_application_data_dir }}/init-mongo.sh"
        mode: "0755"
      register: unifi_mongo_init_script

    - name: Deploy Unifi Network App docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ unifi_network_application_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: unifi_network_application_compose

    # DESTRUCTIVE: deletes the entire Mongo data directory whenever the init
    # script content changes, so the entrypoint re-runs it on an empty db.
    # All controller data is lost in that case — TODO(review): confirm this
    # is intended for every script edit, not just first-time setup.
    - name: Clean MongoDB database for fresh initialization
      ansible.builtin.file:
        path: "{{ unifi_network_application_data_dir }}/db"
        state: absent
      when: unifi_mongo_init_script.changed

    - name: Recreate MongoDB database directory
      ansible.builtin.file:
        path: "{{ unifi_network_application_data_dir }}/db"
        state: directory
        mode: "0755"
      when: unifi_mongo_init_script.changed

    # Full stop/start (rather than `up -d` alone) so compose picks up removed
    # services and the db re-initializes after a wipe.
    - name: Stop Unifi Network App service
      ansible.builtin.command: docker compose -f "{{ unifi_network_application_service_dir }}/docker-compose.yml" down --remove-orphans
      when: unifi_network_application_compose.changed or unifi_mongo_init_script.changed

    - name: Start Unifi Network App service
      ansible.builtin.command: docker compose -f "{{ unifi_network_application_service_dir }}/docker-compose.yml" up -d
      when: unifi_network_application_compose.changed or unifi_mongo_init_script.changed
  tags:
    - services
    - unifi

View File

@@ -1,23 +0,0 @@
services:
  # WireGuard VPN server (linuxserver.io image). Peer configs for the listed
  # PEERS are generated under /config on first start.
  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    restart: unless-stopped
    # NET_ADMIN is needed to create and configure the wg interface.
    cap_add:
      - NET_ADMIN
    ports:
      - "51820:51820/udp"
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - SERVERURL=mvl.sh
      - PEERS=worklaptop,phone,desktop,personallaptop
      # Route all client traffic (IPv4 and IPv6) through the tunnel.
      - ALLOWEDIPS=0.0.0.0/0, ::/0
    volumes:
      - "{{ wireguard_data_dir }}/wg-data:/config"
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    deploy:
      resources:
        limits:
          memory: 512M

View File

@@ -1,31 +0,0 @@
---
# Deploys the WireGuard VPN server via docker compose.
- name: Deploy WireGuard service
  block:
    - name: Set WireGuard directories
      ansible.builtin.set_fact:
        wireguard_service_dir: "{{ ansible_env.HOME }}/.services/wireguard"
        wireguard_data_dir: "/mnt/services/wireguard"

    # FIX: previously only the service dir was created; the data dir the
    # container bind-mounts (.../wg-data -> /config) was left for the docker
    # daemon to create root-owned. Create all of them up front, matching the
    # sibling UniFi role.
    - name: Create WireGuard directories
      ansible.builtin.file:
        path: "{{ wireguard_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ wireguard_service_dir }}"
        - "{{ wireguard_data_dir }}"
        - "{{ wireguard_data_dir }}/wg-data"
      loop_control:
        loop_var: wireguard_dir

    - name: Deploy WireGuard docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ wireguard_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: wireguard_compose

    # Recreate the stack only when the rendered compose file changed; `down`
    # first so removed services are cleaned up.
    - name: Stop WireGuard service
      ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" down --remove-orphans
      when: wireguard_compose.changed

    - name: Start WireGuard service
      ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" up -d
      when: wireguard_compose.changed
  tags:
    - services
    - wireguard

View File

@@ -1,51 +0,0 @@
---
# Adds extra browsers to 1Password's browser-integration allow-list
# (/etc/1password/custom_allowed_browsers) and bounces any running
# 1Password instance so the new list takes effect.
- name: Process 1Password custom allowed browsers
  block:
    # NOTE(review): this result is registered but never referenced below —
    # the allow-list is written even when 1Password is not installed.
    # Confirm whether the block should be gated on onepassword_check.rc == 0.
    - name: Check if 1Password is installed
      ansible.builtin.command: 1password --version
      register: onepassword_check
      changed_when: false
      failed_when: false

    # pgrep exits non-zero when nothing matches; failed_when: false keeps the
    # play going, and stdout is empty in that case (used as the signal below).
    - name: Check if 1Password is running anywhere
      ansible.builtin.command: pgrep 1password
      register: onepassword_running
      changed_when: false
      failed_when: false

    - name: Ensure 1Password custom allowed browsers directory exists
      ansible.builtin.file:
        path: /etc/1password
        state: directory
        mode: "0755"
      become: true

    # One browser identifier per line: desktop/flatpak ids alongside plain
    # binary names, covering the different ways each browser may be launched.
    - name: Add Browsers to 1Password custom allowed browsers
      ansible.builtin.copy:
        content: |
          ZenBrowser
          zen-browser
          app.zen_browser.zen
          zen
          Firefox
          firefox
          opera
          zen-x86_64
        dest: /etc/1password/custom_allowed_browsers
        owner: root
        group: root
        # NOTE(review): 0755 marks a plain text file executable — 0644 looks
        # sufficient; confirm 1Password does not require the exec bit here.
        mode: "0755"
      become: true
      register: custom_browsers_file

    # Restart only when the list changed AND a 1Password process was found.
    - name: Kill any running 1Password instances if configuration changed
      ansible.builtin.command: pkill 1password
      when: custom_browsers_file.changed and onepassword_running.stdout != ""
      changed_when: custom_browsers_file.changed and onepassword_running.stdout != ""

    # Relaunch detached inside screen so 1Password outlives the play.
    - name: If 1Password was killed, restart it...
      ansible.builtin.command: screen -dmS 1password 1password
      when: custom_browsers_file.changed and onepassword_running.stdout != ""
      changed_when: custom_browsers_file.changed and onepassword_running.stdout != ""
  tags:
    - custom_allowed_browsers

View File

@@ -1,6 +0,0 @@
# Mark all files under the real autostart source as executable
- name: Mark all files under dotfiles autostart as executable
  ansible.builtin.file:
    path: "{{ lookup('env', 'DOTFILES_PATH') }}/config/autostart"
    # FIX: the file module only accepts `recurse` together with
    # state=directory; without it this task fails with
    # "recurse option requires state to be 'directory'".
    state: directory
    mode: "u+x,g+x,o+x"
    recurse: true

View File

@@ -1,41 +0,0 @@
---
# Installs Wayland clipboard tooling and runs cliphist as a systemd *user*
# service so clipboard history is captured in the background.
# Paste via a WM keybinding (see note above the enable task).
- name: Ensure wl-clipboard and cliphist are installed
  become: true
  # FQCN for consistency with the rest of the playbooks.
  ansible.builtin.package:
    name:
      - wl-clipboard
      - cliphist
      - wofi
    state: present

# FIX: ~/.config/systemd/user may not exist on a fresh machine, and copy
# does not create missing parent directories.
- name: Ensure systemd user unit directory exists
  become: false
  ansible.builtin.file:
    path: "{{ ansible_env.HOME }}/.config/systemd/user"
    state: directory
    mode: "0755"

- name: Create systemd user service for cliphist
  become: false
  ansible.builtin.copy:
    dest: "{{ ansible_env.HOME }}/.config/systemd/user/cliphist-store.service"
    mode: "0644"
    content: |
      [Unit]
      Description=Store clipboard history with cliphist
      [Service]
      ExecStart=/usr/bin/sh -c 'wl-paste --watch cliphist store'
      Restart=on-failure
      [Install]
      WantedBy=default.target

# Pick up the (possibly new) unit file before enabling it.
- name: Reload systemd user daemon
  become: false
  ansible.builtin.systemd:
    daemon_reload: true
    scope: user

# TO ENABLE READING, ADD A KEYBOARD SHORTCUT SOMEWHERE IN YOUR WM CONFIGURATION
# cliphist list | wofi -S dmenu | cliphist decode | wl-copy
- name: Enable and start cliphist-store service
  become: false
  ansible.builtin.systemd:
    name: cliphist-store.service
    enabled: true
    state: started
    scope: user

Some files were not shown because too many files have changed in this diff Show More