Compare commits


4 Commits

292 changed files with 395078 additions and 14713 deletions


151
.bashrc

@@ -3,32 +3,14 @@ HISTFILE=~/.bash_history
HISTSIZE=1000
HISTFILESIZE=2000 # Adjusted to match both histfile and size criteria
if [ -f /etc/os-release ]; then
distro=$(awk -F= '/^NAME/{print $2}' /etc/os-release | tr -d '"')
if [[ "$distro" == *"Pop!_OS"* ]]; then
export CGO_CFLAGS="-I/usr/include"
fi
fi
# For microsoft-standard-WSL2 in uname -a
if [[ "$(uname -a)" == *"microsoft-standard-WSL2"* ]]; then
source $HOME/.agent-bridge.sh
alias winget='winget.exe'
fi
# Set SSH_AUTH_SOCK to ~/.1password/agent.sock, but only if we don't already have a SSH_AUTH_SOCK
if [ -z "$SSH_AUTH_SOCK" ]; then
export SSH_AUTH_SOCK=~/.1password/agent.sock
fi
# If brave is available as browser set CHROME_EXECUTABLE to that.
if command -v brave-browser &> /dev/null; then
export CHROME_EXECUTABLE=/usr/bin/brave-browser
fi
# Docker Compose Alias (Mostly for old shell scripts)
alias docker-compose='docker compose'
# Home Manager Configuration
alias hm='cd $HOME/dotfiles/config/home-manager/ && home-manager'
alias hmnews='hm news --flake .#$DOTF_HOSTNAME'
alias hmup='hm switch --flake .#$DOTF_HOSTNAME --impure'
# Modern tools aliases
alias l="eza --header --long --git --group-directories-first --group --icons --color=always --sort=name --hyperlink -o --no-permissions"
alias ll='l'
@@ -36,7 +18,7 @@ alias la='l -a'
alias cat='bat'
alias du='dust'
alias df='duf'
alias augp='sudo apt update && sudo apt upgrade -y && sudo apt autopurge -y && sudo apt autoclean'
alias rm="trash-put"
# Docker Aliases
alias d='docker'
@@ -47,10 +29,8 @@ alias dcd='docker compose down'
alias dcu='docker compose up'
alias dcp='docker compose ps'
alias dcps='docker compose ps'
alias dcpr='dcp && dcd && dcu -d && dcl -f'
alias dcr='dcd && dcu -d && dcl -f'
alias dcr='docker compose run'
alias ddpul='docker compose down && docker compose pull && docker compose up -d && docker compose logs -f'
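# WARNING: docker-nuke kills every running container, removes all containers, then prunes all images and volumes on this host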
alias docker-nuke='docker kill $(docker ps -q) && docker rm $(docker ps -a -q) && docker system prune --all --volumes --force && docker volume prune --force'
# Git aliases
alias g='git'
@@ -65,61 +45,23 @@ alias gcm='git commit -m'
alias gco='git checkout'
alias gcb='git checkout -b'
# Kubernetes aliases (Minikube)
alias kubectl="minikube kubectl --"
alias zeditor=~/.local/bin/zed
alias zed=~/.local/bin/zed
alias ssh="~/.local/bin/smart-ssh"
# random string (Syntax: random <length>)
alias random='openssl rand -base64'
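# e.g. `random 32` prints 32 random bytes, base64-encoded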
# netstat port in use check
alias port='netstat -atupn | grep LISTEN'
# Alias for ls to l but only if it's an interactive shell because we don't want to override ls in scripts which could blow up in our face
if [ -t 1 ]; then
alias ls='l'
fi
# Alias for ssh.exe and ssh-add.exe on Windows WSL (microsoft-standard-WSL2)
if [[ $(uname -a) == *"microsoft-standard-WSL2"* ]]; then
alias op='op.exe'
fi
# PATH Manipulation
export DOTFILES_PATH=$HOME/.dotfiles
export PATH=$PATH:$HOME/.local/bin
export PATH=$PATH:$HOME/.cargo/bin
export PATH=$PATH:$DOTFILES_PATH/bin
export PATH="/usr/bin:$PATH"
if [ -d /usr/lib/pkgconfig ]; then
export PKG_CONFIG_PATH=/usr/lib/pkgconfig:/usr/share/pkgconfig:$PKG_CONFIG_PATH
fi
# Include spicetify if it exists
if [ -d "$HOME/.spicetify" ]; then
export PATH=$PATH:$HOME/.spicetify
fi
# Include pyenv if it exists
if [ -d "$HOME/.pyenv" ]; then
export PYENV_ROOT="$HOME/.pyenv"
[[ -d $PYENV_ROOT/bin ]] && export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init - bash)"
fi
# Include pnpm if it exists
if [ -d "$HOME/.local/share/pnpm" ]; then
export PATH=$PATH:$HOME/.local/share/pnpm
fi
# Miniconda
export PATH="$HOME/miniconda3/bin:$PATH"
# In case $HOME/.flutter/flutter/bin is found, we can add it to the PATH
if [ -d "$HOME/.flutter/flutter/bin" ]; then
export PATH=$PATH:$HOME/.flutter/flutter/bin
export PATH="$PATH":"$HOME/.pub-cache/bin"
# Flutter linux fixes:
export CPPFLAGS="-I/usr/include"
export LDFLAGS="-L/usr/lib/x86_64-linux-gnu -lbz2"
export PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig:$PKG_CONFIG_PATH
fi
export PATH=$PATH:$HOME/dotfiles/bin
# Add flatpak to XDG_DATA_DIRS
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/usr/share:/var/lib/flatpak/exports/share:$HOME/.local/share/flatpak/exports/share
@@ -130,11 +72,21 @@ export NIXPKGS_ALLOW_UNFREE=1
# Allow insecure nixpkgs
export NIXPKGS_ALLOW_INSECURE=1
# Set DOTF_HOSTNAME to the hostname from .hostname file
# If this file doesn't exist, use mennos-unknown-hostname
export DOTF_HOSTNAME="mennos-unknown-hostname"
if [ -f $HOME/.hostname ]; then
export DOTF_HOSTNAME=$(cat $HOME/.hostname)
fi
# Tradaware / DiscountOffice Configuration
if [ -d "/home/menno/Projects/Work" ]; then
export TRADAWARE_DEVOPS=true
fi
# Flutter Web and other tools that require Chrome
if command -v brave &> /dev/null; then
export CHROME_EXECUTABLE=$(which brave)
fi
# 1Password Source Plugin (Assuming bash compatibility)
if [ -f /home/menno/.config/op/plugins.sh ]; then
source /home/menno/.config/op/plugins.sh
@@ -149,25 +101,43 @@ else
eval "$(starship init bash)"
fi
# Read .op_sat
if [ -f ~/.op_sat ]; then
export OP_SERVICE_ACCOUNT_TOKEN=$(cat ~/.op_sat)
# Ensure .op_sat is 0600 and only readable by the owner
if [ "$(stat -c %a ~/.op_sat)" != "600" ]; then
echo "WARNING: ~/.op_sat is not 0600, please fix this!"
fi
if [ "$(stat -c %U ~/.op_sat)" != "$(whoami)" ]; then
echo "WARNING: ~/.op_sat is not owned by the current user, please fix this!"
fi
fi
# Source nix home-manager
if [ -f "$HOME/.nix-profile/etc/profile.d/hm-session-vars.sh" ]; then
. "$HOME/.nix-profile/etc/profile.d/hm-session-vars.sh"
fi
# Source agent-bridge script for 1password
source $HOME/dotfiles/bin/1password-agent-bridge.sh
# zoxide if available
if command -v zoxide &> /dev/null; then
eval "$(zoxide init bash)"
fi
# Check if we are running from zellij, if not then launch it
launch_zellij_conditionally() {
if [ -z "$ZELLIJ" ]; then
# Don't launch zellij in tmux, vscode, screen or zeditor.
if [ ! -t 1 ] || [ -n "$TMUX" ] || [ -n "$VSCODE_STABLE" ] || [ -n "$STY" ] || [ -n "$ZED_TERM" ]; then
return
fi
# Launch zellij
zellij
# Exit if zellij exited properly with a zero exit code
zellij_status=$?
if [ $zellij_status -eq 0 ]; then
exit 0
fi
echo "Zellij exited with a non-zero exit code, falling back to regular shell."
return
fi
}
# Disabled for now, I don't like the way it behaves but I don't want to remove it either
# launch_zellij_conditionally
# Source ble.sh if it exists
if [[ -f "${HOME}/.nix-profile/share/blesh/ble.sh" ]]; then
source "${HOME}/.nix-profile/share/blesh/ble.sh"
@@ -191,12 +161,7 @@ if [[ -f "${HOME}/.nix-profile/share/blesh/ble.sh" ]]; then
bind -x '"\C-r": fzf_history_search'
fi
# In case a bashrc.local exists, source it
if [ -f $HOME/.bashrc.local ]; then
source $HOME/.bashrc.local
fi
# Display a welcome message for interactive shells
if [ -t 1 ]; then
helloworld
dotf hello
fi


@@ -1,37 +0,0 @@
name: Ansible Lint Check
on:
pull_request:
push:
branches: [ master ]
jobs:
check-ansible:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Ansible and ansible-lint
run: |
python -m pip install --upgrade pip
python -m pip install ansible ansible-lint
- name: Run ansible-lint
run: |
if [ ! -d "config/ansible" ]; then
echo "No ansible directory found at config/ansible"
exit 0
fi
found_files=$(find config/ansible -name "*.yml" -o -name "*.yaml")
if [ -z "$found_files" ]; then
echo "No Ansible files found in config/ansible to lint"
exit 0
fi
ansible-lint $found_files


@@ -1,42 +0,0 @@
name: Python Lint Check
on:
pull_request:
push:
branches: [ master ]
jobs:
check-python:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Python linting tools
run: |
python -m pip install --upgrade pip
python -m pip install pylint black
- name: Run pylint
run: |
python_files=$(find . -name "*.py" -type f)
if [ -z "$python_files" ]; then
echo "No Python files found to lint"
exit 0
fi
pylint $python_files
- name: Check Black formatting
run: |
python_files=$(find . -name "*.py" -type f)
if [ -z "$python_files" ]; then
echo "No Python files found to lint"
exit 0
fi
black --check $python_files

10
.gitignore vendored

@@ -1,2 +1,10 @@
config/ssh/config.d/*
!config/ssh/config.d/*.gpg
logs/*
**/__pycache__/
# Don't include secrets in the repository but do include encrypted secrets
secrets/**/*.*
!secrets/**/*.gpg
# SHA256 hashes of the encrypted secrets
*.sha256


@@ -6,9 +6,5 @@
"**/CVS": true,
"**/.DS_Store": true,
"**/*.sha256": true,
},
"files.associations": {
"*.yml": "ansible"
},
"ansible.python.interpreterPath": "/usr/bin/python3"
}
}


@@ -1,30 +1,31 @@
# Setup
This dotfiles repository is intended to be used with Fedora 40 or newer, Ubuntu 20.04 or newer, or Arch Linux.
Please install a clean version of either distro with GNOME and then follow the steps below.
This dotfiles repository is intended to be used with NixOS 24.05
Please install a clean version of NixOS GNOME and then follow the steps below.
## Installation
### 0. Install distro
### 0. Install NixOS
Download the latest ISO for your desired distro and write it to a USB stick.
I'd recommend getting the GNOME version as it's easier to set up, unless you're planning on setting up a server; in that case I recommend the server ISO for the specific distro.
Download the latest NixOS ISO from the [NixOS website](https://nixos.org/download.html) and write it to a USB stick.
I'd recommend getting the GNOME version as it's easier to set up, and you can select a minimal install from the installer anyway if you just want a headless server.
#### Note: If you intend to use a desktop environment, you should select the GNOME version, as this dotfiles repository expects the GNOME desktop environment for various configurations
### 1. Clone dotfiles to home directory
Open a shell and begin the setup process. This setup requires you to provide a hostname as a parameter. You can use an existing hostname to restore an old system or choose a new name.
Open a nix-shell with git and begin the setup process. This setup will prompt you with various questions, such as your desired hostname and whether the system you are installing is supposed to be a server or workstation.
If you are running this in a VM be sure to answer yes if it prompts you.
Feel free to use an existing hostname to restore an old system or choose a new name.
If you are running this in a VM be sure to answer yes if it prompts you. This will ensure it generates the correct boot loader configuration.
```bash
curl -L https://df.mvl.sh | bash -s your-hostname
nix-shell -p git
curl -L https://df.mvl.sh | bash
```
Replace `your-hostname` with your desired hostname for this machine.
### 2. Relog/Reboot
### 2. Reboot
It's probably a good idea to either reboot or log out and log back in to make sure all the changes are applied.
@@ -32,11 +33,7 @@ It's probably a good idea that you either reboot or log out and log back in to m
# sudo reboot
```
### 3. Create ~/.op_sat (Optional)
For servers you can place a file `~/.op_sat` containing your 1Password Service Account Token; this can then be used by Ansible to fetch secrets for services. This is mostly for server systems, so you can skip it for workstations.
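For example (with a placeholder token value), create the file and lock down its permissions; the shell startup warns if it is not `0600` and owned by you:

```bash
echo 'ops_your-service-account-token' > ~/.op_sat
chmod 600 ~/.op_sat
```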
### 4. Run `dotf update`
### 3. Run `dotf update`
Run the `dotf update` command; although the setup script did most of the work, some symlinks still need to be set, which at the moment is done using shell scripts.
@@ -44,6 +41,10 @@ Run the `dotf update` command, although the setup script did most of the work so
dotf update
```
### 4. Setup 1Password (Optional)
1Password is installed, but you need to log in and enable the SSH agent and CLI components under the settings before continuing.
### 5. Decrypt secrets
Either using 1Password or by manually providing the decryption key, you should decrypt the secrets.
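As a sketch of the manual route, the secrets script in this repository takes `encrypt` or `decrypt` as its only argument:

```bash
~/dotfiles/bin/actions/secrets.sh decrypt
```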
@@ -64,38 +65,11 @@ You should now have a fully setup system with all the configurations applied.
Here are some paths that contain files named after the hostname of the system.
If you add a new system you should add the relevant files to these paths.
- `config/nixos/hardware/`: Contains the hardware configurations for the different systems.
- `config/ssh/authorized_keys`: Contains the public keys per hostname that will be symlinked to the `~/.ssh/authorized_keys` file.
- `config/nixos/flake.nix`: Contains an array `nixosConfigurations` where you should be adding the new system hostname and relevant configuration.
- `config/home-manager/flake.nix`: Contains an array `homeConfigurations` where you should be adding the new system hostname and relevant configuration.
### Server reboots
If you reboot a server, it's likely that it runs JuiceFS.
To be sure every service is properly accessing JuiceFS-mounted files, you should restart the services once the server comes back online.
```bash
dotf service stop --all
df # confirm JuiceFS is mounted
dotf service start --all
```
### Object Storage (Servers only)
In case you need to adjust anything regarding the /mnt/object_storage JuiceFS volume, follow these steps.
Ensure to shut down all services:
```bash
dotf service stop --all
```
Unmount the volume:
```bash
sudo systemctl stop juicefs
```
Optionally, if you're going to do something with the metadata, you might need to stop Redis too.
```bash
cd ~/services/juicefs-redis/
docker compose down --remove-orphans
```
### Adding a new system
To add a new system you should follow these steps:
@@ -103,34 +77,3 @@ To add a new system you should follow these steps:
1. Add the relevant files shown in the section above.
2. Ensure you've either updated or added the `$HOME/.hostname` file with the hostname of the system (see the example after this list).
3. Run `dotf update` to ensure the symlinks are properly updated/created.
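For step 2, the `~/.hostname` file is plain text containing just the hostname (the value `.bashrc` reads into `DOTF_HOSTNAME`). For example, with a hypothetical hostname:

```bash
echo 'mennos-new-machine' > ~/.hostname
```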
---
## Using 1Password SSH Agent with WSL2 (Windows 11)
This setup allows you to use your 1Password-managed SSH keys inside WSL2. The WSL-side steps are automated by Ansible. The following Windows-side steps must be performed manually:
### Windows-side Setup
1. **Enable 1Password SSH Agent**
- Open the 1Password app on Windows.
- Go to **Settings → Developer** and enable **"Use the SSH agent"**.
2. **Install npiperelay using winget**
- Open PowerShell and run the following command:
```sh
winget install albertony.npiperelay
```
- This will install the latest maintained fork of npiperelay and add it to your PATH automatically.
3. **Restart Windows Terminal**
- After completing the above steps, restart your Windows Terminal to ensure all changes take effect.
4. **Test the SSH Agent in WSL2**
- Open your WSL2 terminal and run:
```sh
ssh-add -l
```
- If your 1Password keys are listed, the setup is complete.
#### References
- [Using 1Password's SSH Agent with WSL2](https://dev.to/d4vsanchez/use-1password-ssh-agent-in-wsl-2j6m)
- [How to change the PATH environment variable in Windows](https://www.wikihow.com/Change-the-PATH-Environment-Variable-on-Windows)

21
bin/1password-agent-bridge.sh Executable file

@@ -0,0 +1,21 @@
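# NOTE: this script is sourced from .bashrc rather than executed, which is why it can use `return` below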
source $HOME/dotfiles/bin/helpers/functions.sh
export SSH_AUTH_SOCK=$HOME/.1password/agent.sock
# Check if is_wsl function returns true, don't continue if we are not on WSL
if ! is_wsl; then
return
fi
printfe "%s" "cyan" "Running in WSL, ensuring 1Password SSH-Agent relay is running..."
ALREADY_RUNNING=$(ps auxww | grep -q "[n]piperelay.exe -ei -s //./pipe/openssh-ssh-agent"; echo $?)
if [[ $ALREADY_RUNNING != "0" ]]; then
if [[ -S $SSH_AUTH_SOCK ]]; then
rm $SSH_AUTH_SOCK
fi
(setsid socat UNIX-LISTEN:$SSH_AUTH_SOCK,fork EXEC:"npiperelay.exe -ei -s //./pipe/openssh-ssh-agent",nofork &) >/dev/null 2>&1
printfe "%s\n" "green" " [ Started ]"
return 0
fi
printfe "%s\n" "green" " [ Already running ]"


@@ -1,87 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import time
import subprocess
# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, run_command
def check_command_exists(command):
"""Check if a command is available in the system"""
try:
subprocess.run(
["which", command],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return True
except subprocess.CalledProcessError:
return False
def list_screen_sessions():
"""List all screen sessions"""
success, output = run_command(["screen", "-ls"])
return output
def wipe_dead_sessions():
"""Check and clean up dead screen sessions"""
screen_list = list_screen_sessions()
if "Dead" in screen_list:
print("Found dead sessions, cleaning up...")
run_command(["screen", "-wipe"])
def is_app_running(app_name):
"""Check if an app is already running in a screen session"""
screen_list = list_screen_sessions()
return app_name in screen_list
def start_app(app_name, command):
"""Start an application in a screen session"""
printfe("green", f"Starting {app_name} with command: {command}...")
run_command(["screen", "-dmS", app_name] + command.split())
time.sleep(1) # Give it a moment to start
def main():
# Define dictionary with app_name => command mapping
apps = {
"vesktop": "vesktop",
"ktailctl": "flatpak run org.fkoehler.KTailctl",
"nemo-desktop": "nemo-desktop",
}
# Clean up dead sessions if any
wipe_dead_sessions()
print("Starting auto-start applications...")
for app_name, command in apps.items():
# Get the binary name (first part of the command)
command_binary = command.split()[0]
# Check if the command exists
if check_command_exists(command_binary):
# Check if the app is already running
if is_app_running(app_name):
printfe("yellow", f"{app_name} is already running. Skipping...")
continue
# Start the application
start_app(app_name, command)
# Display screen sessions
print(list_screen_sessions())
return 0
if __name__ == "__main__":
sys.exit(main())

37
bin/actions/auto-start.sh Executable file

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
apps=(
"spotify"
"whatsapp-for-linux"
"telegram-desktop"
"vesktop"
"trayscale"
"1password"
"ulauncher-wrapped --no-window-shadow --hide-window"
"polkit-agent"
"swaync"
"nm-applet"
"blueman-applet"
)
# check if screen has any dead sessions
if screen -list | grep -q "Dead"; then
screen -wipe
fi
echo "Starting auto-start applications..."
for app in "${apps[@]}"; do
app_name=$(echo $app | awk '{print $1}')
app_params=$(echo $app | cut -d' ' -f2-)
if [ -x "$(command -v $app_name)" ]; then
if screen -list | grep -q $app_name; then
echo "$app_name is already running. Skipping..."
continue
fi
echo "Starting $app_name with parameters $app_params..."
screen -dmS $app_name $app_name $app_params
sleep 1
fi
done


@@ -1,191 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess
from datetime import datetime
# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, logo, _rainbow_color, COLORS
def get_last_ssh_login():
"""Get information about the last SSH login"""
try:
result = subprocess.run(
["lastlog", "-u", os.environ.get("USER", "")],
capture_output=True,
text=True,
)
# If lastlog didn't work try lastlog2
if result.returncode != 0:
result = subprocess.run(
["lastlog2", os.environ.get("USER", "")], capture_output=True, text=True
)
if result.returncode == 0:
lines = result.stdout.strip().split("\n")
if len(lines) >= 2: # Header line + data line
# Parse the last login line - example format:
# menno ssh 100.99.23.98 Mon Mar 10 19:09:43 +0100 2025
parts = lines[1].split()
if len(parts) >= 7 and "ssh" in parts[1]: # Check if it's an SSH login
# Extract IP address from the third column
ip = parts[2]
# Time is the rest of the line starting from position 3
time_str = " ".join(parts[3:])
return f"{COLORS['cyan']}Last SSH login{COLORS['reset']}{COLORS['yellow']} {time_str}{COLORS['cyan']} from{COLORS['yellow']} {ip}"
return None
except Exception as e:
# For debugging, you might want to print the exception
# print(f"Error getting SSH login: {str(e)}")
return None
def check_dotfiles_status():
"""Check if the dotfiles repository is dirty"""
dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
try:
if not os.path.isdir(os.path.join(dotfiles_path, ".git")):
return None
# Check for git status details
status = {
"is_dirty": False,
"untracked": 0,
"modified": 0,
"staged": 0,
"commit_hash": "",
"unpushed": 0,
}
# Get status of files
result = subprocess.run(
["git", "status", "--porcelain"],
cwd=dotfiles_path,
capture_output=True,
text=True,
)
if result.stdout.strip():
status["is_dirty"] = True
for line in result.stdout.splitlines():
if line.startswith("??"):
status["untracked"] += 1
if line.startswith(" M") or line.startswith("MM"):
status["modified"] += 1
if line.startswith("M ") or line.startswith("A "):
status["staged"] += 1
# Get current commit hash
result = subprocess.run(
["git", "rev-parse", "--short", "HEAD"],
cwd=dotfiles_path,
capture_output=True,
text=True,
)
if result.returncode == 0:
status["commit_hash"] = result.stdout.strip()
# Count unpushed commits
# Fix: Remove capture_output and set stdout explicitly
result = subprocess.run(
["git", "log", "--oneline", "@{u}.."],
cwd=dotfiles_path,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
text=True,
)
if result.returncode == 0:
status["unpushed"] = len(result.stdout.splitlines())
return status
except Exception as e:
print(f"Error checking dotfiles status: {str(e)}")
return None
def get_condensed_status():
"""Generate a condensed status line for trash and git status"""
status_parts = []
# Check trash status
trash_path = os.path.expanduser("~/.local/share/Trash/files")
try:
if os.path.exists(trash_path):
items = os.listdir(trash_path)
count = len(items)
if count > 0:
status_parts.append(f"[!] {count} file(s) in trash")
except Exception:
pass
# Check dotfiles status
dotfiles_status = check_dotfiles_status()
if dotfiles_status is not None:
if dotfiles_status["is_dirty"]:
status_parts.append(f"{COLORS['yellow']}dotfiles is dirty{COLORS['reset']}")
status_parts.append(
f"{COLORS['red']}[{dotfiles_status['untracked']}] untracked{COLORS['reset']}"
)
status_parts.append(
f"{COLORS['yellow']}[{dotfiles_status['modified']}] modified{COLORS['reset']}"
)
status_parts.append(
f"{COLORS['green']}[{dotfiles_status['staged']}] staged{COLORS['reset']}"
)
if dotfiles_status["commit_hash"]:
status_parts.append(
f"{COLORS['white']}[{COLORS['blue']}{dotfiles_status['commit_hash']}{COLORS['white']}]{COLORS['reset']}"
)
if dotfiles_status["unpushed"] > 0:
status_parts.append(
f"{COLORS['yellow']}[!] You have {dotfiles_status['unpushed']} commit(s) to push{COLORS['reset']}"
)
else:
status_parts.append("Unable to check dotfiles status")
if status_parts:
return " - ".join(status_parts)
return None
def welcome():
"""Display welcome message with hostname and username"""
print()
# Get hostname and username
hostname = os.uname().nodename
username = os.environ.get("USER", os.environ.get("USERNAME", "user"))
# Get SSH login info first
ssh_login = get_last_ssh_login()
print(f"{COLORS['cyan']}You're logged in on [", end="")
print(_rainbow_color(hostname), end="")
print(f"{COLORS['cyan']}] as [", end="")
print(_rainbow_color(username), end="")
print(f"{COLORS['cyan']}]{COLORS['reset']}")
# Display last SSH login info if available
if ssh_login:
print(f"{ssh_login}{COLORS['reset']}")
# Display condensed status line
condensed_status = get_condensed_status()
if condensed_status:
print(f"{COLORS['yellow']}{condensed_status}{COLORS['reset']}")
def main():
logo(continue_after=True)
welcome()
return 0
if __name__ == "__main__":
sys.exit(main())

20
bin/actions/hello.sh Executable file

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
welcome() {
echo
tput setaf 6
printf "You're logged in on ["
printf $HOSTNAME | lolcat
tput setaf 6
printf "] as "
printf "["
printf $USER | lolcat
tput setaf 6
printf "]\n"
tput sgr0
}
logo continue
welcome


@@ -1,30 +0,0 @@
#!/usr/bin/env python3
import os
import sys
# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, println, logo
def main():
# Print logo
logo(continue_after=True)
# Print help
dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
try:
with open(f"{dotfiles_path}/bin/resources/help.txt", "r") as f:
help_text = f.read()
print(help_text)
except Exception as e:
printfe("red", f"Error reading help file: {e}")
return 1
println(" ", "cyan")
return 0
if __name__ == "__main__":
sys.exit(main())

10
bin/actions/help.sh Executable file

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
# Print logo
logo
# Print help
cat $HOME/dotfiles/bin/resources/help.txt
println " " "cyan"


@@ -1,179 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess
import argparse
from pathlib import Path
# Import helper functions
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__))))
from helpers.functions import printfe, command_exists
DOTFILES_ROOT = os.path.expanduser("~/.dotfiles")
def lint_ansible(fix=False):
"""Run ansible-lint on Ansible files"""
ansible_dir = os.path.join(DOTFILES_ROOT, "config/ansible")
if not os.path.isdir(ansible_dir):
printfe("yellow", "No ansible directory found at config/ansible")
return 0
# Find all YAML files in the ansible directory
yaml_files = []
for ext in [".yml", ".yaml"]:
yaml_files.extend(list(Path(ansible_dir).glob(f"**/*{ext}")))
if not yaml_files:
printfe("yellow", "No Ansible files found in config/ansible to lint")
return 0
if not command_exists("ansible-lint"):
printfe(
"red",
"ansible-lint is not installed. Please install it with pip or your package manager.",
)
return 1
printfe("blue", f"Running ansible-lint{' with auto-fix' if fix else ''}...")
files_to_lint = [str(f) for f in yaml_files]
command = ["ansible-lint"]
if fix:
command.append("--fix")
command.extend(files_to_lint)
result = subprocess.run(command, check=False)
return result.returncode
def lint_nix():
"""Run nixfmt on Nix files"""
nix_files = list(Path(DOTFILES_ROOT).glob("**/*.nix"))
if not nix_files:
printfe("yellow", "No Nix files found to lint")
return 0
if not command_exists("nixfmt"):
printfe(
"red",
"nixfmt is not installed. Please install it with nix-env or your package manager.",
)
return 1
printfe("blue", "Running nixfmt...")
exit_code = 0
for nix_file in nix_files:
printfe("cyan", f"Formatting {nix_file}")
result = subprocess.run(["nixfmt", str(nix_file)], check=False)
if result.returncode != 0:
exit_code = 1
return exit_code
def lint_python(fix=False):
"""Run pylint and black on Python files"""
python_files = list(Path(DOTFILES_ROOT).glob("**/*.py"))
if not python_files:
printfe("yellow", "No Python files found to lint")
return 0
exit_code = 0
# Check for pylint
if command_exists("pylint"):
printfe("blue", "Running pylint...")
files_to_lint = [str(f) for f in python_files]
result = subprocess.run(["pylint"] + files_to_lint, check=False)
if result.returncode != 0:
exit_code = 1
else:
printfe("yellow", "pylint is not installed. Skipping Python linting.")
# Check for black
if command_exists("black"):
printfe(
"blue", f"Running black{'--check' if not fix else ''} on Python files..."
)
black_args = ["black"]
if not fix:
black_args.append("--check")
black_args.extend([str(f) for f in python_files])
result = subprocess.run(black_args, check=False)
if result.returncode != 0:
exit_code = 1
else:
printfe("yellow", "black is not installed. Skipping Python formatting.")
if not command_exists("pylint") and not command_exists("black"):
printfe(
"red",
"Neither pylint nor black is installed. Please run: `pip install pylint black`",
)
return 1
return exit_code
def main():
"""
Entry point for running linters on dotfiles.
This function parses command-line arguments to determine which linters to run
and whether to apply auto-fixes. It supports running linters for Ansible, Nix,
and Python files. If no specific linter is specified, all linters are executed.
Command-line arguments:
--ansible: Run only ansible-lint.
--nix: Run only nixfmt.
--python: Run only Python linters (pylint, black).
--fix: Auto-fix issues where possible.
Returns:
int: Exit code indicating the success or failure of the linting process.
A non-zero value indicates that one or more linters reported issues.
"""
parser = argparse.ArgumentParser(description="Run linters on dotfiles")
parser.add_argument("--ansible", action="store_true", help="Run only ansible-lint")
parser.add_argument("--nix", action="store_true", help="Run only nixfmt")
parser.add_argument(
"--python", action="store_true", help="Run only Python linters (pylint, black)"
)
parser.add_argument(
"--fix", action="store_true", help="Auto-fix issues where possible"
)
args = parser.parse_args()
# If no specific linter is specified, run all
run_ansible = args.ansible or not (args.ansible or args.nix or args.python)
run_nix = args.nix or not (args.ansible or args.nix or args.python)
run_python = args.python or not (args.ansible or args.nix or args.python)
exit_code = 0
if run_ansible:
ansible_result = lint_ansible(fix=args.fix)
if ansible_result != 0:
exit_code = ansible_result
if run_nix:
nix_result = lint_nix()
if nix_result != 0:
exit_code = nix_result
if run_python:
python_result = lint_python(fix=args.fix)
if python_result != 0:
exit_code = python_result
return exit_code
if __name__ == "__main__":
sys.exit(main())


@@ -1,185 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess
import hashlib
import glob
# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, run_command
def get_password():
"""Get password from 1Password"""
op_cmd = "op"
# Try to get the password
success, output = run_command(
[op_cmd, "read", "op://Dotfiles/Dotfiles Secrets/password"]
)
if not success:
printfe("red", "Failed to fetch password from 1Password.")
return None
# Check if we need to use a token
if "use 'op item get" in output:
# Extract the token
token = output.split("use 'op item get ")[1].split(" --")[0]
printfe("cyan", f"Got fetch token: {token}")
# Use the token to get the actual password
success, password = run_command(
[op_cmd, "item", "get", token, "--reveal", "--fields", "password"]
)
if not success:
return None
return password
else:
# We already got the password
return output
def prompt_for_password():
"""Ask for password manually"""
import getpass
printfe("cyan", "Enter the password manually: ")
password = getpass.getpass("")
if not password:
printfe("red", "Password cannot be empty.")
sys.exit(1)
printfe("green", "Password entered successfully.")
return password
def calculate_checksum(file_path):
"""Calculate SHA256 checksum of a file"""
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def encrypt_folder(folder_path, password):
"""Recursively encrypt files in a folder"""
for item in glob.glob(os.path.join(folder_path, "*")):
# Skip .gpg and .sha256 files
if item.endswith(".gpg") or item.endswith(".sha256"):
continue
# Handle directories recursively
if os.path.isdir(item):
encrypt_folder(item, password)
continue
# Calculate current checksum
current_checksum = calculate_checksum(item)
checksum_file = f"{item}.sha256"
# Check if file changed since last encryption
if os.path.exists(checksum_file):
with open(checksum_file, "r") as f:
previous_checksum = f.read().strip()
if current_checksum == previous_checksum:
continue
# Remove existing .gpg file if it exists
gpg_file = f"{item}.gpg"
if os.path.exists(gpg_file):
os.remove(gpg_file)
# Encrypt the file
printfe("cyan", f"Encrypting {item}...")
cmd = [
"gpg",
"--quiet",
"--batch",
"--yes",
"--symmetric",
"--cipher-algo",
"AES256",
"--armor",
"--passphrase",
password,
"--output",
gpg_file,
item,
]
success, _ = run_command(cmd)
if success:
printfe("cyan", f"Staging {item} for commit...")
run_command(["git", "add", "-f", gpg_file])
# Update checksum file
with open(checksum_file, "w") as f:
f.write(current_checksum)
else:
printfe("red", f"Failed to encrypt {item}")
def decrypt_folder(folder_path, password):
"""Recursively decrypt files in a folder"""
for item in glob.glob(os.path.join(folder_path, "*")):
# Handle .gpg files
if item.endswith(".gpg"):
output_file = item[:-4] # Remove .gpg extension
printfe("cyan", f"Decrypting {item}...")
cmd = [
"gpg",
"--quiet",
"--batch",
"--yes",
"--decrypt",
"--passphrase",
password,
"--output",
output_file,
item,
]
success, _ = run_command(cmd)
if not success:
printfe("red", f"Failed to decrypt {item}")
# Process directories recursively
elif os.path.isdir(item):
printfe("cyan", f"Decrypting folder {item}...")
decrypt_folder(item, password)
def main():
if len(sys.argv) != 2 or sys.argv[1] not in ["encrypt", "decrypt"]:
printfe("red", "Usage: secrets.py [encrypt|decrypt]")
return 1
# Get the dotfiles path
dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
secrets_path = os.path.join(dotfiles_path, "secrets")
# Get the password
password = get_password()
if not password:
password = prompt_for_password()
# Perform the requested action
if sys.argv[1] == "encrypt":
printfe("cyan", "Encrypting secrets...")
encrypt_folder(secrets_path, password)
else: # decrypt
printfe("cyan", "Decrypting secrets...")
decrypt_folder(secrets_path, password)
return 0
if __name__ == "__main__":
sys.exit(main())

115
bin/actions/secrets.sh Executable file

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
if is_wsl; then
output=$(op.exe item get "Dotfiles Secrets" --fields password)
else
output=$(op item get "Dotfiles Secrets" --fields password)
fi
# Check if command was a success
if [[ $? -ne 0 ]]; then
printfe "%s\n" "red" "Failed to fetch password from 1Password."
fi
# If the output does not contain "use 'op item get", the password was fetched successfully
# without needing a second command to reveal it
if [[ ! $output == *"use 'op item get"* ]]; then
password=$output
else
token=$(echo "$output" | grep -oP "(?<=\[use 'op item get ).*(?= --)")
printfe "%s\n" "cyan" "Got fetch token: $token"
if is_wsl; then
password=$(op.exe item get $token --reveal --fields password)
else
password=$(op item get $token --reveal --fields password)
fi
fi
# only continue if password isn't empty
if [[ -z "$password" ]]; then
printfe "%s\n" "red" "Something went wrong while fetching the password from 1Password."
# Ask for manual input
printfe "%s" "cyan" "Enter the password manually: "
read -s password
echo
if [[ -z "$password" ]]; then
printfe "%s\n" "red" "Password cannot be empty."
exit 1
fi
printfe "%s\n" "green" "Password entered successfully."
fi
encrypt_folder() {
for file in $1/*; do
# Skip if the current file is a .gpg file
if [[ $file == *.gpg ]]; then
continue
fi
# Skip if the current file is a .sha256 file
if [[ $file == *.sha256 ]]; then
continue
fi
# If the file is a directory, call this function recursively
if [[ -d $file ]]; then
encrypt_folder $file
continue
fi
current_checksum=$(sha256sum "$file" | awk '{ print $1 }')
checksum_file="$file.sha256"
if [[ -f $checksum_file ]]; then
previous_checksum=$(cat $checksum_file)
if [[ $current_checksum == $previous_checksum ]]; then
continue
fi
fi
# If the file has an accompanying .gpg file, remove it
if [[ -f $file.gpg ]]; then
rm "$file.gpg"
fi
printfe "%s\n" "cyan" "Encrypting $file..."
gpg --quiet --batch --yes --symmetric --cipher-algo AES256 --armor --passphrase="$password" --output "$file.gpg" "$file"
# Update checksum file
echo $current_checksum > "$checksum_file"
done
}
# Recursively decrypt all .gpg files under the folder specified, recursively call this function for sub folders!
# Keep the original file name minus the .gpg extension
decrypt_folder() {
for file in $1/*; do
# Skip if current file is a .gpg file
if [[ $file == *.gpg ]]; then
filename=$(basename $file .gpg)
printfe "%s\n" "cyan" "Decrypting $file..."
gpg --quiet --batch --yes --decrypt --passphrase="$password" --output $1/$filename $file
fi
# If file is actually a folder, call this function recursively
if [[ -d $file ]]; then
printfe "%s\n" "cyan" "Decrypting folder $file..."
decrypt_folder $file
fi
done
}
if [[ "$1" == "decrypt" ]]; then
printfe "%s\n" "cyan" "Decrypting secrets..."
decrypt_folder ~/dotfiles/secrets
elif [[ "$1" == "encrypt" ]]; then
printfe "%s\n" "cyan" "Encrypting secrets..."
encrypt_folder ~/dotfiles/secrets
fi
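# Usage sketch: this script takes a single mode argument:
#   bin/actions/secrets.sh encrypt   # encrypt changed files under ~/dotfiles/secrets
#   bin/actions/secrets.sh decrypt   # restore plaintext from the *.gpg files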


@@ -1,445 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess
import argparse
# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, println, logo
# Base directory for Docker services: $HOME/.services
SERVICES_DIR = os.path.join(os.path.expanduser("~"), ".services")
# Protected services that should never be stopped
PROTECTED_SERVICES = ["juicefs-redis"]
def get_service_path(service_name):
"""Return the path to a service's docker-compose file"""
service_dir = os.path.join(SERVICES_DIR, service_name)
compose_file = os.path.join(service_dir, "docker-compose.yml")
if not os.path.exists(compose_file):
printfe("red", f"Error: Service '{service_name}' not found at {compose_file}")
return None
return compose_file
def run_docker_compose(args, service_name=None, compose_file=None):
"""Run docker compose command with provided args"""
if service_name and not compose_file:
compose_file = get_service_path(service_name)
if not compose_file:
return 1
cmd = ["docker", "compose"]
if compose_file:
cmd.extend(["-f", compose_file])
cmd.extend(args)
printfe("blue", f"Running: {' '.join(cmd)}")
result = subprocess.run(cmd)
return result.returncode
def get_all_services():
"""Return a list of all available services"""
if not os.path.exists(SERVICES_DIR):
return []
services = [
d
for d in os.listdir(SERVICES_DIR)
if os.path.isdir(os.path.join(SERVICES_DIR, d))
and os.path.exists(os.path.join(SERVICES_DIR, d, "docker-compose.yml"))
]
return sorted(services)
def cmd_start(args):
"""Start a Docker service"""
if args.all:
services = get_all_services()
if not services:
printfe("yellow", "No services found to start")
return 0
printfe("blue", f"Starting all services: {', '.join(services)}")
failed_services = []
for service in services:
printfe("blue", f"\n=== Starting {service} ===")
result = run_docker_compose(["up", "-d"], service_name=service)
if result != 0:
failed_services.append(service)
if failed_services:
printfe(
"red",
f"\nFailed to start the following services: {', '.join(failed_services)}",
)
return 1
else:
printfe("green", "\nAll services started successfully")
return 0
else:
return run_docker_compose(["up", "-d"], service_name=args.service)
def cmd_stop(args):
"""Stop a Docker service"""
if args.all:
running_services = get_all_running_services()
if not running_services:
printfe("yellow", "No running services found to stop")
return 0
# Filter out the protected services
safe_services = [s for s in running_services if s not in PROTECTED_SERVICES]
# Check if protected services were filtered out
protected_running = [s for s in running_services if s in PROTECTED_SERVICES]
if protected_running:
printfe(
"yellow",
f"Note: {', '.join(protected_running)} will not be stopped as they are protected services",
)
if not safe_services:
printfe(
"yellow", "No services to stop (all running services are protected)"
)
return 0
printfe("blue", f"Stopping all running services: {', '.join(safe_services)}")
failed_services = []
for service in safe_services:
printfe("blue", f"\n=== Stopping {service} ===")
result = run_docker_compose(["down"], service_name=service)
if result != 0:
failed_services.append(service)
if failed_services:
printfe(
"red",
f"\nFailed to stop the following services: {', '.join(failed_services)}",
)
return 1
else:
printfe("green", "\nAll running services stopped successfully")
return 0
else:
# Check if trying to stop a protected service
if args.service in PROTECTED_SERVICES:
printfe(
"red",
f"Error: {args.service} is a protected service and cannot be stopped",
)
printfe(
"yellow",
f"The {args.service} service is required for other services to work properly",
)
return 1
return run_docker_compose(["down"], service_name=args.service)
def cmd_restart(args):
"""Restart a Docker service"""
return run_docker_compose(["restart"], service_name=args.service)
def get_all_running_services():
"""Return a list of all running services"""
if not os.path.exists(SERVICES_DIR):
return []
running_services = []
services = [
d
for d in os.listdir(SERVICES_DIR)
if os.path.isdir(os.path.join(SERVICES_DIR, d))
and os.path.exists(os.path.join(SERVICES_DIR, d, "docker-compose.yml"))
]
for service in services:
if check_service_running(service) > 0:
running_services.append(service)
return running_services
def cmd_update(args):
"""Update a Docker service by pulling new images and recreating containers if needed"""
if args.all:
running_services = get_all_running_services()
if not running_services:
printfe("yellow", "No running services found to update")
return 0
printfe("blue", f"Updating all running services: {', '.join(running_services)}")
failed_services = []
for service in running_services:
printfe("blue", f"\n=== Updating {service} ===")
# Pull the latest images
pull_result = run_docker_compose(["pull"], service_name=service)
# Bring the service up with the latest images
up_result = run_docker_compose(["up", "-d"], service_name=service)
if pull_result != 0 or up_result != 0:
failed_services.append(service)
if failed_services:
printfe(
"red",
f"\nFailed to update the following services: {', '.join(failed_services)}",
)
return 1
else:
printfe("green", "\nAll running services updated successfully")
return 0
else:
# The original single-service update logic
# First pull the latest images
pull_result = run_docker_compose(["pull"], service_name=args.service)
if pull_result != 0:
return pull_result
# Then bring the service up with the latest images
return run_docker_compose(["up", "-d"], service_name=args.service)
def cmd_ps(args):
"""Show Docker service status"""
if args.service:
return run_docker_compose(["ps"], service_name=args.service)
else:
return run_docker_compose(["ps"])
def cmd_logs(args):
"""Show Docker service logs"""
cmd = ["logs"]
if args.follow:
cmd.append("-f")
if args.tail:
cmd.extend(["--tail", args.tail])
return run_docker_compose(cmd, service_name=args.service)
def check_service_running(service_name):
"""Check if service has running containers and return the count"""
compose_file = get_service_path(service_name)
if not compose_file:
return 0
result = subprocess.run(
["docker", "compose", "-f", compose_file, "ps", "--quiet"],
capture_output=True,
text=True,
)
# Count non-empty lines to get container count
containers = [line for line in result.stdout.strip().split("\n") if line]
return len(containers)
def get_systemd_timer_status(timer_name):
"""Check if a systemd timer is active and enabled, and get next run time"""
# Check if timer is active (running/waiting)
active_result = subprocess.run(
["sudo", "systemctl", "is-active", timer_name],
capture_output=True,
text=True
)
# Check if timer is enabled (will start on boot)
enabled_result = subprocess.run(
["sudo", "systemctl", "is-enabled", timer_name],
capture_output=True,
text=True
)
# Check corresponding service status
service_name = timer_name.replace('.timer', '.service')
service_result = subprocess.run(
["sudo", "systemctl", "is-active", service_name],
capture_output=True,
text=True
)
# Get next run time
list_result = subprocess.run(
["sudo", "systemctl", "list-timers", timer_name, "--no-legend"],
capture_output=True,
text=True
)
is_active = active_result.returncode == 0
is_enabled = enabled_result.returncode == 0
service_status = service_result.stdout.strip() if service_result else "unknown"
next_run = "unknown"
if list_result.returncode == 0 and list_result.stdout.strip():
parts = list_result.stdout.strip().split()
if len(parts) >= 4:
next_run = f"{parts[0]} {parts[1]} {parts[2]}"
return is_active, is_enabled, next_run, service_status
def cmd_list(args):
"""List available Docker services and systemd services"""
# Docker services section
if not os.path.exists(SERVICES_DIR):
printfe("red", f"Error: Services directory not found at {SERVICES_DIR}")
return 1
services = [
d
for d in os.listdir(SERVICES_DIR)
if os.path.isdir(os.path.join(SERVICES_DIR, d))
and os.path.exists(os.path.join(SERVICES_DIR, d, "docker-compose.yml"))
]
if not services:
printfe("yellow", "No Docker services found")
else:
println("Available Docker services:", "blue")
for service in sorted(services):
container_count = check_service_running(service)
is_running = container_count > 0
if is_running:
status = f"[RUNNING - {container_count} container{'s' if container_count > 1 else ''}]"
color = "green"
else:
status = "[STOPPED]"
color = "red"
printfe(color, f" - {service:<20} {status}")
# Systemd services section
print()
println("System services:", "blue")
systemd_timers = ["borg-backup.timer", "borg-local-sync.timer", "dynamic-dns.timer"]
for timer in systemd_timers:
is_active, is_enabled, next_run, service_status = get_systemd_timer_status(timer)
service_name = timer.replace('.timer', '')
if service_status in ["activating", "active"]:
# Service is currently running
status = f"[🔄 RUNNING - next: {next_run}]"
color = "yellow"
elif is_active and is_enabled:
status = f"[TIMER ACTIVE - next: {next_run}]"
color = "green"
elif is_enabled:
status = "[TIMER ENABLED - INACTIVE]"
color = "yellow"
else:
status = "[TIMER DISABLED]"
color = "red"
printfe(color, f" - {service_name:<20} {status}")
return 0
def main():
parser = argparse.ArgumentParser(description="Manage Docker services")
subparsers = parser.add_subparsers(dest="command", help="Command to run")
# Start command
start_parser = subparsers.add_parser("start", help="Start a Docker service")
start_group = start_parser.add_mutually_exclusive_group(required=True)
start_group.add_argument("--all", action="store_true", help="Start all services")
start_group.add_argument("service", nargs="?", help="Service to start")
start_group.add_argument(
"--service", dest="service", help="Service to start (deprecated)"
)
# Stop command
stop_parser = subparsers.add_parser("stop", help="Stop a Docker service")
stop_group = stop_parser.add_mutually_exclusive_group(required=True)
stop_group.add_argument(
"--all", action="store_true", help="Stop all running services"
)
stop_group.add_argument("service", nargs="?", help="Service to stop")
stop_group.add_argument(
"--service", dest="service", help="Service to stop (deprecated)"
)
# Restart command
restart_parser = subparsers.add_parser("restart", help="Restart a Docker service")
restart_parser.add_argument("service", help="Service to restart")
# Update command
update_parser = subparsers.add_parser(
"update",
help="Update a Docker service (pull new images and recreate if needed)",
)
update_group = update_parser.add_mutually_exclusive_group(required=True)
update_group.add_argument(
"--all", action="store_true", help="Update all running services"
)
update_group.add_argument("service", nargs="?", help="Service to update")
update_group.add_argument(
"--service", dest="service", help="Service to update (deprecated)"
)
# PS command
ps_parser = subparsers.add_parser("ps", help="Show Docker service status")
ps_parser.add_argument("service", nargs="?", help="Service to check")
# Logs command
logs_parser = subparsers.add_parser("logs", help="Show Docker service logs")
logs_parser.add_argument("service", help="Service to show logs for")
logs_parser.add_argument(
"-f", "--follow", action="store_true", help="Follow log output"
)
logs_parser.add_argument(
"--tail", help="Number of lines to show from the end of logs"
)
# List command and its alias
subparsers.add_parser("list", help="List available Docker services")
subparsers.add_parser("ls", help="List available Docker services (alias for list)")
# Parse arguments
args = parser.parse_args()
if not args.command:
parser.print_help()
return 1
# Execute the appropriate command
commands = {
"start": cmd_start,
"stop": cmd_stop,
"restart": cmd_restart,
"update": cmd_update,
"ps": cmd_ps,
"logs": cmd_logs,
"list": cmd_list,
"ls": cmd_list, # Alias 'ls' to the same function as 'list'
}
return commands[args.command](args)
if __name__ == "__main__":
sys.exit(main())


@@ -1,81 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess
# Add the helpers directory to the path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'helpers'))
from functions import printfe
def get_borg_passphrase():
"""Get Borg passphrase from 1Password"""
try:
result = subprocess.run(
["op", "item", "get", "Borg Backup", "--vault=Dotfiles", "--fields=password", "--reveal"],
capture_output=True,
text=True,
check=True
)
return result.stdout.strip()
except subprocess.CalledProcessError:
printfe("red", "Error: Failed to retrieve Borg passphrase from 1Password")
return None
def main():
"""Generate export commands for Borg environment variables"""
args = sys.argv[1:] if len(sys.argv) > 1 else []
# Get passphrase from 1Password
passphrase = get_borg_passphrase()
if not passphrase:
return 1
# Generate the export commands
exports = [
f'export BORG_REPO="/mnt/object_storage/borg-repo"',
f'export BORG_PASSPHRASE="{passphrase}"',
f'export BORG_CACHE_DIR="/home/menno/.config/borg/cache"',
f'export BORG_CONFIG_DIR="/home/menno/.config/borg/config"',
f'export BORG_SECURITY_DIR="/home/menno/.config/borg/security"',
f'export BORG_KEYS_DIR="/home/menno/.config/borg/keys"'
]
# Check if we're being eval'd (no arguments and stdout is a pipe)
if not args and not os.isatty(sys.stdout.fileno()):
# Just output the export commands for eval
for export_cmd in exports:
print(export_cmd)
return 0
# Print instructions and examples
printfe("cyan", "🔧 Borg Environment Setup")
print()
printfe("yellow", "Run the following command to setup your shell:")
print()
printfe("green", "eval $(dotf source)")
print()
printfe("red", "⚠️ Repository Permission Issue:")
printfe("white", "The Borg repository was created by root, so you need sudo:")
print()
printfe("green", "sudo -E borg list")
printfe("green", "sudo -E borg info")
print()
printfe("yellow", "Or copy and paste these exports:")
print()
# Output the export commands
for export_cmd in exports:
print(export_cmd)
print()
printfe("cyan", "📋 Borg commands (use with sudo -E):")
printfe("white", " sudo -E borg list # List all backups")
printfe("white", " sudo -E borg info # Repository info")
printfe("white", " sudo -E borg list ::archive-name # List files in backup")
printfe("white", " sudo -E borg mount . ~/borg-mount # Mount as filesystem")
return 0
if __name__ == "__main__":
sys.exit(main())


@@ -1,122 +0,0 @@
#!/usr/bin/env python3
import os
import subprocess
import sys
# Add the helpers directory to the path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'helpers'))
from functions import printfe
def run_command(cmd, capture_output=True):
"""Run a command and return the result"""
try:
result = subprocess.run(cmd, shell=True, capture_output=capture_output, text=True)
return result
except Exception as e:
printfe("red", f"Error running command: {e}")
return None
def show_timer_status(timer_name, system_level=True):
"""Show concise status for a specific timer"""
cmd_prefix = "sudo systemctl" if system_level else "systemctl --user"
# Get timer status
status_cmd = f"{cmd_prefix} is-active {timer_name}"
status_result = run_command(status_cmd)
timer_status = "active" if status_result and status_result.returncode == 0 else "inactive"
# Get corresponding service status
service_name = timer_name.replace('.timer', '.service')
service_cmd = f"{cmd_prefix} is-active {service_name}"
service_result = run_command(service_cmd)
service_status = service_result.stdout.strip() if service_result else "unknown"
# Get next run time
list_cmd = f"{cmd_prefix} list-timers {timer_name} --no-legend"
list_result = run_command(list_cmd)
next_run = "unknown"
if list_result and list_result.returncode == 0 and list_result.stdout.strip():
parts = list_result.stdout.strip().split()
if len(parts) >= 4:
next_run = f"{parts[0]} {parts[1]} {parts[2]} ({parts[3]})"
# Format output based on service status
service_short = service_name.replace('.service', '')
if service_status in ["activating", "active"]:
# Service is currently running
status_color = "yellow"
status_text = f"RUNNING next: {next_run}"
symbol = "🔄"
elif timer_status == "active":
# Timer is active but service is not running
status_color = "green"
status_text = f"active next: {next_run}"
symbol = ""
else:
# Timer is inactive
status_color = "red"
status_text = f"inactive next: {next_run}"
symbol = ""
printfe(status_color, f"{symbol} {service_short:<12} {status_text}")
def show_examples():
"""Show example commands for checking services and logs"""
printfe("cyan", "=== Useful Commands ===")
print()
printfe("yellow", "Check service status:")
print(" sudo systemctl status borg-backup.service")
print(" sudo systemctl status borg-local-sync.service")
print(" sudo systemctl status dynamic-dns.service")
print()
printfe("yellow", "View logs:")
print(" sudo journalctl -u borg-backup.service -f")
print(" sudo journalctl -u borg-local-sync.service -f")
print(" sudo journalctl -u dynamic-dns.service -f")
print(" tail -f /var/log/borg-backup.log")
print(" tail -f /var/log/borg-local-sync.log")
print()
printfe("yellow", "Manual trigger:")
print(" sudo systemctl start borg-backup.service")
print(" sudo systemctl start borg-local-sync.service")
print(" sudo systemctl start dynamic-dns.service")
print()
printfe("yellow", "List all timers:")
print(" sudo systemctl list-timers")
print()
def main():
"""Main timers action"""
args = sys.argv[1:] if len(sys.argv) > 1 else []
printfe("cyan", "🕐 System Timers")
print()
# Show timer statuses
timers = [
("borg-backup.timer", True),
("borg-local-sync.timer", True),
("dynamic-dns.timer", True)
]
for timer_name, system_level in timers:
if os.path.exists(f"/etc/systemd/system/{timer_name}"):
show_timer_status(timer_name, system_level)
else:
printfe("yellow", f" {timer_name.replace('.timer', ''):<12} not found")
print()
# Show helpful examples
show_examples()
return 0
if __name__ == "__main__":
sys.exit(main())


@@ -1,430 +0,0 @@
#!/usr/bin/env python3
import os
import sys
import subprocess
import argparse
# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, run_command
def help_message():
"""Print help message and exit"""
printfe("green", "Usage: upgrade.py [options]")
printfe("green", "Options:")
printfe("green", " --ha, -H Upgrade Home Manager packages.")
printfe("green", " --ansible, -A Upgrade Ansible packages.")
printfe(
"green",
" --ansible-verbose Upgrade Ansible packages with verbose output. (-vvv)",
)
printfe(
"green",
" --tags TAG Run only specific Ansible tags (e.g., --tags caddy).",
)
printfe(
"green",
" --full-speed, -F Upgrade packages and use all available cores for compilation. (Default: 8 cores)",
)
printfe("green", " --skip-check, -s Skip checking for dotfiles updates.")
printfe("green", " --help, -h Display this help message.")
return 0
def check_git_repository():
"""Check for changes in the dotfiles git repository and prompt user to pull if needed"""
dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
printfe("cyan", "Checking for updates in dotfiles repository...")
# Change to dotfiles directory
current_dir = os.getcwd()
os.chdir(dotfiles_path)
# Check if this is a git repository
status, _ = run_command(["git", "rev-parse", "--is-inside-work-tree"], shell=False)
if not status:
printfe("red", "The dotfiles directory is not a git repository.")
os.chdir(current_dir)
return False
# Get the current branch name
status, current_branch = run_command(
["git", "rev-parse", "--abbrev-ref", "HEAD"], shell=False
)
if not status:
printfe("red", "Failed to determine current branch.")
os.chdir(current_dir)
return False
current_branch = current_branch.strip()
# Fetch the latest changes
status, output = run_command(["git", "fetch"], shell=False)
if not status:
printfe(
"yellow", f"Warning: Failed to fetch changes from git repository: {output}"
)
printfe("yellow", "Continuing update process without repository check...")
os.chdir(current_dir)
return True
# Check if remote branch exists
status, output = run_command(
["git", "ls-remote", "--heads", "origin", current_branch], shell=False
)
if not status or not output.strip():
printfe(
"yellow",
f"Remote branch 'origin/{current_branch}' not found. Using local branch only.",
)
os.chdir(current_dir)
return True
# Check if we're behind the remote
status, output = run_command(
["git", "rev-list", f"HEAD..origin/{current_branch}", "--count"], shell=False
)
if not status:
printfe("red", f"Failed to check for repository updates: {output}")
os.chdir(current_dir)
return False
behind_count = output.strip()
if behind_count == "0":
printfe(
"green", f"Dotfiles repository is up to date on branch '{current_branch}'."
)
os.chdir(current_dir)
return True
# Show what changes are available
status, output = run_command(
["git", "log", f"HEAD..origin/{current_branch}", "--oneline"], shell=False
)
if status:
printfe(
"yellow",
f"Your dotfiles repository is {behind_count} commit(s) behind on branch '{current_branch}'. Changes:",
)
for line in output.strip().splitlines():
printfe("yellow", f"{line}")
else:
printfe(
"yellow",
f"Your dotfiles repository is {behind_count} commit(s) behind on branch '{current_branch}'.",
)
# Ask user if they want to pull changes
response = input("Do you want to pull these changes? (yes/no): ").strip().lower()
if response in ["yes", "y"]:
status, output = run_command(
["git", "pull", "origin", current_branch], shell=False
)
if not status:
printfe("red", f"Failed to pull changes: {output}")
os.chdir(current_dir)
return False
printfe("green", "Successfully updated dotfiles repository.")
else:
printfe("yellow", "Skipping repository update.")
os.chdir(current_dir)
return True
def ensure_ansible_collections():
"""Ensure required Ansible collections are installed"""
# List of required collections that can be expanded in the future
required_collections = [
"community.general",
]
printfe("cyan", "Checking for required Ansible collections...")
# Get list of installed collections using ansible-galaxy
status, output = run_command(["ansible-galaxy", "collection", "list"], shell=False)
if not status:
printfe("yellow", f"Failed to list Ansible collections: {output}")
printfe("yellow", "Will try to install all required collections.")
installed_collections = []
else:
# Parse output to get installed collections
installed_collections = []
# Split output into lines and process
lines = output.splitlines()
collection_section = False
for line in lines:
line = line.strip()
# Skip empty lines
if not line:
continue
# Check if we've reached the collection listing section
if line.startswith("Collection"):
collection_section = True
continue
# Skip the separator line after the header
if collection_section and line.startswith("--"):
continue
# Process collection entries
if collection_section and " " in line:
# Format is typically: "community.general 10.4.0"
parts = line.split()
if len(parts) >= 1:
collection_name = parts[0]
installed_collections.append(collection_name)
# Check which required collections are missing
missing_collections = []
for collection in required_collections:
if collection not in installed_collections:
missing_collections.append(collection)
# Install missing collections
if missing_collections:
for collection in missing_collections:
printfe("yellow", f"Installing {collection} collection...")
status, install_output = run_command(
["ansible-galaxy", "collection", "install", collection], shell=False
)
if not status:
printfe(
"yellow",
f"Warning: Failed to install {collection} collection: {install_output}",
)
printfe(
"yellow",
f"Continuing anyway, but playbook might fail if it requires {collection}",
)
else:
printfe("green", f"Successfully installed {collection} collection")
else:
printfe("green", "All required collections are already installed.")
return True
def get_sudo_password_from_1password(username, hostname):
"""Fetches the sudo password from 1Password using the op CLI tool."""
printfe("cyan", "Attempting to fetch sudo password from 1Password...")
try:
op_command = [
"op",
"read",
f"op://Dotfiles/sudo/{username} {hostname}",
]
result = subprocess.run(op_command, capture_output=True, text=True, check=True)
password = result.stdout.strip()
printfe("green", "Successfully fetched sudo password from 1Password.")
return password
except subprocess.CalledProcessError as e:
printfe("red", f"Failed to fetch password from 1Password: {e.stderr.strip()}")
return None
except FileNotFoundError:
printfe("red", "Error: 'op' command not found. Please ensure 1Password CLI is installed and in your PATH.")
return None
except Exception as e:
printfe("red", f"An unexpected error occurred while fetching password: {e}")
return None
def main():
# Parse arguments
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--ha", "-H", action="store_true", help="Upgrade Home Manager packages"
)
parser.add_argument(
"--ansible", "-A", action="store_true", help="Upgrade Ansible packages"
)
parser.add_argument(
"--ansible-verbose",
action="store_true",
help="Upgrade Ansible packages with verbose output",
)
parser.add_argument(
"--tags", type=str, help="Run only specific Ansible tags"
)
parser.add_argument(
"--full-speed", "-F", action="store_true", help="Use all available cores"
)
parser.add_argument(
"--help", "-h", action="store_true", help="Display help message"
)
parser.add_argument(
"--skip-check", "-s", action="store_true", help="Skip checking for dotfiles updates"
)
args = parser.parse_args()
if args.help:
return help_message()
# If no specific option provided, run all
if not args.ha and not args.ansible and not args.ansible_verbose:
args.ha = True
args.ansible = True
# If ansible_verbose is set, also set ansible
if args.ansible_verbose:
args.ansible = True
# Always check git repository first unless skip-check is set
if not args.skip_check:
if not check_git_repository():
printfe("red", "Failed to check or update dotfiles repository.")
return 1
else:
printfe("yellow", "Skipping dotfiles repository update check (--skip-check).")
# Set cores and jobs based on full-speed flag
if args.full_speed:
import multiprocessing
cores = jobs = multiprocessing.cpu_count()
else:
cores = 8
jobs = 1
printfe("cyan", f"Limiting to {cores} cores with {jobs} jobs.")
# Home Manager update
if args.ha:
dotfiles_path = os.environ.get(
"DOTFILES_PATH", os.path.expanduser("~/.dotfiles")
)
hostname = os.uname().nodename
printfe("cyan", "Updating Home Manager flake...")
os.chdir(f"{dotfiles_path}/config/home-manager")
status, output = run_command(
[
"nix",
"--extra-experimental-features",
"nix-command",
"--extra-experimental-features",
"flakes",
"flake",
"update",
],
shell=False,
)
if not status:
printfe("red", f"Failed to update Home Manager flake: {output}")
return 1
# Check if home-manager is installed
status, _ = run_command(["which", "home-manager"], shell=False)
if status:
printfe("cyan", "Cleaning old backup files...")
backup_file = os.path.expanduser("~/.config/mimeapps.list.backup")
if os.path.exists(backup_file):
os.remove(backup_file)
printfe("cyan", "Upgrading Home Manager packages...")
env = os.environ.copy()
env["NIXPKGS_ALLOW_UNFREE"] = "1"
cmd = [
"home-manager",
"--extra-experimental-features",
"nix-command",
"--extra-experimental-features",
"flakes",
"switch",
"-b",
"backup",
f"--flake",
f".#{hostname}",
"--impure",
"--cores",
str(cores),
"-j",
str(jobs),
]
result = subprocess.run(cmd, env=env)
if result.returncode != 0:
printfe("red", "Failed to upgrade Home Manager packages.")
return 1
else:
printfe("red", "Home Manager is not installed.")
return 1
# Ansible update
if args.ansible:
dotfiles_path = os.environ.get(
"DOTFILES_PATH", os.path.expanduser("~/.dotfiles")
)
hostname = os.uname().nodename
username = os.environ.get("USER", os.environ.get("USERNAME", "user"))
# Ensure required collections are installed
if not ensure_ansible_collections():
printfe(
"red", "Failed to ensure required Ansible collections are installed"
)
return 1
printfe("cyan", "Running Ansible playbook...")
playbook_path = f"{dotfiles_path}/config/ansible/playbook.yml"
ansible_cmd = [
"/usr/bin/env",
"ansible-playbook",
"-i",
f"{dotfiles_path}/config/ansible/inventory.ini",
playbook_path,
"--extra-vars",
f"hostname={hostname}",
"--extra-vars",
f"ansible_user={username}",
"--limit",
hostname,
]
sudo_password = None
if not os.isatty(sys.stdin.fileno()):
printfe("yellow", "Warning: Not running in an interactive terminal. Cannot fetch password from 1Password.")
ansible_cmd.append("--ask-become-pass")
else:
sudo_password = get_sudo_password_from_1password(username, hostname)
if sudo_password:
ansible_cmd.extend(["--become-pass-file", "-"])
else:
printfe("yellow", "Could not fetch password from 1Password. Falling back to --ask-become-pass.")
ansible_cmd.append("--ask-become-pass")
if args.tags:
ansible_cmd.extend(["--tags", args.tags])
if args.ansible_verbose:
ansible_cmd.append("-vvv")
# Debug: Show the command being executed
printfe("yellow", f"Debug: Executing command: {' '.join(ansible_cmd)}")
# Execute the Ansible command, passing password via stdin if available
if sudo_password:
result = subprocess.run(ansible_cmd, input=sudo_password.encode('utf-8'))
else:
result = subprocess.run(ansible_cmd)
if result.returncode != 0:
printfe("red", "Failed to upgrade Ansible packages.")
return 1
return 0
if __name__ == "__main__":
sys.exit(main())

229
bin/actions/update.sh Executable file
View File

@@ -0,0 +1,229 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
# check if --verbose was passed
if [ "$2" = "--verbose" ]; then
export verbose=true
printfe "%s\n" "yellow" "Verbose mode enabled"
else
export verbose=false
fi
# Check if we have shyaml since that's required for the script to function
if [ ! -x "$(command -v shyaml)" ]; then
printfe "%s\n" "red" "shyaml is not installed, installing it..."
pipx install shyaml
fi
ensure_symlink() {
local source
local target
# Fetch target from YAML
target=$(shyaml get-value "config.symlinks.$1.target" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
# Fetch source from YAML based on OS
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
# Check for WSL2
if [[ $(uname -a) == *"microsoft-standard-WSL2"* ]]; then
source=$(shyaml get-value "config.symlinks.$1.sources.wsl" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
else
source=$(shyaml get-value "config.symlinks.$1.sources.linux" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
fi
elif [[ "$OSTYPE" == "darwin"* ]]; then
source=$(shyaml get-value "config.symlinks.$1.sources.macos" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
fi
# Fall back to generic source if OS-specific source is empty
if [ -z "$source" ]; then
source=$(shyaml get-value "config.symlinks.$1.source" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
fi
# Attempt to use the hostname of the machine if source is still empty
if [ -z "$source" ]; then
source=$(shyaml get-value "config.symlinks.$1.sources.$(hostname)" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
fi
# Error out if source is still empty
if [ -z "$source" ]; then
printfe "%s\n" "red" " - No valid source defined for $1"
return
fi
# Expand ~ with $HOME
source="${source/#\~/$HOME}"
target="${target/#\~/$HOME}"
# Call the function to check or make the symlink
check_or_make_symlink "$source" "$target"
# Check if there is a chmod defined for the target file
desired_chmod=$(shyaml get-value "config.symlinks.$1.chmod" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
if [ -n "$desired_chmod" ]; then
# Resolve the target if it is a symlink
resolved_target=$(readlink -f "$target")
# If readlink fails, fall back to the original target
if [ -z "$resolved_target" ]; then
resolved_target="$target"
fi
current_chmod=$(stat -c %a "$resolved_target" 2>/dev/null)
if [ "$current_chmod" != "$desired_chmod" ]; then
printfe "%s\n" "yellow" " - Changing chmod of $resolved_target to $desired_chmod"
chmod "$desired_chmod" "$resolved_target"
fi
fi
}
symlinks() {
####################################################################################################
# Update symlinks
####################################################################################################
# Load symlinks from config file
symlinks=($(cat $HOME/dotfiles/config/config.yaml | shyaml keys config.symlinks))
printfe "%s\n" "cyan" "Updating symlinks..."
for symlink in "${symlinks[@]}"; do
ensure_symlink $symlink
done
}
####################################################################################################
# Update packages
####################################################################################################
sys_packages() {
if [[ "$OSTYPE" == "darwin"* ]]; then
printfe "%s\n" "cyan" "Updating brew packages..."
brew update
brew upgrade
brew cleanup
else
if [ -x "$(command -v nixos-version)" ]; then
printfe "%s\n" "cyan" "Updating nix channels..."
printfe "%s" "cyan" "System channels: "
sudo -i nix-channel --update
printfe "%s" "cyan" "User channels: "
nix-channel --update
printfe "%s\n" "cyan" "Updating nixos flake..."
cd $HOME/dotfiles/config/nixos && nix --extra-experimental-features nix-command --extra-experimental-features flakes flake update
# Exit if this failed (capture $? first, since the test itself resets it)
rc=$?
if [ $rc -ne 0 ]; then
exit $rc
fi
return
fi
if [ -x "$(command -v apt)" ]; then
printfe "%s\n" "cyan" "Updating apt packages..."
sudo nala upgrade -y
sudo nala autoremove -y --purge
fi
if [ -x "$(command -v yum)" ]; then
printfe "%s\n" "cyan" "Updating yum packages..."
sudo yum update -y
fi
fi
}
cargopkgs() {
printfe "%s\n" "cyan" "Ensuring Cargo packages are installed..."
source $HOME/dotfiles/bin/helpers/cargo_packages.sh
ensure_cargo_packages_installed
}
pipxpkgs() {
if [ ! -x "$(command -v pipx)" ]; then
printfe "%s\n" "yellow" "pipx is not available, skipping pipx packages."
return
fi
printfe "%s\n" "cyan" "Ensuring pipx packages are installed..."
source $HOME/dotfiles/bin/helpers/pipx_packages.sh
ensure_pipx_packages_installed
}
flatpakpkgs() {
if [ ! -x "$(command -v flatpak)" ]; then
printfe "%s\n" "yellow" "Flatpak is not available, skipping Flatpak."
return
fi
if is_wsl; then
printfe "%s\n" "yellow" "Running in WSL, skipping Flatpak."
return
fi
printfe "%s\n" "cyan" "Ensuring Flatpak packages are installed..."
source $HOME/dotfiles/bin/helpers/flatpak_packages.sh
ensure_flatpak_packages_installed
}
homemanager() {
printfe "%s\n" "cyan" "Updating Home Manager flake..."
cd $HOME/dotfiles/config/home-manager && nix --extra-experimental-features nix-command --extra-experimental-features flakes flake update
}
####################################################################################################
# Parse arguments
####################################################################################################
if [ "$#" -eq 0 ]; then
printfe "%s\n" "yellow" "No options passed, running full update..."
symlinks
sys_packages
homemanager
cargopkgs
pipxpkgs
flatpakpkgs
dotf secrets encrypt
else
for arg in "$@"; do
case $arg in
--nixos|nixos|nix|nixos-rebuild)
sys_packages
;;
--home-manager|--homemanager|ha|hm|home)
homemanager
;;
--nix)
sys_packages
homemanager
;;
--symlinks)
symlinks
;;
--packages)
sys_packages
cargopkgs
pipxpkgs
flatpakpkgs
;;
--pipx)
pipxpkgs
;;
--cargo)
cargopkgs
;;
--flatpak)
flatpakpkgs
;;
*)
printfe "%s\n" "red" "Unknown option: $arg"
;;
esac
done
fi
echo ""
printfe "%s\n" "blue" "Done!"

72
bin/actions/upgrade.sh Executable file
View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
help() {
printfe "%s\n" "green" "Usage: upgrade.sh [options]"
printfe "%s\n" "green" "Options:"
printfe "%s\n" "green" " --ha, -H Upgrade Home Manager packages."
printfe "%s\n" "green" " --nix, -X Upgrade NixOS packages."
printfe "%s\n" "green" " --full-speed, -F Upgrade packages and use all available cores for compilation. (Default: 8 cores)"
printfe "%s\n" "green" " --help, -h Display this help message."
exit 0
}
while [[ "$#" -gt 0 ]]; do
case $1 in
--ha|-H) RUN_HA=true ;;
--nix|-X) RUN_NIX=true ;;
--full-speed|-F) FULL_SPEED=true ;;
--help|-h) help ;;
*) echo "Unknown parameter passed: $1";
help ;;
esac
shift
done
if [[ -z "$RUN_HA" && -z "$RUN_NIX" ]]; then
RUN_HA=true
RUN_NIX=true
fi
# Check if --full-speed flag is passed, otherwise use --cores 8 -j 1
if [[ "$FULL_SPEED" == true ]]; then
CORES=$(nproc)
JOBS=$(nproc)
else
CORES=8
JOBS=1
fi
printfe "%s\n" "cyan" "Limiting to $CORES cores with $JOBS jobs."
if [[ "$RUN_NIX" == true ]]; then
if command -v nixos-rebuild &> /dev/null; then
printfe "%s\n" "cyan" "Upgrading NixOS packages..."
cd $HOME/dotfiles/config/nixos && sudo nixos-rebuild switch --upgrade --flake .#$DOTF_HOSTNAME --impure --cores $CORES -j $JOBS
if [[ $? -ne 0 ]]; then
printfe "%s\n" "red" "Failed to upgrade NixOS packages."
exit 1
fi
else
printfe "%s\n" "red" "Skipping nixos-rebuild, NixOS is not installed."
fi
fi
if [[ "$RUN_HA" == true ]]; then
if command -v home-manager &> /dev/null; then
printfe "%s\n" "cyan" "Cleaning old backup files..."
rm -rf $HOME/.config/mimeapps.list.backup
printfe "%s\n" "cyan" "Upgrading Home Manager packages..."
cd $HOME/dotfiles/config/home-manager && NIXPKGS_ALLOW_UNFREE=1 home-manager --extra-experimental-features nix-command --extra-experimental-features flakes switch -b backup --flake .#$DOTF_HOSTNAME --impure --cores $CORES -j $JOBS
if [[ $? -ne 0 ]]; then
printfe "%s\n" "red" "Failed to upgrade Home Manager packages."
exit 1
fi
else
printfe "%s\n" "red" "Home Manager is not installed."
exit 1
fi
fi

251
bin/dotf
View File

@@ -1,136 +1,135 @@
#!/usr/bin/env python3
#!/usr/bin/env bash
import os
import signal
import subprocess
import sys
def signal_handler(sig, frame):
print('Exiting.')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# strict mode
set -euo pipefail
IFS=$'\n\t'
# Script constants
DOTFILES_ROOT = os.path.expanduser("~/.dotfiles")
DOTFILES_BIN = os.path.join(DOTFILES_ROOT, "bin")
DOTFILES_PATH = DOTFILES_ROOT # For compatibility with the original scripts
readonly DOTFILES_ROOT="$HOME/dotfiles"
readonly DOTFILES_BIN="$DOTFILES_ROOT/bin"
readonly DOTFILES_CONFIG="$DOTFILES_ROOT/config/config.yaml"
# Import helper functions
sys.path.append(DOTFILES_BIN)
from helpers.functions import printfe, ensure_dependencies
# Source helper functions
if [[ ! -f "$DOTFILES_BIN/helpers/functions.sh" ]]; then
echo "Error: Required helper functions not found"
exit 1
fi
source "$DOTFILES_BIN/helpers/functions.sh"
ensure_dependencies()
export DOTFILES_CONFIG
def run_script(script_path, args):
"""Run an action script with the given arguments"""
if not os.path.isfile(script_path) or not os.access(script_path, os.X_OK):
printfe("red", f"Error: Script not found or not executable: {script_path}")
# Command functions
update() {
local update_script="$DOTFILES_BIN/actions/update.sh"
if [[ ! -x "$update_script" ]]; then
printfe "%s\n" "red" "Error: Update script not found or not executable"
return 1
result = subprocess.run([script_path] + args, env={**os.environ, "DOTFILES_PATH": DOTFILES_PATH})
return result.returncode
def update(args):
"""Run the update action"""
return run_script(f"{DOTFILES_BIN}/actions/update.py", args)
def hello(args):
"""Run the hello action"""
return run_script(f"{DOTFILES_BIN}/actions/hello.py", args)
def help(args):
"""Run the help action"""
return run_script(f"{DOTFILES_BIN}/actions/help.py", args)
def secrets(args):
"""Run the secrets action"""
return run_script(f"{DOTFILES_BIN}/actions/secrets.py", args)
def auto_start(args):
"""Run the auto-start action"""
return run_script(f"{DOTFILES_BIN}/actions/auto-start.py", args)
def service(args):
"""Run the service/docker action"""
return run_script(f"{DOTFILES_BIN}/actions/service.py", args)
def lint(args):
"""Run the lint action"""
return run_script(f"{DOTFILES_BIN}/actions/lint.py", args)
def timers(args):
"""Run the timers action"""
return run_script(f"{DOTFILES_BIN}/actions/timers.py", args)
def source(args):
"""Run the source action"""
return run_script(f"{DOTFILES_BIN}/actions/source.py", args)
def ensure_git_hooks():
"""Ensure git hooks are correctly set up"""
hooks_dir = os.path.join(DOTFILES_ROOT, ".git/hooks")
target_link = os.path.join(DOTFILES_BIN, "actions/git")
# Validate target directory exists
if not os.path.isdir(target_link):
printfe("red", f"Error: Git hooks source directory does not exist: {target_link}")
return 1
# Handle existing symlink
if os.path.islink(hooks_dir):
current_link = os.readlink(hooks_dir)
if current_link != target_link:
printfe("yellow", "Incorrect git hooks symlink found. Removing and recreating...")
os.remove(hooks_dir)
else:
return 0
# Handle existing directory
if os.path.isdir(hooks_dir) and not os.path.islink(hooks_dir):
printfe("yellow", "Removing existing hooks directory...")
import shutil
shutil.rmtree(hooks_dir)
# Create new symlink
try:
os.symlink(target_link, hooks_dir)
printfe("green", "Git hooks successfully configured!")
return 0
except Exception as e:
printfe("red", f"Failed to create git hooks symlink: {e}")
return 1
def main():
# Ensure we're in the correct directory
if not os.path.isdir(DOTFILES_ROOT):
printfe("red", "Error: Dotfiles directory not found")
return 1
# Setup git hooks
if ensure_git_hooks() != 0:
return 1
# Parse commands
command = sys.argv[1] if len(sys.argv) > 1 else "help"
args = sys.argv[2:]
commands = {
"update": update,
"help": help,
"hello": hello,
"secrets": secrets,
"auto-start": auto_start,
"service": service,
"lint": lint,
"timers": timers,
"source": source
fi
"$update_script" $@
}
if command in commands:
return commands[command](args)
else:
return help([])
upgrade() {
local upgrade_script="$DOTFILES_BIN/actions/upgrade.sh"
if [[ ! -x "$upgrade_script" ]]; then
printfe "%s\n" "red" "Error: Upgrade script not found or not executable"
return 1
fi
"$upgrade_script" $@
}
if __name__ == "__main__":
sys.exit(main())
hello() {
local term_script="$DOTFILES_BIN/actions/hello.sh"
if [[ ! -x "$term_script" ]]; then
printfe "%s\n" "red" "Error: Terminal script not found or not executable"
return 1
fi
"$term_script" "$@"
}
help() {
local help_script="$DOTFILES_BIN/actions/help.sh"
if [[ ! -x "$help_script" ]]; then
printfe "%s\n" "red" "Error: Help script not found or not executable"
return 1
fi
"$help_script" "$@"
}
secrets() {
local secrets_script="$DOTFILES_BIN/actions/secrets.sh"
if [[ ! -x "$secrets_script" ]]; then
printfe "%s\n" "red" "Error: Secrets script not found or not executable"
return 1
fi
"$secrets_script" "$@"
}
auto_start() {
local auto_start_script="$DOTFILES_BIN/actions/auto-start.sh"
if [[ ! -x "$auto_start_script" ]]; then
printfe "%s\n" "red" "Error: Auto-start script not found or not executable"
return 1
fi
"$auto_start_script" "$@"
}
ensure_git_hooks() {
local hooks_dir="$DOTFILES_ROOT/.git/hooks"
local target_link="$DOTFILES_BIN/actions/git"
# Validate target directory exists
if [[ ! -d "$target_link" ]]; then
printfe "%s\n" "red" "Error: Git hooks source directory does not exist: $target_link"
return 1
fi
# Handle existing symlink
if [[ -L "$hooks_dir" ]]; then
local current_link
current_link=$(readlink "$hooks_dir")
if [[ "$current_link" != "$target_link" ]]; then
printfe "%s\n" "yellow" "Incorrect git hooks symlink found. Removing and recreating..."
rm "$hooks_dir"
else
return 0
fi
fi
# Handle existing directory
if [[ -d "$hooks_dir" ]]; then
printfe "%s\n" "yellow" "Removing existing hooks directory..."
rm -rf "$hooks_dir"
fi
# Create new symlink
if ln -s "$target_link" "$hooks_dir"; then
printfe "%s\n" "green" "Git hooks successfully configured!"
else
printfe "%s\n" "red" "Failed to create git hooks symlink"
return 1
fi
}
main() {
# Ensure we're in the correct directory
if [[ ! -d "$DOTFILES_ROOT" ]]; then
printfe "%s\n" "red" "Error: Dotfiles directory not found"
exit 1
fi
# Setup git hooks
ensure_git_hooks || exit 1
# Parse commands
case "${1:-help}" in
update) shift; update "$@" ;;
upgrade) shift; upgrade "$@" ;;
help) shift; help "$@" ;;
hello) shift; hello "$@" ;;
secrets) shift; secrets "$@" ;;
auto-start) shift; auto_start "$@" ;;
*) help ;;
esac
}
main "$@"

1
bin/dotfiles Symbolic link
View File

@@ -0,0 +1 @@
dotf

72
bin/helpers/cargo_packages.sh Executable file
View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
ensure_cargo_packages_installed() {
cargo_packages=($(cat $DOTFILES_CONFIG | shyaml keys config.packages.cargo))
for package in "${cargo_packages[@]}"; do
printfe "%s" "cyan" " - Checking $package..."
echo -en '\r'
# Some entries have a git_url and binary, we need to load these in if they exist
pkg_status=$(cargo install --list | grep -E "^${package}\sv[0-9.]+:$")
package_url=$(cat $DOTFILES_CONFIG | shyaml get-value config.packages.cargo.$package.git_url 2>/dev/null)
binary=$(cat $DOTFILES_CONFIG | shyaml get-value config.packages.cargo.$package.binary 2>/dev/null)
# If pkg_status is `installed` then we don't need to install the package, otherwise if it's empty then the package is not installed
if [ -z "$pkg_status" ]; then
ensure_sudo_privileges "In order to install $package, please provide your password:"
printfe "%s" "yellow" " - Compiling/Installing $package... (This may take a while)"
clear_line
# If package_url is defined we should install via git
if [ -n "$package_url" ]; then
command="cargo install --git $package_url $binary"
else
command="cargo install $package"
fi
# Execute the command
result=$(eval $command 2>&1)
if [ $? -ne 0 ]; then
printfe "%s\n" "red" " - Failed to install $package"
printfe "%s\n" "red" " Command: $command"
printfe "%s\n" "red" " Output: $result"
exit 1
fi
printfe "%s\n" "green" " - Installed $package"
else
printfe "%s\n" "green" " - $package is already installed"
fi
done
}
print_cargo_status() {
printfe "%s" "cyan" "Checking Cargo packages..."
clear_line
cargo_packages=($(cat $DOTFILES_CONFIG | shyaml keys config.packages.cargo))
count=${#cargo_packages[@]}
installed=0
for package in "${cargo_packages[@]}"; do
pkg_status=$(cargo install --list | grep -E "^${package}\sv[0-9.]+:$")
if [ -z "$pkg_status" ]; then
if [ "$verbose" = true ]; then
printfe "%s\n" "red" "$package is not installed"
fi
else
installed=$((installed + 1))
fi
done
printfe "%s" "cyan" "Cargo"
if [ $installed -eq $count ]; then
printfe "%s" "green" " $installed/$count "
else
printfe "%s" "red" " $installed/$count "
fi
printfe "%s\n" "cyan" "packages installed"
}

55
bin/helpers/flatpak_packages.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
ensure_flatpak_packages_installed() {
flatpak_packages=($(ls $HOME/dotfiles/config/flatpaks/ | sed 's/.flatpakref//g'))
for package in "${flatpak_packages[@]}"; do
if ! flatpak list | grep -q $package; then
printfe "%s\n" "cyan" " - Installing $package..."
flatpak install -y flathub $package
if [ $? -eq 0 ]; then
printfe "%s\n" "green" " - $package installed successfully"
else
printfe "%s\n" "red" " - $package failed to install"
fi
else
printfe "%s\n" "green" " - $package is already installed"
fi
done
}
print_flatpak_status() {
if is_wsl; then
printfe "%s\n" "yellow" "Running in WSL, skipping Flatpak packages check."
return
fi
printfe "%s" "cyan" "Checking Flatpak packages..."
clear_line
flatpak_packages=($(ls $HOME/dotfiles/config/flatpaks/ | sed 's/.flatpakref//g'))
count=${#flatpak_packages[@]}
installed=0
for package in "${flatpak_packages[@]}"; do
if flatpak list | grep -q $package; then
installed=$((installed + 1))
else
if [ "$verbose" = true ]; then
printfe "%s\n" "red" "$package is not installed"
fi
fi
done
printfe "%s" "cyan" "Flatpak"
if [ $installed -eq $count ]; then
printfe "%s" "green" " $installed/$count "
else
printfe "%s" "red" " $installed/$count "
fi
printfe "%s\n" "cyan" "packages installed"
}

View File

@@ -1,177 +0,0 @@
#!/usr/bin/env python3
import sys
import subprocess
import math
import random
import shutil
import datetime
try:
import pyfiglet
except ImportError:
pyfiglet = None
# Color codes for terminal output
COLORS = {
"black": "\033[0;30m",
"red": "\033[0;31m",
"green": "\033[0;32m",
"yellow": "\033[0;33m",
"blue": "\033[0;34m",
"purple": "\033[0;35m",
"cyan": "\033[0;36m",
"white": "\033[0;37m",
"grey": "\033[0;90m", # Added grey color for timestamp
"reset": "\033[0m",
}
def printfe(color, message, show_time=True):
"""
Print a formatted message with the specified color
With timestamp and message type prefix similar to setup.sh
"""
color_code = COLORS.get(color.lower(), COLORS["reset"])
if show_time:
# Add timestamp
timestamp = datetime.datetime.now().strftime("%H:%M:%S")
print(f"{COLORS['grey']}{timestamp}{COLORS['reset']}", end="")
# Add message type based on color
if color.lower() in ["green", "cyan", "blue", "purple"]:
print(f"{COLORS['green']} INF {COLORS['reset']}", end="")
elif color.lower() == "yellow":
print(f"{COLORS['yellow']} WRN {COLORS['reset']}", end="")
elif color.lower() == "red":
print(f"{COLORS['red']} ERR {COLORS['reset']}", end="")
# Print the actual message with color
print(f"{color_code}{message}{COLORS['reset']}")
def println(message, color=None):
"""Print a line with optional color"""
if color:
printfe(color, message)
else:
printfe("reset", message)
def _rainbow_color(text, freq=0.1, offset=0):
"""Apply rainbow colors to text similar to lolcat"""
colored_text = ""
for i, char in enumerate(text):
if char.strip(): # Only color non-whitespace characters
# Calculate RGB values using sine waves with phase shifts
r = int(127 * math.sin(freq * i + offset + 0) + 128)
g = int(127 * math.sin(freq * i + offset + 2 * math.pi / 3) + 128)
b = int(127 * math.sin(freq * i + offset + 4 * math.pi / 3) + 128)
# Apply the RGB color to the character
colored_text += f"\033[38;2;{r};{g};{b}m{char}\033[0m"
else:
colored_text += char
return colored_text
def logo(continue_after=False):
"""Display the dotfiles logo"""
try:
# Try to read logo file first for backward compatibility
if pyfiglet:
# Generate ASCII art with pyfiglet and rainbow colors
ascii_art = pyfiglet.figlet_format("Menno's Dotfiles", font="slant")
print("\n") # Add some space before the logo
# Use a random offset to vary the rainbow start position
random_offset = random.random() * 2 * math.pi
line_offset = 0
for line in ascii_art.splitlines():
# Add a little variation to each line
print(_rainbow_color(line, offset=random_offset + line_offset))
line_offset += 0.1
else:
# Fallback if pyfiglet is not available
printfe("yellow", "\n *** Menno's Dotfiles ***\n")
printfe("cyan", " Note: Install pyfiglet for better logo display")
printfe("cyan", " (pip install pyfiglet)\n")
if not continue_after:
sys.exit(0)
except Exception as e:
printfe("red", f"Error displaying logo: {e}")
def run_command(command, shell=False):
"""Run a shell command and return the result"""
try:
if not shell and not shutil.which(command[0]):
return False, f"Command '{command[0]}' not found"
result = subprocess.run(
command,
shell=shell,
check=True,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return True, result.stdout.strip()
except subprocess.CalledProcessError as e:
return False, e.stderr.strip()
except FileNotFoundError:
return False, f"Command '{command[0]}' not found"
def command_exists(command):
"""Check if a command exists in the PATH"""
return shutil.which(command) is not None
def ensure_dependencies():
"""Check and install required dependencies for the dotfiles system"""
required_packages = [
"pyfiglet", # For ASCII art generation
]
# Check if pip is available
success, _ = run_command(["pip", "--version"])
if not success:
printfe(
"red",
"Pip is required to install missing dependencies, retry after running `dotf update`",
)
return False
missing_packages = []
for package in required_packages:
try:
__import__(package)
except ImportError:
missing_packages.append(package)
if missing_packages:
printfe("yellow", f"Missing dependencies: {', '.join(missing_packages)}")
install = input("Would you like to install them now? (y/n): ").lower()
if install == "y" or install == "yes":
printfe("cyan", "Installing missing dependencies...")
for package in missing_packages:
printfe("blue", f"Installing {package}...")
success, output = run_command(
["pip", "install", "--user", package, "--break-system-packages"]
)
if success:
printfe("green", f"Successfully installed {package}")
else:
printfe("red", f"Failed to install {package}: {output}")
printfe("green", "All dependencies have been processed")
return True
else:
printfe("yellow", "Skipping dependency installation")
return False
return True

262
bin/helpers/functions.sh Executable file
View File

@@ -0,0 +1,262 @@
#!/usr/bin/env bash
# Color print function, usage: println "message" "color"
println() {
color=$2
printfe "%s\n" $color "$1"
}
is_wsl() {
if [ -f "/proc/sys/fs/binfmt_misc/WSLInterop" ]; then
return 0
else
return 1
fi
}
logo() {
echo "Menno's Dotfiles" | figlet | lolcat
if [[ $(trash-list | wc -l) -gt 0 ]]; then
printfe "%s" "yellow" "[!] $(trash-list | wc -l | tr -d ' ') file(s) in trash - "
fi
# Print if repo is dirty and the count of untracked files, modified files and staged files
if [[ $(git -C ~/dotfiles status --porcelain) ]]; then
printfe "%s" "yellow" "dotfiles is dirty "
printfe "%s" "red" "[$(git -C ~/dotfiles status --porcelain | grep -c '^??')] untracked "
printfe "%s" "yellow" "[$(git -C ~/dotfiles status --porcelain | grep -c '^ M')] modified "
printfe "%s" "green" "[$(git -C ~/dotfiles status --porcelain | grep -c '^M ')] staged "
fi
printfe "%s" "blue" "[$(git -C ~/dotfiles rev-parse --short HEAD)] "
if [[ $(git -C ~/dotfiles log origin/master..HEAD) ]]; then
printfe "%s" "yellow" "[!] You have $(git -C ~/dotfiles log origin/master..HEAD --oneline | wc -l | tr -d ' ') commit(s) to push"
fi
println "" "normal"
}
# print colored with printf (args: format, color, message ...)
printfe() {
format=$1
color=$2
shift 2
red=$(tput setaf 1)
green=$(tput setaf 2)
yellow=$(tput setaf 3)
blue=$(tput setaf 4)
magenta=$(tput setaf 5)
cyan=$(tput setaf 6)
normal=$(tput sgr0)
case $color in
"red")
color=$red
;;
"green")
color=$green
;;
"yellow")
color=$yellow
;;
"blue")
color=$blue
;;
"magenta")
color=$magenta
;;
"cyan")
color=$cyan
;;
*)
color=$normal
;;
esac
printf "$color$format$normal" "$@"
}
ensure_package_installed() {
if ! command -v $1 &>/dev/null; then
println "$1 is not installed. Please install it." "red"
exit 1
fi
println " - $1 is available." "green"
}
ensure_sudo_privileges() {
if sudo -n true 2>/dev/null; then
return
else
println "$1" "yellow"
sudo true
fi
}
function exesudo ()
{
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
#
# LOCAL VARIABLES:
#
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
#
# I use underscores to remember it's been passed
local _funcname_="$1"
local params=( "$@" ) ## array containing all params passed here
local tmpfile="/dev/shm/$RANDOM" ## temporary file
local content ## content of the temporary file
local regex ## regular expression
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
#
# MAIN CODE:
#
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
#
# WORKING ON PARAMS:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Shift the first param (which is the name of the function)
unset params[0] ## remove first element
# params=( "${params[@]}" ) ## repack array
#
# WORKING ON THE TEMPORARY FILE:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
content="#!/bin/bash\n\n"
#
# Write the params array
content="${content}params=(\n"
regex="\s+"
for param in "${params[@]}"
do
if [[ "$param" =~ $regex ]]
then
content="${content}\t\"${param}\"\n"
else
content="${content}\t${param}\n"
fi
done
content="$content)\n"
echo -e "$content" > "$tmpfile"
#
# Append the function source
echo "#$( type "$_funcname_" )" >> "$tmpfile"
#
# Append the call to the function
echo -e "\n$_funcname_ \"\${params[@]}\"\n" >> "$tmpfile"
#
# DONE: EXECUTE THE TEMPORARY FILE WITH SUDO
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sudo bash "$tmpfile"
rm "$tmpfile"
}
resolve_path() {
echo "$(cd "$(dirname "$1")"; pwd)/$(basename "$1")"
}
check_or_make_symlink() {
source /home/menno/dotfiles/bin/helpers/functions.sh
SOURCE="$1"
TARGET="$2"
# Take any ~ and replace it with $HOME
SOURCE="${SOURCE/#\~/$HOME}"
TARGET="${TARGET/#\~/$HOME}"
# Ensure the parent directory of the target exists
mkdir -p "$(dirname "$TARGET")"
# if source doesn't exist it's likely a secret that hasn't been decrypted yet
if [ ! -e "$SOURCE" ]; then
printfe "%s\n" "yellow" " - Source $SOURCE doesn't exist"
return
fi
SOURCE=$(resolve_path "$SOURCE")
TARGET=$(resolve_path "$TARGET")
# Check if we have permissions to create the symlink
if [ ! -w "$(dirname "$TARGET")" ]; then
# Check if link exists
if [ -L "$TARGET" ]; then
# Check if it points to the correct location
if [ "$(readlink "$TARGET")" != "$SOURCE" ]; then
exesudo check_or_make_symlink "$SOURCE" "$TARGET"
return
fi
else
# Link doesn't exist but we don't have permissions to create it, so we should try to create it with sudo
exesudo check_or_make_symlink "$SOURCE" "$TARGET"
fi
return
fi
# If target is already a symlink, we should check if it points to the correct location
if [ -L "$TARGET" ]; then
if [ "$(readlink "$TARGET")" != "$SOURCE" ]; then
printfe "%s\n" "yellow" " - Symlink $TARGET exists but points to the wrong location"
printfe "%s\n" "yellow" " Expected: $SOURCE"
printfe "%s\n" "yellow" " Actual: $(readlink "$TARGET")"
printfe "%s\n" "yellow" " Fixing symlink"
rm "$TARGET"
mkdir -p "$(dirname "$TARGET")"
ln -s "$SOURCE" "$TARGET"
printfe "%s\n" "green" " Created symlink $TARGET -> $SOURCE"
return
fi
fi
# If target is a file and it's not a symlink, we should back it up
if [ -f "$TARGET" ] && [ ! -L "$TARGET" ]; then
printfe "%s\n" "yellow" " - File $TARGET exists, backing up and creating symlink"
mv "$TARGET" "$TARGET.bak"
fi
# If the target is already a symlink, and it points to the correct location, we should return and be happy
if [ -L "$TARGET" ]; then
printfe "%s" "green" " - OK: "
printfe "%-30s" "blue" "$SOURCE"
printfe "%s" "cyan" " -> "
printfe "%-30s\n" "blue" "$TARGET"
return
fi
# Create the symlink
mkdir -p "$(dirname "$TARGET")"
ln -s "$SOURCE" "$TARGET"
# Check if the symlink was created successfully
if [ ! -L "$TARGET" ]; then
printfe "%s\n" "red" " - Failed to create symlink $TARGET -> $SOURCE"
return
fi
printfe "%s" "green" " - Added new symlink: "
printfe "%-30s" "blue" "$SOURCE"
printfe "%s" "cyan" " -> "
printfe "%-30s\n" "blue" "$TARGET"
}
clear_line() {
echo -en "\r"
}

55
bin/helpers/pipx_packages.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh
ensure_pipx_packages_installed() {
pipx_packages=($(cat $DOTFILES_CONFIG | shyaml get-values config.packages.pipx))
for i in "${pipx_packages[@]}";
do
printfe "%s" "cyan" " - Fetching package details for $i"
echo -en '\r'
if pipx list | grep --quiet ${i}; then
printfe "%s\n" "green" " - $i is already installed."
continue
fi
printfe "%s" "cyan" " - Installing $i..."
echo -en '\r'
pipx install $i
if [ $? -ne 0 ]; then
printfe "%s\n" "red" " - Failed to install $i"
continue
fi
printfe "%s\n" "green" " - $i installed."
done
}
print_pipx_status() {
printfe "%s" "cyan" "Checking pipx packages..."
clear_line
pipx_packages=($(cat $DOTFILES_CONFIG | shyaml get-values config.packages.pipx))
count=${#pipx_packages[@]}
installed=0
for package in "${pipx_packages[@]}"; do
if pipx list | grep -q $package; then
installed=$((installed + 1))
else
if [ "$verbose" = true ]; then
printfe "%s\n" "red" "$package is not installed"
fi
fi
done
printfe "%s" "cyan" "pipx"
if [ $installed -eq $count ]; then
printfe "%s" "green" " $installed/$count "
else
printfe "%s" "red" " $installed/$count "
fi
printfe "%s\n" "cyan" "packages installed"
}

View File

@@ -1,35 +1,11 @@
Usage: dotf [OPTIONS] [ARGS]
update: Update everything in the dotfiles repository.
Options:
--ha, -H Upgrade Home Manager packages
--ansible, -A Upgrade Ansible packages
--ansible-verbose Upgrade Ansible packages with verbose output (-vvv)
--full-speed, -F Use all available cores for compilation (Default: 8 cores)
update: Pull latest changes, and update symlinks and configurations
Also pulls latest nix channels and updates flakes to latest versions.
upgrade: Runs the flake-based switch variants: nixos-rebuild switch --upgrade and home-manager switch.
secrets: Encrypt and decrypt secrets.
Commands:
encrypt Encrypt all files in the secrets folder
decrypt Decrypt all .gpg files in the secrets folder
service: Manage Docker services for development.
Commands:
start SERVICE Start a Docker service
stop SERVICE Stop a Docker service
restart SERVICE Restart a Docker service
update SERVICE Update a Docker service (pull new images and recreate)
update --all Update all running services
logs SERVICE Show Docker service logs
ps [SERVICE] Show Docker service status
list, ls List available Docker services
lint: Run linters on dotfiles.
Options:
--ansible Run only ansible-lint
--nix Run only nixfmt
--python Run only Python linters (pylint, black)
--fix Auto-fix issues where possible
auto-start: Start a set of pre-defined applications.
hello: Shows the welcome message for the terminal.
help: Shows this help message

View File

@@ -1,22 +0,0 @@
#cloud-config
# Create a user named menno with sudo privileges
users:
- name: menno
sudo: ALL=(ALL) NOPASSWD:ALL
groups: sudo, adm
shell: /bin/bash
ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM+sKpcREOUjwMMSzEWAso6830wbOi8kUxqpuXWw5gHr menno_1password
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE22Hfx8wgkc57TXX1TCMHcNrCdjbfog5QeHFJfl7IeD menno_fallback
# Update package lists and install latest updates
package_update: true
package_upgrade: true
# Configure system to preserve hostname
preserve_hostname: false
hostname: mennos-vm
# Final message when cloud-init completes
final_message: "Cloud-init has finished setting up the system with user 'menno'. System boot completed after $UPTIME seconds."

View File

@@ -1,25 +0,0 @@
# Ansible Configuration
## 1Password Integration
This Ansible configuration includes a custom lookup plugin for fetching secrets from 1Password.
The 1Password CLI must be installed and authenticated on the machine running Ansible.
See [1Password Integration Readme](plugins/lookup/README.md)
### Prerequisites
1. Install 1Password CLI
2. Sign in to 1Password using `op signin`
3. Service account should be properly configured
### Finding Vault IDs
To find your vault ID:
```bash
op vault list
```
For more information, see the [1Password CLI documentation](https://developer.1password.com/docs/cli).
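As a quick sanity check that the plugin and CLI are wired up, an ad-hoc lookup like the one below should print a secret to the terminal (the item, vault, and field names are examples; substitute whatever your playbook actually references):
```bash
# Ad-hoc test of the onepassword lookup (item/vault/field names are examples)
ansible localhost -m debug \
  -a "msg={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}"
```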

View File

@@ -1,5 +0,0 @@
[defaults]
inventory = inventory
roles_path = roles
collections_paths = collections
retry_files_enabled = False

View File

@@ -1,124 +0,0 @@
# Dynamic DNS OnePassword Setup
This document explains how to set up the required OnePassword entries for the Dynamic DNS automation.
## Overview
The Dynamic DNS task automatically retrieves credentials from OnePassword using the Ansible OnePassword lookup plugin. This eliminates the need for vault files and provides better security.
## Required OnePassword Entries
### 1. CloudFlare API Token
**Location:** `CloudFlare API Token` in `Dotfiles` vault, field `password`
**Setup Steps:**
1. Go to [CloudFlare API Tokens](https://dash.cloudflare.com/profile/api-tokens)
2. Click "Create Token"
3. Use the "Edit zone DNS" template
4. Configure permissions:
- Zone: DNS: Edit
- Zone Resources: Include all zones (or specific zones for your domains)
5. Add IP address filtering if desired (optional but recommended)
6. Click "Continue to summary" and "Create Token"
7. Copy the token and save it in OnePassword:
- Title: `CloudFlare API Token`
- Vault: `Dotfiles`
- Field: `password` (this should be the main password field)
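If you prefer the terminal over the web UI for the last step, a sketch using 1Password CLI v2 along these lines should create the entry (the category and field-assignment syntax here are assumptions; adjust to your vault conventions):
```bash
# Hypothetical: create the item via the op CLI instead of the web UI
op item create --category=password --vault=Dotfiles \
  --title='CloudFlare API Token' 'password=YOUR_TOKEN_HERE'
```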
### 2. Telegram Bot Credentials
**Location:** `Telegram DynDNS Bot` in `Dotfiles` vault, fields `password` and `chat_id`
**Setup Steps:**
#### Create Telegram Bot:
1. Message [@BotFather](https://t.me/BotFather) on Telegram
2. Send `/start` then `/newbot`
3. Follow the prompts to create your bot
4. Save the bot token (format: `123456789:ABCdefGHijklMNopQRstUVwxyz`)
#### Get Chat ID:
1. Send any message to your new bot
2. Visit: `https://api.telegram.org/bot<YOUR_BOT_TOKEN>/getUpdates`
3. Look for `"chat":{"id":YOUR_CHAT_ID}` in the response
4. Save the chat ID (format: `987654321` or `-987654321` for groups)
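Before saving anything, you can confirm that the token and chat ID pair actually works by sending yourself a test message (a one-off check; replace both placeholders with your real values):
```bash
# Should deliver "dyndns test" to your chat if the token and chat ID are valid
curl -s "https://api.telegram.org/bot<YOUR_BOT_TOKEN>/sendMessage" \
  -d "chat_id=<YOUR_CHAT_ID>" -d "text=dyndns test"
```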
#### Save in OnePassword:
- Title: `Telegram DynDNS Bot`
- Vault: `Dotfiles`
- Fields:
- `password`: Your bot token (123456789:ABCdefGHijklMNopQRstUVwxyz)
- `chat_id`: Your chat ID (987654321)
## Verification
You can test that the OnePassword lookups work by running:
```bash
# Test CloudFlare token lookup
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}"
# Test Telegram bot token
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}"
# Test Telegram chat ID
ansible localhost -m debug -a "msg={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}"
```
## Security Notes
- Credentials are never stored in version control
- Environment file (`~/.local/bin/dynamic-dns.env`) has 600 permissions (see the quick check below)
- OnePassword CLI must be authenticated before running Ansible
- Make sure to run `op signin` before executing the playbook
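The permissions claim above is easy to verify once the playbook has run (a quick check, assuming the env file exists at the documented path):
```bash
# Should print "600" followed by the path
stat -c '%a %n' ~/.local/bin/dynamic-dns.env
```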
## Troubleshooting
### OnePassword CLI Not Authenticated
```bash
op signin
```
### Missing Fields in OnePassword
Ensure the exact field names match:
- CloudFlare: field must be named `password`
- Telegram: fields must be named `password` and `chat_id`
### Invalid CloudFlare Token
- Check token has `Zone:DNS:Edit` permissions
- Verify token is active in CloudFlare dashboard
- Test with: `curl -H "Authorization: Bearer YOUR_TOKEN" https://api.cloudflare.com/client/v4/user/tokens/verify`
### Telegram Not Working
- Ensure you've sent at least one message to your bot
- Verify chat ID format (numbers only, may start with -)
- Test with: `go run dynamic-dns-cf.go --test-telegram`
## Usage
Once set up, the dynamic DNS will automatically:
- Update DNS records every 15 minutes
- Send Telegram notifications when IP changes
- Log all activity to system journal (`journalctl -t dynamic-dns`)
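To watch this in action, the journal tag and timer mentioned above can be inspected directly (assuming the unit names used elsewhere in this repo):
```bash
# Follow the dynamic DNS log stream and see when the timer fires next
journalctl -t dynamic-dns -f
systemctl list-timers | grep dynamic-dns
```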
## Domains Configured
The automation updates these domains:
- `vleeuwen.me`
- `mvl.sh`
- `mennovanleeuwen.nl`
To modify the domain list, edit the wrapper script at:
`~/.local/bin/dynamic-dns-update.sh`

View File

@@ -1,31 +0,0 @@
---
flatpaks: false
install_ui_apps: false
# Countries that are allowed to access the server Caddy reverse proxy
allowed_countries_codes:
- US # United States
- CA # Canada
- GB # United Kingdom
- DE # Germany
- FR # France
- ES # Spain
- IT # Italy
- NL # Netherlands
- AU # Australia
- NZ # New Zealand
- JP # Japan
- KR # South Korea
- SK # Slovakia
- FI # Finland
- DK # Denmark
- SG # Singapore
- AT # Austria
- CH # Switzerland
# IP ranges for blocked countries (generated automatically)
# This will be populated by the country blocking script
blocked_countries: []
# Enable/disable country blocking globally
enable_country_blocking: true

View File

@@ -1,3 +0,0 @@
---
flatpaks: true
install_ui_apps: true

View File

@@ -1,30 +0,0 @@
---
- name: Systemctl daemon-reload
become: true
ansible.builtin.systemd:
daemon_reload: true
- name: Restart SSH service
become: true
ansible.builtin.service:
name: ssh
state: restarted
enabled: true
- name: reload systemd
become: true
ansible.builtin.systemd:
daemon_reload: true
- name: restart borg-local-sync
become: true
ansible.builtin.systemd:
name: borg-local-sync.service
state: restarted
enabled: true
- name: restart borg-local-sync-timer
become: true
ansible.builtin.systemd:
name: borg-local-sync.timer
state: restarted
enabled: true

View File

@@ -1,8 +0,0 @@
[workstations]
mennos-laptop ansible_connection=local
mennos-desktop ansible_connection=local
[servers]
mennos-server ansible_connection=local
mennos-vm ansible_connection=local
mennos-desktop ansible_connection=local

View File

@@ -1,19 +0,0 @@
---
- name: Configure all hosts
hosts: all
handlers:
- name: Import handler tasks
ansible.builtin.import_tasks: handlers/main.yml
gather_facts: true
tasks:
- name: Include global tasks
ansible.builtin.import_tasks: tasks/global/global.yml
- name: Include workstation tasks
ansible.builtin.import_tasks: tasks/workstations/workstation.yml
when: inventory_hostname in ['mennos-laptop', 'mennos-desktop']
- name: Include server tasks
ansible.builtin.import_tasks: tasks/servers/server.yml
when: inventory_hostname in ['mennos-server', 'mennos-hobbypc', 'mennos-vm', 'mennos-desktop']

View File

@@ -1,4 +0,0 @@
---
# Collections section
collections:
- community.general

View File

@@ -1,53 +0,0 @@
---
- name: Check if Docker CE is installed
ansible.builtin.command: docker --version
register: docker_check
changed_when: false
failed_when: false
# Arch-based distributions (CachyOS, Arch Linux, etc.)
- name: Install Docker on Arch-based systems
community.general.pacman:
name:
- docker
- docker-compose
- docker-buildx
state: present
become: true
when: docker_check.rc != 0 and ansible_pkg_mgr == 'pacman'
# Non-Arch distributions
- name: Download Docker installation script
ansible.builtin.get_url:
url: https://get.docker.com
dest: /tmp/get-docker.sh
mode: "0755"
when: docker_check.rc != 0 and ansible_pkg_mgr != 'pacman'
- name: Install Docker CE on non-Arch systems
ansible.builtin.shell: bash -c 'set -o pipefail && sh /tmp/get-docker.sh'
args:
executable: /bin/bash
creates: /usr/bin/docker
when: docker_check.rc != 0 and ansible_pkg_mgr != 'pacman'
- name: Add user to docker group
ansible.builtin.user:
name: "{{ ansible_user }}"
groups: docker
append: true
become: true
when: docker_check.rc != 0
- name: Enable and start docker service
ansible.builtin.systemd:
name: docker
state: started
enabled: true
become: true
- name: Reload systemd
ansible.builtin.systemd:
daemon_reload: true
become: true
notify: Reload systemd

View File

@@ -1,137 +0,0 @@
---
- name: Include global symlinks tasks
ansible.builtin.import_tasks: tasks/global/symlinks.yml
- name: Gather package facts
ansible.builtin.package_facts:
manager: auto
become: true
- name: Debug ansible_facts for troubleshooting
ansible.builtin.debug:
msg: |
OS Family: {{ ansible_facts['os_family'] }}
Distribution: {{ ansible_facts['distribution'] }}
Package Manager: {{ ansible_pkg_mgr }}
Kernel: {{ ansible_kernel }}
tags: debug
- name: Include Tailscale tasks
ansible.builtin.import_tasks: tasks/global/tailscale.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include Docker tasks
ansible.builtin.import_tasks: tasks/global/docker.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include Ollama tasks
ansible.builtin.import_tasks: tasks/global/ollama.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Include OpenSSH Server tasks
ansible.builtin.import_tasks: tasks/global/openssh-server.yml
become: true
when: "'microsoft-standard-WSL2' not in ansible_kernel"
- name: Ensure common packages are installed on Arch-based systems
ansible.builtin.package:
name:
- git
- vim
- curl
- wget
- httpie
- python
- python-pip
- python-pipx
- python-pylint
- go
state: present
become: true
when: ansible_pkg_mgr == 'pacman'
- name: Ensure common packages are installed on non-Arch systems
ansible.builtin.package:
name:
- git
- vim
- curl
- wget
- httpie
- python3
- python3-pip
- python3-venv
- pylint
- black
- pipx
- nala
- golang
state: present
become: true
when: ansible_pkg_mgr != 'pacman'
- name: Configure performance optimizations
ansible.builtin.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
become: true
loop:
- { name: "vm.max_map_count", value: "16777216" }
# --- PBinCLI via pipx ---
- name: Ensure pbincli is installed with pipx
ansible.builtin.command: pipx install pbincli
args:
creates: ~/.local/bin/pbincli
environment:
PIPX_DEFAULT_PYTHON: /usr/bin/python3
become: false
- name: Ensure ~/.config/pbincli directory exists
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.config/pbincli"
state: directory
mode: "0755"
- name: Configure pbincli to use custom server
ansible.builtin.copy:
dest: "{{ ansible_env.HOME }}/.config/pbincli/pbincli.conf"
content: |
server=https://bin.mvl.sh
mode: "0644"
- name: Include WSL2 tasks
ansible.builtin.import_tasks: tasks/global/wsl.yml
when: "'microsoft-standard-WSL2' in ansible_kernel"
- name: Include Utils tasks
ansible.builtin.import_tasks: tasks/global/utils.yml
become: true
tags: utils
- name: Ensure ~/.hushlogin exists
ansible.builtin.stat:
path: ~/.hushlogin
register: hushlogin_stat
- name: Create ~/.hushlogin if it does not exist
ansible.builtin.file:
path: ~/.hushlogin
state: touch
mode: "0644"
when: not hushlogin_stat.stat.exists
# Ensure pwfeedback is enabled in sudoers for better password UX
- name: Ensure pwfeedback is present in Defaults env_reset line in /etc/sudoers
ansible.builtin.replace:
path: /etc/sudoers
regexp: '^Defaults\s+env_reset(?!.*pwfeedback)'
replace: 'Defaults env_reset,pwfeedback'
validate: 'visudo -cf %s'
become: true
tags: sudoers

View File

@@ -1,27 +0,0 @@
---
- name: Check if Ollama is installed
ansible.builtin.command: ollama --version
register: ollama_check
changed_when: false
failed_when: false
- name: Download Ollama install script
ansible.builtin.get_url:
url: https://ollama.com/install.sh
dest: /tmp/install_ollama.sh
mode: "0755"
when: ollama_check.rc != 0
- name: Install Ollama
ansible.builtin.command: bash -c 'set -o pipefail && sh /tmp/install_ollama.sh'
when: ollama_check.rc != 0
args:
creates: /usr/local/bin/ollama
- name: Check if Ollama is running
ansible.builtin.systemd:
name: ollama
state: started
enabled: true
become: true
register: ollama_service

View File

@@ -1,36 +0,0 @@
---
- name: Ensure openssh-server is installed on Arch-based systems
ansible.builtin.package:
name: openssh
state: present
when: ansible_pkg_mgr == 'pacman'
- name: Ensure openssh-server is installed on non-Arch systems
ansible.builtin.package:
name: openssh-server
state: present
when: ansible_pkg_mgr != 'pacman'
- name: Ensure SSH service is enabled and running on Arch-based systems
ansible.builtin.service:
name: sshd
state: started
enabled: true
when: ansible_pkg_mgr == 'pacman'
- name: Ensure SSH service is enabled and running on non-Arch systems
ansible.builtin.service:
name: ssh
state: started
enabled: true
when: ansible_pkg_mgr != 'pacman'
- name: Ensure SSH server configuration is proper
ansible.builtin.template:
src: templates/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
validate: "/usr/sbin/sshd -t -f %s"
notify: Restart SSH service

View File

@@ -1,41 +0,0 @@
---
- name: Server setup
block:
- name: Set user home directory
ansible.builtin.set_fact:
user_home: "{{ ansible_env.HOME if ansible_user_id == 'root' else lookup('env', 'HOME') }}"
- name: Create basic symlinks
ansible.builtin.file:
src: "{{ item.src | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
dest: "{{ item.dest | replace('~', user_home) }}"
state: link
force: true
follow: false
loop:
- {
src: "$DOTFILES_PATH/config/home-manager",
dest: "~/.config/home-manager",
}
- { src: "$DOTFILES_PATH/config/ssh/config", dest: "~/.ssh/config" }
- {
src: "$DOTFILES_PATH/config/starship.toml",
dest: "~/.config/starship.toml",
}
- { src: "$DOTFILES_PATH/.bashrc", dest: "~/.bashrc.extra" }
- name: Create gitconfig symlink
ansible.builtin.file:
src: "{{ gitconfig_mapping[inventory_hostname] | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
dest: "{{ user_home }}/.gitconfig"
state: link
force: true
follow: false
vars:
gitconfig_mapping:
mennos-desktop: "$DOTFILES_PATH/config/git/gitconfig.linux"
mennos-laptop: "$DOTFILES_PATH/config/git/gitconfig.linux"
mennos-server: "$DOTFILES_PATH/config/git/gitconfig.mennos-server"
mennos-vm: "$DOTFILES_PATH/config/git/gitconfig.mennos-server"
tags:
- symlinks

View File

@@ -1,32 +0,0 @@
---
- name: Check if Tailscale is installed
ansible.builtin.command: which tailscale
register: tailscale_check
changed_when: false
failed_when: false
- name: Install Tailscale using curl script
ansible.builtin.shell: curl -fsSL https://tailscale.com/install.sh | sh
args:
creates: /usr/bin/tailscale
when: tailscale_check.rc != 0
become: true
- name: Check if Tailscale is running
ansible.builtin.command: tailscale status
register: tailscale_status
changed_when: false
failed_when: false
- name: Enable and start Tailscale service
ansible.builtin.systemd:
name: tailscaled
state: started
enabled: true
daemon_reload: true
become: true
- name: Notify user to authenticate Tailscale
ansible.builtin.debug:
msg: "Please authenticate Tailscale by running: sudo tailscale up --operator=$USER"
when: tailscale_status.rc != 0

View File

@@ -1,62 +0,0 @@
---
- name: Process utils files
block:
- name: Load DOTFILES_PATH environment variable
ansible.builtin.set_fact:
dotfiles_path: "{{ lookup('env', 'DOTFILES_PATH') }}"
become: false
- name: Ensure ~/.local/bin exists
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.local/bin"
state: directory
mode: "0755"
become: false
- name: Scan utils folder for files
ansible.builtin.find:
paths: "{{ dotfiles_path }}/config/ansible/tasks/global/utils"
file_type: file
register: utils_files
become: false
- name: Scan utils folder for Go projects (directories with go.mod)
ansible.builtin.find:
paths: "{{ dotfiles_path }}/config/ansible/tasks/global/utils"
file_type: directory
recurse: true
register: utils_dirs
become: false
- name: Filter directories that contain go.mod files
ansible.builtin.stat:
path: "{{ item.path }}/go.mod"
loop: "{{ utils_dirs.files }}"
register: go_mod_check
become: false
- name: Create symlinks for utils scripts
ansible.builtin.file:
src: "{{ item.path }}"
dest: "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename }}"
state: link
loop: "{{ utils_files.files }}"
when: not item.path.endswith('.go')
become: false
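# Standalone .go files build to a binary named after the file (minus .go);
# project directories containing a go.mod build to a binary named after the directory.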
- name: Compile standalone Go files and place binaries in ~/.local/bin
ansible.builtin.command:
cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename | regex_replace('\.go$', '') }}" "{{ item.path }}"
loop: "{{ utils_files.files }}"
when: item.path.endswith('.go')
become: false
- name: Compile Go projects and place binaries in ~/.local/bin
ansible.builtin.command:
cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.item.path | basename }}" .
chdir: "{{ item.item.path }}"
loop: "{{ go_mod_check.results }}"
when: item.stat.exists
become: false
tags:
- utils

View File

@@ -1,903 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
)
// CloudFlare API structures
type CloudFlareResponse struct {
Success bool `json:"success"`
Errors []CloudFlareError `json:"errors"`
Result json.RawMessage `json:"result"`
Messages []CloudFlareMessage `json:"messages"`
}
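// Result stays a json.RawMessage so each endpoint can decode it into its own type.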
type CloudFlareError struct {
Code int `json:"code"`
Message string `json:"message"`
}
type CloudFlareMessage struct {
Code int `json:"code"`
Message string `json:"message"`
}
type DNSRecord struct {
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
Content string `json:"content"`
TTL int `json:"ttl"`
ZoneID string `json:"zone_id"`
}
type Zone struct {
ID string `json:"id"`
Name string `json:"name"`
}
type TokenVerification struct {
ID string `json:"id"`
Status string `json:"status"`
}
type NotificationInfo struct {
RecordName string
OldIP string
NewIP string
IsNew bool
}
// Configuration
type Config struct {
APIToken string
RecordNames []string
IPSources []string
DryRun bool
Verbose bool
Force bool
TTL int
TelegramBotToken string
TelegramChatID string
Client *http.Client
}
// Default IP sources
var defaultIPSources = []string{
"https://ifconfig.co/ip",
"https://ip.seeip.org",
"https://ipv4.icanhazip.com",
"https://api.ipify.org",
}
func main() {
config := &Config{
Client: &http.Client{Timeout: 10 * time.Second},
}
// Command line flags
var ipSourcesFlag string
var recordsFlag string
var listZones bool
var testTelegram bool
flag.StringVar(&recordsFlag, "record", "", "DNS A record name(s) to update - comma-separated for multiple (required)")
flag.StringVar(&ipSourcesFlag, "ip-sources", "", "Comma-separated list of IP detection services (optional)")
flag.BoolVar(&config.DryRun, "dry-run", false, "Show what would be done without making changes")
flag.BoolVar(&config.Verbose, "verbose", false, "Enable verbose logging")
flag.BoolVar(&listZones, "list-zones", false, "List all accessible zones and exit")
flag.BoolVar(&config.Force, "force", false, "Force update even if IP hasn't changed")
flag.BoolVar(&testTelegram, "test-telegram", false, "Send a test Telegram notification and exit")
flag.IntVar(&config.TTL, "ttl", 300, "TTL for DNS record in seconds")
// Custom usage function
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "CloudFlare Dynamic DNS Tool\n\n")
fmt.Fprintf(os.Stderr, "Updates CloudFlare DNS A records with your current public IP address.\n")
fmt.Fprintf(os.Stderr, "Supports multiple records, dry-run mode, and Telegram notifications.\n\n")
fmt.Fprintf(os.Stderr, "USAGE:\n")
fmt.Fprintf(os.Stderr, " %s [OPTIONS]\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "REQUIRED ENVIRONMENT VARIABLES:\n")
fmt.Fprintf(os.Stderr, " CLOUDFLARE_API_TOKEN CloudFlare API token with Zone:DNS:Edit permissions\n")
fmt.Fprintf(os.Stderr, " Get from: https://dash.cloudflare.com/profile/api-tokens\n\n")
fmt.Fprintf(os.Stderr, "OPTIONAL ENVIRONMENT VARIABLES:\n")
fmt.Fprintf(os.Stderr, " TELEGRAM_BOT_TOKEN Telegram bot token for notifications\n")
fmt.Fprintf(os.Stderr, " TELEGRAM_CHAT_ID Telegram chat ID to send notifications to\n\n")
fmt.Fprintf(os.Stderr, "OPTIONS:\n")
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "\nEXAMPLES:\n")
fmt.Fprintf(os.Stderr, " # Update single record\n")
fmt.Fprintf(os.Stderr, " %s -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Update multiple records\n")
fmt.Fprintf(os.Stderr, " %s -record \"home.example.com,api.example.com,vpn.mydomain.net\"\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Dry run with verbose output\n")
fmt.Fprintf(os.Stderr, " %s -dry-run -verbose -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Force update even if IP hasn't changed\n")
fmt.Fprintf(os.Stderr, " %s -force -record home.example.com\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Custom TTL and IP sources\n")
fmt.Fprintf(os.Stderr, " %s -record home.example.com -ttl 600 -ip-sources \"https://ifconfig.co/ip,https://api.ipify.org\"\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # List accessible CloudFlare zones\n")
fmt.Fprintf(os.Stderr, " %s -list-zones\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Test Telegram notifications\n")
fmt.Fprintf(os.Stderr, " %s -test-telegram\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "SETUP:\n")
fmt.Fprintf(os.Stderr, " 1. Create CloudFlare API token:\n")
fmt.Fprintf(os.Stderr, " - Go to https://dash.cloudflare.com/profile/api-tokens\n")
fmt.Fprintf(os.Stderr, " - Use 'Edit zone DNS' template\n")
fmt.Fprintf(os.Stderr, " - Select your zones\n")
fmt.Fprintf(os.Stderr, " - Copy token and set CLOUDFLARE_API_TOKEN environment variable\n\n")
fmt.Fprintf(os.Stderr, " 2. Optional: Setup Telegram notifications:\n")
fmt.Fprintf(os.Stderr, " - Message @BotFather on Telegram to create a bot\n")
fmt.Fprintf(os.Stderr, " - Get your chat ID by messaging your bot, then visit:\n")
fmt.Fprintf(os.Stderr, " https://api.telegram.org/bot<BOT_TOKEN>/getUpdates\n")
fmt.Fprintf(os.Stderr, " - Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables\n\n")
fmt.Fprintf(os.Stderr, "NOTES:\n")
fmt.Fprintf(os.Stderr, " - Records can be in different CloudFlare zones\n")
fmt.Fprintf(os.Stderr, " - Only updates when IP actually changes (unless -force is used)\n")
fmt.Fprintf(os.Stderr, " - Supports both root domains and subdomains\n")
fmt.Fprintf(os.Stderr, " - Telegram notifications sent only when IP changes\n")
fmt.Fprintf(os.Stderr, " - Use -dry-run to test without making changes\n\n")
}
flag.Parse()
// Validate required arguments (unless listing zones or testing telegram)
if recordsFlag == "" && !listZones && !testTelegram {
fmt.Fprintf(os.Stderr, "Error: -record flag is required\n")
flag.Usage()
os.Exit(1)
}
// Parse record names
if recordsFlag != "" {
config.RecordNames = strings.Split(recordsFlag, ",")
// Trim whitespace from each record name
for i, record := range config.RecordNames {
config.RecordNames[i] = strings.TrimSpace(record)
}
}
// Get API token from environment
config.APIToken = os.Getenv("CLOUDFLARE_API_TOKEN")
if config.APIToken == "" {
fmt.Fprintf(os.Stderr, "Error: CLOUDFLARE_API_TOKEN environment variable is required\n")
fmt.Fprintf(os.Stderr, "Get your API token from: https://dash.cloudflare.com/profile/api-tokens\n")
fmt.Fprintf(os.Stderr, "Create a token with 'Zone:DNS:Edit' permissions for your zone\n")
os.Exit(1)
}
// Get optional Telegram credentials
config.TelegramBotToken = os.Getenv("TELEGRAM_BOT_TOKEN")
config.TelegramChatID = os.Getenv("TELEGRAM_CHAT_ID")
if config.Verbose && config.TelegramBotToken != "" && config.TelegramChatID != "" {
fmt.Println("Telegram notifications enabled")
}
// Parse IP sources
if ipSourcesFlag != "" {
config.IPSources = strings.Split(ipSourcesFlag, ",")
} else {
config.IPSources = defaultIPSources
}
if config.Verbose {
fmt.Printf("Config: Records=%v, TTL=%d, DryRun=%v, Force=%v, IPSources=%v\n",
config.RecordNames, config.TTL, config.DryRun, config.Force, config.IPSources)
}
// If testing telegram, do that and exit (skip API token validation)
if testTelegram {
if err := testTelegramNotification(config); err != nil {
fmt.Fprintf(os.Stderr, "Error testing Telegram: %v\n", err)
os.Exit(1)
}
return
}
// Validate API token
if err := validateToken(config); err != nil {
fmt.Fprintf(os.Stderr, "Error validating API token: %v\n", err)
os.Exit(1)
}
if config.Verbose {
fmt.Println("API token validated successfully")
}
// If listing zones, do that and exit
if listZones {
if err := listAllZones(config); err != nil {
fmt.Fprintf(os.Stderr, "Error listing zones: %v\n", err)
os.Exit(1)
}
return
}
// Get current public IP
currentIP, err := getCurrentIP(config)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting current IP: %v\n", err)
os.Exit(1)
}
if config.Verbose {
fmt.Printf("Current public IP: %s\n", currentIP)
fmt.Printf("Processing %d record(s)\n", len(config.RecordNames))
}
// Process each record
var totalUpdates int
var allNotifications []NotificationInfo
for _, recordName := range config.RecordNames {
if config.Verbose {
fmt.Printf("\n--- Processing record: %s ---\n", recordName)
}
// Find the zone for the record
zoneName, zoneID, err := findZoneForRecord(config, recordName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding zone for %s: %v\n", recordName, err)
continue
}
if config.Verbose {
fmt.Printf("Found zone: %s (ID: %s)\n", zoneName, zoneID)
}
// Find existing DNS record
record, err := findDNSRecordByName(config, zoneID, recordName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding DNS record %s: %v\n", recordName, err)
continue
}
// Compare IPs
if record != nil {
if record.Content == currentIP && !config.Force {
fmt.Printf("DNS record %s already points to %s - no update needed\n", recordName, currentIP)
continue
}
if config.Verbose {
if record.Content == currentIP {
fmt.Printf("DNS record %s already points to %s, but forcing update\n",
recordName, currentIP)
} else {
fmt.Printf("DNS record %s currently points to %s, needs update to %s\n",
recordName, record.Content, currentIP)
}
}
} else {
if config.Verbose {
fmt.Printf("DNS record %s does not exist, will create it\n", recordName)
}
}
// Update or create record
if config.DryRun {
if record != nil {
if record.Content == currentIP && config.Force {
fmt.Printf("DRY RUN: Would force update DNS record %s (already %s)\n",
recordName, currentIP)
} else {
fmt.Printf("DRY RUN: Would update DNS record %s from %s to %s\n",
recordName, record.Content, currentIP)
}
} else {
fmt.Printf("DRY RUN: Would create DNS record %s with IP %s\n",
recordName, currentIP)
}
// Collect notification info for dry-run
if record == nil || record.Content != currentIP || config.Force {
var oldIPForNotification string
if record != nil {
oldIPForNotification = record.Content
}
allNotifications = append(allNotifications, NotificationInfo{
RecordName: recordName,
OldIP: oldIPForNotification,
NewIP: currentIP,
IsNew: record == nil,
})
}
continue
}
var wasUpdated bool
var oldIP string
if record != nil {
oldIP = record.Content
err = updateDNSRecordByName(config, zoneID, record.ID, recordName, currentIP)
if err != nil {
fmt.Fprintf(os.Stderr, "Error updating DNS record %s: %v\n", recordName, err)
continue
}
fmt.Printf("Successfully updated DNS record %s to %s\n", recordName, currentIP)
wasUpdated = true
} else {
err = createDNSRecordByName(config, zoneID, recordName, currentIP)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating DNS record %s: %v\n", recordName, err)
continue
}
fmt.Printf("Successfully created DNS record %s with IP %s\n", recordName, currentIP)
wasUpdated = true
}
// Collect notification info for actual updates
if wasUpdated && (record == nil || oldIP != currentIP || config.Force) {
allNotifications = append(allNotifications, NotificationInfo{
RecordName: recordName,
OldIP: oldIP,
NewIP: currentIP,
IsNew: record == nil,
})
totalUpdates++
}
}
// Send batch notification if there were any changes
if len(allNotifications) > 0 {
sendBatchTelegramNotification(config, allNotifications, config.DryRun)
}
if !config.DryRun && config.Verbose {
fmt.Printf("\nProcessed %d record(s), %d update(s) made\n", len(config.RecordNames), totalUpdates)
}
}
func validateToken(config *Config) error {
req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/user/tokens/verify", nil)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+config.APIToken)
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var cfResp CloudFlareResponse
if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
return err
}
if !cfResp.Success {
return fmt.Errorf("token validation failed: %v", cfResp.Errors)
}
var tokenInfo TokenVerification
if err := json.Unmarshal(cfResp.Result, &tokenInfo); err != nil {
return err
}
if tokenInfo.Status != "active" {
return fmt.Errorf("token is not active, status: %s", tokenInfo.Status)
}
return nil
}
func getCurrentIP(config *Config) (string, error) {
var lastError error
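// Try each source in order; the first non-empty HTTP 200 body wins.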
for _, source := range config.IPSources {
if config.Verbose {
fmt.Printf("Trying IP source: %s\n", source)
}
resp, err := config.Client.Get(source)
if err != nil {
lastError = err
if config.Verbose {
fmt.Printf("Failed to get IP from %s: %v\n", source, err)
}
continue
}
body, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
lastError = err
continue
}
if resp.StatusCode != 200 {
lastError = fmt.Errorf("HTTP %d from %s", resp.StatusCode, source)
continue
}
ip := strings.TrimSpace(string(body))
if ip != "" {
return ip, nil
}
lastError = fmt.Errorf("empty response from %s", source)
}
return "", fmt.Errorf("failed to get IP from any source, last error: %v", lastError)
}
func findZoneForRecord(config *Config, recordName string) (string, string, error) {
// Walk the record name from most to least specific (e.g. "sub.example.com"
// tries "sub.example.com", then "example.com", then "com") until a zone matches.
parts := strings.Split(recordName, ".")
if config.Verbose {
fmt.Printf("Finding zone for record: %s\n", recordName)
}
for i := 0; i < len(parts); i++ {
zoneName := strings.Join(parts[i:], ".")
req, err := http.NewRequest("GET",
fmt.Sprintf("https://api.cloudflare.com/client/v4/zones?name=%s", zoneName), nil)
if err != nil {
continue
}
req.Header.Set("Authorization", "Bearer "+config.APIToken)
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
continue
}
var cfResp CloudFlareResponse
err = json.NewDecoder(resp.Body).Decode(&cfResp)
resp.Body.Close()
if err != nil || !cfResp.Success {
continue
}
var zones []Zone
if err := json.Unmarshal(cfResp.Result, &zones); err != nil {
continue
}
if len(zones) > 0 {
return zones[0].Name, zones[0].ID, nil
}
}
return "", "", fmt.Errorf("no zone found for record %s", recordName)
}
func findDNSRecordByName(config *Config, zoneID string, recordName string) (*DNSRecord, error) {
url := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records?type=A&name=%s",
zoneID, recordName)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+config.APIToken)
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var cfResp CloudFlareResponse
if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
return nil, err
}
if !cfResp.Success {
return nil, fmt.Errorf("API error: %v", cfResp.Errors)
}
var records []DNSRecord
if err := json.Unmarshal(cfResp.Result, &records); err != nil {
return nil, err
}
if len(records) == 0 {
return nil, nil // Record doesn't exist
}
return &records[0], nil
}
func updateDNSRecordByName(config *Config, zoneID, recordID, recordName, ip string) error {
data := map[string]interface{}{
"type": "A",
"name": recordName,
"content": ip,
"ttl": config.TTL,
}
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
url := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records/%s", zoneID, recordID)
req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+config.APIToken)
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var cfResp CloudFlareResponse
if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
return err
}
if !cfResp.Success {
return fmt.Errorf("API error: %v", cfResp.Errors)
}
return nil
}
func createDNSRecordByName(config *Config, zoneID, recordName, ip string) error {
data := map[string]interface{}{
"type": "A",
"name": recordName,
"content": ip,
"ttl": config.TTL,
}
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
url := fmt.Sprintf("https://api.cloudflare.com/client/v4/zones/%s/dns_records", zoneID)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+config.APIToken)
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var cfResp CloudFlareResponse
if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
return err
}
if !cfResp.Success {
return fmt.Errorf("API error: %v", cfResp.Errors)
}
return nil
}
func listAllZones(config *Config) error {
req, err := http.NewRequest("GET", "https://api.cloudflare.com/client/v4/zones", nil)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+config.APIToken)
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var cfResp CloudFlareResponse
if err := json.NewDecoder(resp.Body).Decode(&cfResp); err != nil {
return err
}
if !cfResp.Success {
return fmt.Errorf("API error: %v", cfResp.Errors)
}
var zones []Zone
if err := json.Unmarshal(cfResp.Result, &zones); err != nil {
return err
}
fmt.Printf("Found %d accessible zones:\n", len(zones))
for _, zone := range zones {
fmt.Printf(" - %s (ID: %s)\n", zone.Name, zone.ID)
}
if len(zones) == 0 {
fmt.Println("No zones found. Make sure your API token has Zone:Read permissions.")
}
return nil
}
// sendTelegramNotification reports a single record change. main currently uses
// sendBatchTelegramNotification instead; this helper is kept for one-off use.
func sendTelegramNotification(config *Config, record *DNSRecord, recordName, oldIP, newIP string, isDryRun bool) {
// Skip if Telegram is not configured
if config.TelegramBotToken == "" || config.TelegramChatID == "" {
return
}
var message string
dryRunPrefix := ""
if isDryRun {
dryRunPrefix = "🧪 DRY RUN - "
}
if record == nil {
message = fmt.Sprintf("%s🆕 DNS Record Created\n\n"+
"Record: %s\n"+
"New IP: %s\n"+
"TTL: %d seconds",
dryRunPrefix, recordName, newIP, config.TTL)
} else {
message = fmt.Sprintf("%s🔄 IP Address Changed\n\n"+
"Record: %s\n"+
"Old IP: %s\n"+
"New IP: %s\n"+
"TTL: %d seconds",
dryRunPrefix, recordName, oldIP, newIP, config.TTL)
}
// Prepare Telegram API request
telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
payload := map[string]interface{}{
"chat_id": config.TelegramChatID,
"text": message,
"parse_mode": "HTML",
}
jsonData, err := json.Marshal(payload)
if err != nil {
if config.Verbose {
fmt.Printf("Failed to marshal Telegram payload: %v\n", err)
}
return
}
// Send notification
req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
if err != nil {
if config.Verbose {
fmt.Printf("Failed to create Telegram request: %v\n", err)
}
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
if config.Verbose {
fmt.Printf("Failed to send Telegram notification: %v\n", err)
}
return
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if config.Verbose {
fmt.Println("Telegram notification sent successfully")
}
} else {
if config.Verbose {
body, _ := io.ReadAll(resp.Body)
fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body))
}
}
}
func testTelegramNotification(config *Config) error {
if config.TelegramBotToken == "" || config.TelegramChatID == "" {
return fmt.Errorf("Telegram not configured. Set TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID environment variables")
}
fmt.Println("Testing Telegram notification...")
// Send a test message
message := "🧪 Dynamic DNS Test\n\n" +
"This is a test notification from your CloudFlare Dynamic DNS tool.\n\n" +
"✅ Telegram integration is working correctly!"
telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
payload := map[string]interface{}{
"chat_id": config.TelegramChatID,
"text": message,
"parse_mode": "HTML",
}
jsonData, err := json.Marshal(payload)
if err != nil {
return fmt.Errorf("failed to marshal payload: %v", err)
}
req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
return fmt.Errorf("failed to send request: %v", err)
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode == 200 {
fmt.Println("✅ Test notification sent successfully!")
if config.Verbose {
fmt.Printf("Response: %s\n", string(body))
}
return nil
} else {
return fmt.Errorf("failed to send notification (HTTP %d): %s", resp.StatusCode, string(body))
}
}
func sendBatchTelegramNotification(config *Config, notifications []NotificationInfo, isDryRun bool) {
// Skip if Telegram is not configured
if config.TelegramBotToken == "" || config.TelegramChatID == "" {
return
}
if len(notifications) == 0 {
return
}
var message string
dryRunPrefix := ""
if isDryRun {
dryRunPrefix = "🧪 DRY RUN - "
}
if len(notifications) == 1 {
// Single record notification
notif := notifications[0]
if notif.IsNew {
message = fmt.Sprintf("%s🆕 DNS Record Created\n\n"+
"Record: %s\n"+
"New IP: %s\n"+
"TTL: %d seconds",
dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL)
} else if notif.OldIP == notif.NewIP {
message = fmt.Sprintf("%s🔄 DNS Record Force Updated\n\n"+
"Record: %s\n"+
"IP: %s (unchanged)\n"+
"TTL: %d seconds\n"+
"Note: Forced update requested",
dryRunPrefix, notif.RecordName, notif.NewIP, config.TTL)
} else {
message = fmt.Sprintf("%s🔄 IP Address Changed\n\n"+
"Record: %s\n"+
"Old IP: %s\n"+
"New IP: %s\n"+
"TTL: %d seconds",
dryRunPrefix, notif.RecordName, notif.OldIP, notif.NewIP, config.TTL)
}
} else {
// Multiple records notification
var newCount, updatedCount int
for _, notif := range notifications {
if notif.IsNew {
newCount++
} else {
updatedCount++
}
}
message = fmt.Sprintf("%s📋 Multiple DNS Records Updated\n\n", dryRunPrefix)
if newCount > 0 {
message += fmt.Sprintf("🆕 Created: %d record(s)\n", newCount)
}
if updatedCount > 0 {
message += fmt.Sprintf("🔄 Updated: %d record(s)\n", updatedCount)
}
message += fmt.Sprintf("\nNew IP: %s\nTTL: %d seconds\n\nRecords:", notifications[0].NewIP, config.TTL)
for _, notif := range notifications {
if notif.IsNew {
message += fmt.Sprintf("\n• %s (new)", notif.RecordName)
} else if notif.OldIP == notif.NewIP {
message += fmt.Sprintf("\n• %s (forced)", notif.RecordName)
} else {
message += fmt.Sprintf("\n• %s (%s → %s)", notif.RecordName, notif.OldIP, notif.NewIP)
}
}
}
// Send the notification using the same logic as single notifications
telegramURL := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", config.TelegramBotToken)
payload := map[string]interface{}{
"chat_id": config.TelegramChatID,
"text": message,
"parse_mode": "HTML",
}
jsonData, err := json.Marshal(payload)
if err != nil {
if config.Verbose {
fmt.Printf("Failed to marshal Telegram payload: %v\n", err)
}
return
}
req, err := http.NewRequest("POST", telegramURL, bytes.NewBuffer(jsonData))
if err != nil {
if config.Verbose {
fmt.Printf("Failed to create Telegram request: %v\n", err)
}
return
}
req.Header.Set("Content-Type", "application/json")
resp, err := config.Client.Do(req)
if err != nil {
if config.Verbose {
fmt.Printf("Failed to send Telegram notification: %v\n", err)
}
return
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
if config.Verbose {
fmt.Println("Telegram notification sent successfully")
}
} else {
if config.Verbose {
body, _ := io.ReadAll(resp.Body)
fmt.Printf("Telegram notification failed (HTTP %d): %s\n", resp.StatusCode, string(body))
}
}
}

View File

@@ -1,328 +0,0 @@
package main
import (
"encoding/csv"
"flag"
"fmt"
"math"
"os"
"sort"
"strconv"
"strings"
"time"
)
type Trip struct {
StartTime time.Time
EndTime time.Time
StartAddr string
EndAddr string
KMStart float64
KMEnd float64
Distance float64
License string
BusinessCost float64
Type string
}
type MonthStats struct {
TotalKM float64
Trips int
Longest float64
Shortest float64
TotalDuration time.Duration
LongestGap time.Duration
OdoAnomalies int
AvgSpeed float64
AvgTripDuration time.Duration
FuelCost float64
}
func main() {
fuelPrice := flag.Float64("fuelprice", 0, "Fuel price per liter (EUR)")
fuelEfficiency := flag.Float64("efficiency", 0, "Fuel efficiency (km per liter)")
lPer100km := flag.Float64("lper100km", 0, "Fuel consumption (liters per 100km)")
flag.Parse()
if len(flag.Args()) < 1 {
fmt.Println("Usage: go run main.go -fuelprice <price> [-efficiency <km/l> | -lper100km <l/100km>] <filename.csv>")
flag.PrintDefaults()
return
}
// Convert l/100km to km/l if provided
finalEfficiency := *fuelEfficiency
if *lPer100km > 0 {
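// e.g. 6.5 l/100km -> 100 / 6.5 ≈ 15.4 km/l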
finalEfficiency = 100.0 / *lPer100km
}
file, err := os.Open(flag.Arg(0))
if err != nil {
panic(err)
}
defer file.Close()
reader := csv.NewReader(file)
reader.Comma = ','
records, err := reader.ReadAll()
if err != nil {
panic(err)
}
dutchMonths := map[string]string{
"januari": "January", "februari": "February", "maart": "March",
"april": "April", "mei": "May", "juni": "June", "juli": "July",
"augustus": "August", "september": "September", "oktober": "October",
"november": "November", "december": "December",
}
tripsByMonth := make(map[string][]Trip)
startAddrCount := make(map[string]int)
endAddrCount := make(map[string]int)
fuelEnabled := *fuelPrice > 0 && finalEfficiency > 0
// Parse CSV
for _, record := range records[1:] {
if len(record) < 13 {
continue
}
// Parse start time
startTime, err := parseDutchTime(record[1], dutchMonths)
if err != nil {
continue
}
// Parse end time
endTime, err := parseDutchTime(record[2], dutchMonths)
if err != nil {
continue
}
// Parse distance data
kmStart, _ := strconv.ParseFloat(strings.ReplaceAll(record[5], ",", ""), 64)
kmEnd, _ := strconv.ParseFloat(strings.ReplaceAll(record[6], ",", ""), 64)
distance, _ := strconv.ParseFloat(strings.ReplaceAll(record[7], ",", ""), 64)
trip := Trip{
StartTime: startTime,
EndTime: endTime,
StartAddr: record[3],
EndAddr: record[4],
KMStart: kmStart,
KMEnd: kmEnd,
Distance: distance,
License: record[8],
BusinessCost: parseFloat(record[11]),
Type: strings.TrimSpace(record[12]),
}
monthKey := fmt.Sprintf("%d-%02d", startTime.Year(), startTime.Month())
tripsByMonth[monthKey] = append(tripsByMonth[monthKey], trip)
startAddrCount[trip.StartAddr]++
endAddrCount[trip.EndAddr]++
}
// Calculate stats
months := sortedKeys(tripsByMonth)
statsByMonth := calculateStats(tripsByMonth, fuelEnabled, *fuelPrice, finalEfficiency)
// Print results
printMainTable(statsByMonth, months, fuelEnabled, tripsByMonth)
printTopAddresses(startAddrCount, endAddrCount)
}
func parseDutchTime(datetime string, monthMap map[string]string) (time.Time, error) {
parts := strings.Split(datetime, " ")
if len(parts) < 4 {
return time.Time{}, fmt.Errorf("invalid time format")
}
engMonth, ok := monthMap[strings.ToLower(parts[1])]
if !ok {
return time.Time{}, fmt.Errorf("unknown month")
}
timeStr := fmt.Sprintf("%s %s %s %s", parts[0], engMonth, parts[2], parts[3])
return time.Parse("2 January 2006 15:04", timeStr)
}
func calculateStats(tripsByMonth map[string][]Trip, fuelEnabled bool, fuelPrice, fuelEfficiency float64) map[string]MonthStats {
stats := make(map[string]MonthStats)
for month, trips := range tripsByMonth {
var s MonthStats
var prevEnd time.Time
var longestGap time.Duration
sumSpeed := 0.0
speedCount := 0
sort.Slice(trips, func(i, j int) bool {
return trips[i].StartTime.Before(trips[j].StartTime)
})
for i, t := range trips {
s.TotalKM += t.Distance
s.Trips++
duration := t.EndTime.Sub(t.StartTime)
s.TotalDuration += duration
if duration.Hours() > 0 {
sumSpeed += t.Distance / duration.Hours()
speedCount++
}
if t.Distance > s.Longest {
s.Longest = t.Distance
}
if t.Distance < s.Shortest || s.Shortest == 0 {
s.Shortest = t.Distance
}
if i > 0 {
gap := t.StartTime.Sub(prevEnd)
if gap > longestGap {
longestGap = gap
}
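// Odometer sanity check: the previous trip's end reading should match
// this trip's start reading; a mismatch is counted as an anomaly.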
if math.Abs(trips[i-1].KMEnd-t.KMStart) > 0.01 {
s.OdoAnomalies++
}
}
prevEnd = t.EndTime
}
s.LongestGap = longestGap
if speedCount > 0 {
s.AvgSpeed = sumSpeed / float64(speedCount)
} else {
s.AvgSpeed = 0
}
if s.Trips > 0 {
s.AvgTripDuration = time.Duration(int64(s.TotalDuration) / int64(s.Trips))
}
if fuelEnabled {
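// Fuel cost = (km driven / km-per-liter) * price-per-liter.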
s.FuelCost = (s.TotalKM / fuelEfficiency) * fuelPrice
}
stats[month] = s
}
return stats
}
func printMainTable(stats map[string]MonthStats, months []string, fuelEnabled bool, tripsByMonth map[string][]Trip) {
fmt.Println("\n=== Monthly Driving Overview ===")
headers := []string{"Month", "Total", "Trips", "AvgKM", "Longest", "Shortest",
"DriveTime", "AvgTripDur", "OdoErr", "AvgSpeed"}
format := "%-10s | %-16s | %-7s | %-14s | %-24s | %-26s | %-18s | %-18s | %-10s | %-18s"
if fuelEnabled {
headers = append(headers, "Fuel Cost (EUR)")
format += " | %-18s"
}
fmt.Printf(format+"\n", toInterfaceSlice(headers)...) // print header
fmt.Println(strings.Repeat("-", 180))
for _, month := range months {
s := stats[month]
trips := tripsByMonth[month]
// Find longest and shortest trip durations
var longestDur, shortestDur time.Duration
var longestDist, shortestDist float64
if len(trips) > 0 {
for i, t := range trips {
dur := t.EndTime.Sub(t.StartTime)
if t.Distance > longestDist || i == 0 {
longestDist = t.Distance
longestDur = dur
}
if t.Distance < shortestDist || i == 0 {
shortestDist = t.Distance
shortestDur = dur
}
}
}
row := []interface{}{
month,
fmt.Sprintf("%.2f Km", s.TotalKM),
fmt.Sprintf("%d", s.Trips),
fmt.Sprintf("%.2f Km", safeDiv(s.TotalKM, float64(s.Trips))),
fmt.Sprintf("%.2f Km (%s)", longestDist, fmtDuration(longestDur)),
fmt.Sprintf("%.2f Km (%s)", shortestDist, fmtDuration(shortestDur)),
fmtDuration(s.TotalDuration),
fmtDuration(s.AvgTripDuration),
fmt.Sprintf("%d", s.OdoAnomalies),
fmt.Sprintf("%.2f Km/h", s.AvgSpeed),
}
if fuelEnabled {
row = append(row, fmt.Sprintf("%.2f EUR", s.FuelCost))
}
fmt.Printf(format+"\n", row...)
}
}
func toInterfaceSlice(strs []string) []interface{} {
res := make([]interface{}, len(strs))
for i, v := range strs {
res[i] = v
}
return res
}
func printTopAddresses(start, end map[string]int) {
fmt.Println("\n=== Frequent Locations ===")
fmt.Println("Top 3 Start Addresses:")
printTopN(start, 3)
fmt.Println("\nTop 3 End Addresses:")
printTopN(end, 3)
}
// Helper functions
func safeDiv(a, b float64) float64 {
if b == 0 {
return 0
}
return a / b
}
func fmtDuration(d time.Duration) string {
h := int(d.Hours())
m := int(d.Minutes()) % 60
return fmt.Sprintf("%02dh%02dm", h, m)
}
func printTopN(counter map[string]int, n int) {
type kv struct {
Key string
Value int
}
var sorted []kv
for k, v := range counter {
sorted = append(sorted, kv{k, v})
}
sort.Slice(sorted, func(i, j int) bool { return sorted[i].Value > sorted[j].Value })
for i := 0; i < n && i < len(sorted); i++ {
fmt.Printf("%d. %s (%d)\n", i+1, sorted[i].Key, sorted[i].Value)
}
}
func sortedKeys(m map[string][]Trip) []string {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
func parseFloat(s string) float64 {
f, _ := strconv.ParseFloat(strings.ReplaceAll(s, ",", ""), 64)
return f
}

View File

@@ -1,365 +0,0 @@
package main
import (
"fmt"
"math"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// ANSI color codes
var colors = map[string]string{
"black": "\033[0;30m",
"red": "\033[0;31m",
"green": "\033[0;32m",
"yellow": "\033[0;33m",
"blue": "\033[0;34m",
"purple": "\033[0;35m",
"cyan": "\033[0;36m",
"white": "\033[0;37m",
"grey": "\033[0;90m",
"reset": "\033[0m",
}
// DistroIcon represents a distro icon and color
type DistroIcon struct {
Icon string
Color string
}
// DotfilesStatus represents the git status of dotfiles
type DotfilesStatus struct {
IsDirty bool
Untracked int
Modified int
Staged int
CommitHash string
Unpushed int
}
func main() {
welcome()
}
func rainbowColor(text string, freq float64, offset float64) string {
var result strings.Builder
for i, char := range text {
if strings.TrimSpace(string(char)) != "" { // Only color non-whitespace characters
// Calculate RGB values using sine waves with phase shifts
r := int(127*math.Sin(freq*float64(i)+offset+0) + 128)
g := int(127*math.Sin(freq*float64(i)+offset+2*math.Pi/3) + 128)
b := int(127*math.Sin(freq*float64(i)+offset+4*math.Pi/3) + 128)
// Apply the RGB color to the character
result.WriteString(fmt.Sprintf("\033[38;2;%d;%d;%dm%c\033[0m", r, g, b, char))
} else {
result.WriteRune(char)
}
}
return result.String()
}
func printLogo() {
logo := ` __ ___ _ ____ __ _____ __
/ |/ /__ ____ ____ ____ ( )_____ / __ \____ / /_/ __(_) /__ _____
/ /|_/ / _ \/ __ \/ __ \/ __ \|// ___/ / / / / __ \/ __/ /_/ / / _ \/ ___/
/ / / / __/ / / / / / / /_/ / (__ ) / /_/ / /_/ / /_/ __/ / / __(__ )
/_/ /_/\___/_/ /_/_/ /_/\____/ /____/ /_____/\____/\__/_/ /_/_/\___/____/`
lines := strings.Split(logo, "\n")
for _, line := range lines {
if strings.TrimSpace(line) != "" {
fmt.Println(rainbowColor(line, 0.1, 0))
} else {
fmt.Println()
}
}
fmt.Println()
}
func getLastSSHLogin() string {
user := os.Getenv("USER")
if user == "" {
user = os.Getenv("USERNAME")
}
if user == "" {
return ""
}
// Try lastlog first
cmd := exec.Command("lastlog", "-u", user)
output, err := cmd.CombinedOutput()
if err != nil {
// Try lastlog2
cmd = exec.Command("lastlog2", user)
output, err = cmd.CombinedOutput()
if err != nil {
return ""
}
}
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
if len(lines) >= 2 {
parts := strings.Fields(lines[1])
if len(parts) >= 7 && strings.Contains(parts[1], "ssh") {
ip := parts[2]
timeStr := strings.Join(parts[3:], " ")
return fmt.Sprintf("%sLast SSH login%s%s %s%s from%s %s",
colors["cyan"], colors["reset"], colors["yellow"], timeStr, colors["cyan"], colors["yellow"], ip)
}
}
return ""
}
func checkDotfilesStatus() *DotfilesStatus {
dotfilesPath := os.Getenv("DOTFILES_PATH")
if dotfilesPath == "" {
homeDir, _ := os.UserHomeDir()
dotfilesPath = filepath.Join(homeDir, ".dotfiles")
}
gitPath := filepath.Join(dotfilesPath, ".git")
if _, err := os.Stat(gitPath); os.IsNotExist(err) {
return nil
}
status := &DotfilesStatus{}
// Check git status
cmd := exec.Command("git", "status", "--porcelain")
cmd.Dir = dotfilesPath
output, err := cmd.Output()
if err == nil && strings.TrimSpace(string(output)) != "" {
status.IsDirty = true
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
for _, line := range lines {
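// Porcelain prefixes: "??" untracked, " M"/"MM" worktree-modified,
// "M "/"A " staged. A rough classification; other codes are ignored.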
if strings.HasPrefix(line, "??") {
status.Untracked++
}
if strings.HasPrefix(line, " M") || strings.HasPrefix(line, "MM") {
status.Modified++
}
if strings.HasPrefix(line, "M ") || strings.HasPrefix(line, "A ") {
status.Staged++
}
}
}
// Get commit hash
cmd = exec.Command("git", "rev-parse", "--short", "HEAD")
cmd.Dir = dotfilesPath
output, err = cmd.Output()
if err == nil {
status.CommitHash = strings.TrimSpace(string(output))
}
// Count unpushed commits
cmd = exec.Command("git", "log", "--oneline", "@{u}..")
cmd.Dir = dotfilesPath
output, err = cmd.Output()
if err == nil {
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
if len(lines) > 0 && lines[0] != "" {
status.Unpushed = len(lines)
}
}
return status
}
func getCondensedStatus() (string, string) {
var statusParts []string
var hashInfo string
// Check trash status
homeDir, _ := os.UserHomeDir()
trashPath := filepath.Join(homeDir, ".local", "share", "Trash", "files")
if entries, err := os.ReadDir(trashPath); err == nil {
count := len(entries)
if count > 0 {
statusParts = append(statusParts, fmt.Sprintf("[!] %d file(s) in trash", count))
}
}
// Check dotfiles status
dotfilesStatus := checkDotfilesStatus()
if dotfilesStatus != nil {
if dotfilesStatus.IsDirty {
statusParts = append(statusParts, fmt.Sprintf("%sdotfiles is dirty%s", colors["yellow"], colors["reset"]))
statusParts = append(statusParts, fmt.Sprintf("%s[%d] untracked%s", colors["red"], dotfilesStatus.Untracked, colors["reset"]))
statusParts = append(statusParts, fmt.Sprintf("%s[%d] modified%s", colors["yellow"], dotfilesStatus.Modified, colors["reset"]))
statusParts = append(statusParts, fmt.Sprintf("%s[%d] staged%s", colors["green"], dotfilesStatus.Staged, colors["reset"]))
}
if dotfilesStatus.CommitHash != "" {
hashInfo = fmt.Sprintf("%s[%s%s%s]%s", colors["white"], colors["blue"], dotfilesStatus.CommitHash, colors["white"], colors["reset"])
if dotfilesStatus.IsDirty {
statusParts = append(statusParts, hashInfo)
hashInfo = ""
}
}
if dotfilesStatus.Unpushed > 0 {
statusParts = append(statusParts, fmt.Sprintf("%s[!] You have %d commit(s) to push%s", colors["yellow"], dotfilesStatus.Unpushed, colors["reset"]))
}
} else {
statusParts = append(statusParts, "Unable to check dotfiles status")
}
statusLine := ""
if len(statusParts) > 0 {
statusLine = strings.Join(statusParts, " - ")
}
return statusLine, hashInfo
}
func runDotfilesCommand(args ...string) (string, error) {
cmd := exec.Command("dotfiles", args...)
output, err := cmd.Output()
if err != nil {
return "", err
}
return strings.TrimSpace(string(output)), nil
}
func getDistroIcon() (string, string) {
distroIcons := map[string]DistroIcon{
"windows": {"\uf17a", colors["blue"]}, // blue
"linux": {"\uf17c", colors["yellow"]}, // yellow
"ubuntu": {"\uf31b", "\033[38;5;208m"}, // orange (ANSI 208)
"debian": {"\uf306", colors["red"]}, // red
"arch": {"\uf303", colors["cyan"]}, // cyan
"fedora": {"\uf30a", colors["blue"]}, // blue
"alpine": {"\uf300", colors["cyan"]}, // cyan
"macos": {"\uf179", colors["white"]}, // white
"darwin": {"\uf179", colors["white"]}, // white
"osx": {"\uf179", colors["white"]}, // white
}
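// Icons are Nerd Font glyphs and render correctly only with a patched font installed.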
distro, err := runDotfilesCommand("variables", "get", "Platform.Distro", "--format", "raw")
if err != nil {
distro = strings.ToLower(runtime.GOOS)
} else {
distro = strings.ToLower(distro)
}
if icon, exists := distroIcons[distro]; exists {
return icon.Icon, icon.Color
}
// Try partial match
for key, icon := range distroIcons {
if strings.Contains(distro, key) {
return icon.Icon, icon.Color
}
}
return "", ""
}
func detectShell() string {
// Check for PowerShell profile
if os.Getenv("PROFILE") != "" || os.Getenv("PW_SH_PROFILE") != "" || os.Getenv("PSModulePath") != "" {
return "powershell"
}
if shell := os.Getenv("SHELL"); shell != "" {
return filepath.Base(shell)
}
if comspec := os.Getenv("COMSPEC"); comspec != "" {
if strings.HasSuffix(strings.ToLower(comspec), "cmd.exe") {
if os.Getenv("PROFILE") != "" {
return "Powershell"
}
return "CMD"
}
return filepath.Base(comspec)
}
return "unknown"
}
func welcome() {
printLogo()
hostname, err := os.Hostname()
if err != nil {
hostname = "unknown-host"
}
// Get distro icon
distroIcon, iconColor := getDistroIcon()
// Get username
username := os.Getenv("USER")
if username == "" {
username = os.Getenv("USERNAME")
}
if username == "" {
username = "user"
}
// Get SSH login info
sshLogin := getLastSSHLogin()
// Get shell and arch
shell := detectShell()
arch := runtime.GOARCH
// Capitalize shell and arch for display (strings.Title is deprecated in
// modern Go but fine for the single ASCII words used here)
shellDisp := strings.Title(shell) //nolint:staticcheck
archDisp := strings.ToUpper(arch)
// Get package managers
pkgMgrs, err := runDotfilesCommand("variables", "get", "Platform.AvailablePackageManagers", "--format", "raw")
if err != nil {
pkgMgrs = ""
}
// Compact single line: user@hostname with icon, shell, arch
fmt.Printf("%s%s%s@%s%s", colors["green"], username, colors["cyan"], colors["yellow"], hostname)
if distroIcon != "" {
fmt.Printf(" %s%s", iconColor, distroIcon)
}
fmt.Printf("%s running %s%s%s/%s%s", colors["cyan"], colors["blue"], shellDisp, colors["cyan"], colors["purple"], archDisp)
if pkgMgrs != "" {
// Parse and color package managers
pkgMgrs = strings.Trim(pkgMgrs, "[]")
pmList := strings.Fields(strings.ReplaceAll(pkgMgrs, ",", ""))
pmColors := []string{colors["yellow"], colors["green"], colors["cyan"], colors["red"], colors["blue"]}
var coloredPMs []string
for i, pm := range pmList {
color := pmColors[i%len(pmColors)]
coloredPMs = append(coloredPMs, fmt.Sprintf("%s%s", color, pm))
}
fmt.Printf("%s [%s%s]", colors["cyan"], strings.Join(coloredPMs, colors["cyan"]+"/"), colors["reset"])
} else {
fmt.Printf("%s", colors["reset"])
}
// Get status info
condensedStatus, hashInfo := getCondensedStatus()
// Add hash to same line if dotfiles is clean
if hashInfo != "" {
fmt.Printf(" %s", hashInfo)
}
fmt.Println()
// Display last SSH login info if available
if sshLogin != "" {
fmt.Printf("%s%s\n", sshLogin, colors["reset"])
}
// Display condensed status line only if there are issues
if condensedStatus != "" {
fmt.Printf("%s%s%s\n", colors["yellow"], condensedStatus, colors["reset"])
}
}

View File

@@ -1,748 +0,0 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
)
// Color constants for terminal output
const (
Red = "\033[0;31m"
Green = "\033[0;32m"
Yellow = "\033[1;33m"
Blue = "\033[0;34m"
Cyan = "\033[0;36m"
Bold = "\033[1m"
NC = "\033[0m" // No Color
)
// ProcessInfo holds information about a process using a port
type ProcessInfo struct {
PID int
ProcessName string
Protocol string
DockerInfo string
}
// DockerContainer represents a Docker container
type DockerContainer struct {
Name string
Image string
Ports []PortMapping
Network string
}
// PortMapping represents a port mapping
type PortMapping struct {
ContainerPort int
HostPort int
Protocol string
IPv6 bool
}
func main() {
if len(os.Args) < 2 {
showUsage()
os.Exit(1)
}
arg := os.Args[1]
switch arg {
case "--help", "-h":
showHelp()
case "--list", "-l":
listDockerServices()
default:
port, err := strconv.Atoi(arg)
if err != nil || port < 1 || port > 65535 {
fmt.Printf("%sError:%s Invalid port number. Must be between 1 and 65535.\n", Red, NC)
os.Exit(1)
}
checkPort(port)
}
}
func showUsage() {
fmt.Printf("%sUsage:%s inuse <port_number>\n", Red, NC)
fmt.Printf("%s inuse --list%s\n", Yellow, NC)
fmt.Printf("%s inuse --help%s\n", Yellow, NC)
fmt.Printf("%sExample:%s inuse 80\n", Yellow, NC)
fmt.Printf("%s inuse --list%s\n", Yellow, NC)
}
func showHelp() {
fmt.Printf("%s%sinuse - Check if a port is in use%s\n\n", Cyan, Bold, NC)
fmt.Printf("%sUSAGE:%s\n", Bold, NC)
fmt.Printf(" inuse <port_number> Check if a specific port is in use\n")
fmt.Printf(" inuse --list, -l List all Docker services with listening ports\n")
fmt.Printf(" inuse --help, -h Show this help message\n\n")
fmt.Printf("%sEXAMPLES:%s\n", Bold, NC)
fmt.Printf(" %sinuse 80%s Check if port 80 is in use\n", Green, NC)
fmt.Printf(" %sinuse 3000%s Check if port 3000 is in use\n", Green, NC)
fmt.Printf(" %sinuse --list%s Show all Docker services with ports\n\n", Green, NC)
fmt.Printf("%sDESCRIPTION:%s\n", Bold, NC)
fmt.Printf(" The inuse function checks if a specific port is in use and identifies\n")
fmt.Printf(" the process using it. It can detect regular processes, Docker containers\n")
fmt.Printf(" with published ports, and containers using host networking.\n\n")
fmt.Printf("%sOUTPUT:%s\n", Bold, NC)
fmt.Printf(" %s✓%s Port is in use - shows process name, PID, and Docker info if applicable\n", Green, NC)
fmt.Printf(" %s✗%s Port is free\n", Red, NC)
fmt.Printf(" %s⚠%s Port is in use but process cannot be identified\n", Yellow, NC)
}
func listDockerServices() {
if !isDockerAvailable() {
fmt.Printf("%sError:%s Docker is not available\n", Red, NC)
os.Exit(1)
}
fmt.Printf("%s%sDocker Services with Listening Ports:%s\n\n", Cyan, Bold, NC)
containers := getRunningContainers()
if len(containers) == 0 {
fmt.Printf("%sNo running Docker containers found%s\n", Yellow, NC)
return
}
foundServices := false
for _, container := range containers {
if len(container.Ports) > 0 {
cleanImage := cleanImageName(container.Image)
fmt.Printf("%s📦 %s%s%s %s(%s)%s\n", Green, Bold, container.Name, NC, Cyan, cleanImage, NC)
for _, port := range container.Ports {
ipv6Marker := ""
if port.IPv6 {
ipv6Marker = " [IPv6]"
}
fmt.Printf("%s ├─ Port %s%d%s%s → %d (%s)%s%s\n",
Cyan, Bold, port.HostPort, NC, Cyan, port.ContainerPort, port.Protocol, ipv6Marker, NC)
}
fmt.Println()
foundServices = true
}
}
// Check for host networking containers
hostContainers := getHostNetworkingContainers()
if len(hostContainers) > 0 {
fmt.Printf("%s%sHost Networking Containers:%s\n", Yellow, Bold, NC)
for _, container := range hostContainers {
cleanImage := cleanImageName(container.Image)
fmt.Printf("%s🌐 %s%s%s %s(%s)%s %s- uses host networking%s\n",
Yellow, Bold, container.Name, NC, Cyan, cleanImage, NC, Yellow, NC)
}
fmt.Println()
foundServices = true
}
if !foundServices {
fmt.Printf("%sNo Docker services with exposed ports found%s\n", Yellow, NC)
}
}
func checkPort(port int) {
// Check if port is in use first
if !isPortInUse(port) {
fmt.Printf("%s✗ Port %d is FREE%s\n", Red, port, NC)
os.Exit(1)
}
// Port is in use, now find what's using it
process := findProcessUsingPort(port)
if process != nil {
dockerInfo := ""
if process.DockerInfo != "" {
dockerInfo = " " + process.DockerInfo
}
fmt.Printf("%s✓ Port %d (%s) in use by %s%s%s %sas PID %s%d%s%s\n",
Green, port, process.Protocol, Bold, process.ProcessName, NC, Green, Bold, process.PID, NC, dockerInfo)
return
}
// Check if it's a Docker container
containerInfo := findDockerContainerUsingPort(port)
if containerInfo != "" {
fmt.Printf("%s✓ Port %d in use by Docker container %s\n", Green, port, containerInfo)
return
}
// If we still haven't found the process, check for host networking containers more thoroughly
hostNetworkProcess := findHostNetworkingProcess(port)
if hostNetworkProcess != "" {
fmt.Printf("%s✓ Port %d likely in use by %s\n", Green, port, hostNetworkProcess)
return
}
// If we still haven't found the process
fmt.Printf("%s⚠ Port %d is in use but unable to identify the process%s\n", Yellow, port, NC)
if isDockerAvailable() {
hostContainers := getHostNetworkingContainers()
if len(hostContainers) > 0 {
fmt.Printf("%s Note: Found Docker containers using host networking:%s\n", Cyan, NC)
for _, container := range hostContainers {
cleanImage := cleanImageName(container.Image)
fmt.Printf("%s - %s (%s)%s\n", Cyan, container.Name, cleanImage, NC)
}
fmt.Printf("%s These containers share the host's network, so one of them might be using this port%s\n", Cyan, NC)
} else {
fmt.Printf("%s This might be due to insufficient permissions or the process being in a different namespace%s\n", Cyan, NC)
}
} else {
fmt.Printf("%s This might be due to insufficient permissions or the process being in a different namespace%s\n", Cyan, NC)
}
}
func isPortInUse(port int) bool {
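// Match the literal ":<port> " (with trailing space) so port 80 does not match :8080.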
// Try ss first
if isCommandAvailable("ss") {
cmd := exec.Command("ss", "-tulpn")
output, err := cmd.Output()
if err == nil {
portPattern := fmt.Sprintf(":%d ", port)
return strings.Contains(string(output), portPattern)
}
}
// Try netstat as fallback
if isCommandAvailable("netstat") {
cmd := exec.Command("netstat", "-tulpn")
output, err := cmd.Output()
if err == nil {
portPattern := fmt.Sprintf(":%d ", port)
return strings.Contains(string(output), portPattern)
}
}
return false
}
func findProcessUsingPort(port int) *ProcessInfo {
// Method 1: Try netstat
if process := tryNetstat(port); process != nil {
return process
}
// Method 2: Try ss
if process := trySS(port); process != nil {
return process
}
// Method 3: Try lsof
if process := tryLsof(port); process != nil {
return process
}
// Method 4: Try fuser
if process := tryFuser(port); process != nil {
return process
}
return nil
}
func tryNetstat(port int) *ProcessInfo {
if !isCommandAvailable("netstat") {
return nil
}
cmd := exec.Command("netstat", "-tulpn")
output, err := cmd.Output()
if err != nil {
// Try with sudo if available
if isCommandAvailable("sudo") {
cmd = exec.Command("sudo", "netstat", "-tulpn")
output, err = cmd.Output()
if err != nil {
return nil
}
} else {
return nil
}
}
scanner := bufio.NewScanner(strings.NewReader(string(output)))
portPattern := fmt.Sprintf(":%d ", port)
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, portPattern) {
fields := strings.Fields(line)
if len(fields) >= 7 {
pidProcess := fields[6]
parts := strings.Split(pidProcess, "/")
if len(parts) >= 2 {
if pid, err := strconv.Atoi(parts[0]); err == nil {
processName := parts[1]
protocol := fields[0]
dockerInfo := getDockerInfo(pid, processName, port)
return &ProcessInfo{
PID: pid,
ProcessName: processName,
Protocol: protocol,
DockerInfo: dockerInfo,
}
}
}
}
}
}
return nil
}
func trySS(port int) *ProcessInfo {
if !isCommandAvailable("ss") {
return nil
}
cmd := exec.Command("ss", "-tulpn")
output, err := cmd.Output()
if err != nil {
// Try with sudo if available
if isCommandAvailable("sudo") {
cmd = exec.Command("sudo", "ss", "-tulpn")
output, err = cmd.Output()
if err != nil {
return nil
}
} else {
return nil
}
}
scanner := bufio.NewScanner(strings.NewReader(string(output)))
portPattern := fmt.Sprintf(":%d ", port)
pidRegex := regexp.MustCompile(`pid=(\d+)`)
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, portPattern) {
matches := pidRegex.FindStringSubmatch(line)
if len(matches) >= 2 {
if pid, err := strconv.Atoi(matches[1]); err == nil {
processName := getProcessName(pid)
if processName != "" {
fields := strings.Fields(line)
protocol := ""
if len(fields) > 0 {
protocol = fields[0]
}
dockerInfo := getDockerInfo(pid, processName, port)
return &ProcessInfo{
PID: pid,
ProcessName: processName,
Protocol: protocol,
DockerInfo: dockerInfo,
}
}
}
}
}
}
return nil
}
func tryLsof(port int) *ProcessInfo {
if !isCommandAvailable("lsof") {
return nil
}
cmd := exec.Command("lsof", "-i", fmt.Sprintf(":%d", port), "-n", "-P")
output, err := cmd.Output()
if err != nil {
// Try with sudo if available
if isCommandAvailable("sudo") {
cmd = exec.Command("sudo", "lsof", "-i", fmt.Sprintf(":%d", port), "-n", "-P")
output, err = cmd.Output()
if err != nil {
return nil
}
} else {
return nil
}
}
scanner := bufio.NewScanner(strings.NewReader(string(output)))
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "LISTEN") {
fields := strings.Fields(line)
if len(fields) >= 2 {
processName := fields[0]
if pid, err := strconv.Atoi(fields[1]); err == nil {
dockerInfo := getDockerInfo(pid, processName, port)
return &ProcessInfo{
PID: pid,
ProcessName: processName,
Protocol: "tcp",
DockerInfo: dockerInfo,
}
}
}
}
}
return nil
}
func tryFuser(port int) *ProcessInfo {
if !isCommandAvailable("fuser") {
return nil
}
cmd := exec.Command("fuser", fmt.Sprintf("%d/tcp", port))
output, err := cmd.Output()
if err != nil {
return nil
}
pids := strings.Fields(string(output))
for _, pidStr := range pids {
if pid, err := strconv.Atoi(strings.TrimSpace(pidStr)); err == nil {
processName := getProcessName(pid)
if processName != "" {
return &ProcessInfo{
PID: pid,
ProcessName: processName,
Protocol: "tcp",
DockerInfo: "",
}
}
}
}
return nil
}
func getProcessName(pid int) string {
cmd := exec.Command("ps", "-p", strconv.Itoa(pid), "-o", "comm=")
output, err := cmd.Output()
if err != nil {
return ""
}
return strings.TrimSpace(string(output))
}
func getDockerInfo(pid int, processName string, port int) string {
if !isDockerAvailable() {
return ""
}
// Check if it's docker-proxy (handle truncated names like "docker-pr")
if processName == "docker-proxy" || strings.HasPrefix(processName, "docker-pr") {
containerName := getContainerByPublishedPort(port)
if containerName != "" {
image := getContainerImage(containerName)
cleanImage := cleanImageName(image)
return fmt.Sprintf("%s(Docker: %s, image: %s)%s", Cyan, containerName, cleanImage, NC)
}
return fmt.Sprintf("%s(Docker proxy)%s", Cyan, NC)
}
// Check if process is in a Docker container using cgroup
containerInfo := getContainerByPID(pid)
if containerInfo != "" {
return fmt.Sprintf("%s(Docker: %s)%s", Cyan, containerInfo, NC)
}
// Check if this process might be in a host networking container
hostContainer := checkHostNetworkingContainer(pid, processName)
if hostContainer != "" {
return fmt.Sprintf("%s(Docker host network: %s)%s", Cyan, hostContainer, NC)
}
return ""
}
func getContainerByPID(pid int) string {
cgroupPath := fmt.Sprintf("/proc/%d/cgroup", pid)
file, err := os.Open(cgroupPath)
if err != nil {
return ""
}
defer file.Close()
scanner := bufio.NewScanner(file)
containerIDRegex := regexp.MustCompile(`[a-f0-9]{64}`)
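// cgroup v1 paths embed the full 64-character container ID; cgroup v2 layouts
// differ, so this lookup can miss on newer systems.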
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "docker") {
matches := containerIDRegex.FindStringSubmatch(line)
if len(matches) > 0 {
containerID := matches[0]
containerName := getContainerNameByID(containerID)
if containerName != "" {
return containerName
}
return containerID[:12]
}
}
}
return ""
}
func findDockerContainerUsingPort(port int) string {
if !isDockerAvailable() {
return ""
}
// Check for containers with published ports
cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", fmt.Sprintf("publish=%d", port))
output, err := cmd.Output()
if err != nil {
return ""
}
containerName := strings.TrimSpace(string(output))
if containerName != "" {
image := getContainerImage(containerName)
cleanImage := cleanImageName(image)
return fmt.Sprintf("%s%s%s %s(published port, image: %s)%s", Bold, containerName, NC, Cyan, cleanImage, NC)
}
return ""
}
func isDockerAvailable() bool {
return isCommandAvailable("docker")
}
func isCommandAvailable(command string) bool {
_, err := exec.LookPath(command)
return err == nil
}
func getRunningContainers() []DockerContainer {
if !isDockerAvailable() {
return nil
}
cmd := exec.Command("docker", "ps", "--format", "{{.Names}}")
output, err := cmd.Output()
if err != nil {
return nil
}
var containers []DockerContainer
scanner := bufio.NewScanner(strings.NewReader(string(output)))
for scanner.Scan() {
containerName := strings.TrimSpace(scanner.Text())
if containerName != "" {
container := DockerContainer{
Name: containerName,
Image: getContainerImage(containerName),
Ports: getContainerPorts(containerName),
}
containers = append(containers, container)
}
}
return containers
}
func getHostNetworkingContainers() []DockerContainer {
if !isDockerAvailable() {
return nil
}
cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", "network=host")
output, err := cmd.Output()
if err != nil {
return nil
}
var containers []DockerContainer
scanner := bufio.NewScanner(strings.NewReader(string(output)))
for scanner.Scan() {
containerName := strings.TrimSpace(scanner.Text())
if containerName != "" {
container := DockerContainer{
Name: containerName,
Image: getContainerImage(containerName),
Network: "host",
}
containers = append(containers, container)
}
}
return containers
}
func getContainerImage(containerName string) string {
cmd := exec.Command("docker", "inspect", containerName)
output, err := cmd.Output()
if err != nil {
return ""
}
var inspectData []map[string]interface{}
if err := json.Unmarshal(output, &inspectData); err != nil {
return ""
}
if len(inspectData) > 0 {
if image, ok := inspectData[0]["Config"].(map[string]interface{})["Image"].(string); ok {
return image
}
}
return ""
}
func getContainerPorts(containerName string) []PortMapping {
cmd := exec.Command("docker", "port", containerName)
output, err := cmd.Output()
if err != nil {
return nil
}
var ports []PortMapping
scanner := bufio.NewScanner(strings.NewReader(string(output)))
portRegex := regexp.MustCompile(`(\d+)/(tcp|udp) -> 0\.0\.0\.0:(\d+)`)
ipv6PortRegex := regexp.MustCompile(`(\d+)/(tcp|udp) -> \[::\]:(\d+)`)
for scanner.Scan() {
line := scanner.Text()
// Check for IPv4
if matches := portRegex.FindStringSubmatch(line); len(matches) >= 4 {
containerPort, _ := strconv.Atoi(matches[1])
protocol := matches[2]
hostPort, _ := strconv.Atoi(matches[3])
ports = append(ports, PortMapping{
ContainerPort: containerPort,
HostPort: hostPort,
Protocol: protocol,
IPv6: false,
})
}
// Check for IPv6
if matches := ipv6PortRegex.FindStringSubmatch(line); len(matches) >= 4 {
containerPort, _ := strconv.Atoi(matches[1])
protocol := matches[2]
hostPort, _ := strconv.Atoi(matches[3])
ports = append(ports, PortMapping{
ContainerPort: containerPort,
HostPort: hostPort,
Protocol: protocol,
IPv6: true,
})
}
}
return ports
}
func getContainerByPublishedPort(port int) string {
cmd := exec.Command("docker", "ps", "--format", "{{.Names}}", "--filter", fmt.Sprintf("publish=%d", port))
output, err := cmd.Output()
if err != nil {
return ""
}
return strings.TrimSpace(string(output))
}
func getContainerNameByID(containerID string) string {
cmd := exec.Command("docker", "inspect", containerID)
output, err := cmd.Output()
if err != nil {
return ""
}
var inspectData []map[string]interface{}
if err := json.Unmarshal(output, &inspectData); err != nil {
return ""
}
if len(inspectData) > 0 {
if name, ok := inspectData[0]["Name"].(string); ok {
return strings.TrimPrefix(name, "/")
}
}
return ""
}
func cleanImageName(image string) string {
// Remove SHA256 hashes
shaRegex := regexp.MustCompile(`sha256:[a-f0-9]*`)
cleaned := shaRegex.ReplaceAllString(image, "[image-hash]")
// Remove registry prefixes, keep only the last part
parts := strings.Split(cleaned, "/")
if len(parts) > 0 {
return parts[len(parts)-1]
}
return cleaned
}
func findHostNetworkingProcess(port int) string {
if !isDockerAvailable() {
return ""
}
// Get all host networking containers
hostContainers := getHostNetworkingContainers()
for _, container := range hostContainers {
// Check if this container might be using the port
if isContainerUsingPort(container.Name, port) {
cleanImage := cleanImageName(container.Image)
return fmt.Sprintf("%s%s%s %s(Docker host network: %s)%s", Bold, container.Name, NC, Cyan, cleanImage, NC)
}
}
return ""
}
func isContainerUsingPort(containerName string, port int) bool {
// Try to execute netstat inside the container to see if it's listening on the port
cmd := exec.Command("docker", "exec", containerName, "sh", "-c",
fmt.Sprintf("netstat -tlnp 2>/dev/null | grep ':%d ' || ss -tlnp 2>/dev/null | grep ':%d '", port, port))
output, err := cmd.Output()
if err != nil {
return false
}
return len(output) > 0
}
func checkHostNetworkingContainer(pid int, processName string) string {
if !isDockerAvailable() {
return ""
}
// Get all host networking containers and check if any match this process
hostContainers := getHostNetworkingContainers()
for _, container := range hostContainers {
// Match by process name; host PIDs aren't visible inside the container's
// PID namespace, so the pid argument can't be compared directly.
cmd := exec.Command("docker", "exec", container.Name, "sh", "-c",
fmt.Sprintf("ps -o pid,comm | grep -q '%s'", processName))
err := cmd.Run()
if err == nil {
cleanImage := cleanImageName(container.Image)
return fmt.Sprintf("%s (%s)", container.Name, cleanImage)
}
}
return ""
}
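// Example wiring (a sketch, not a function defined in this diff): a caller
// answering "which container owns this port?" could try published ports
// first and then fall back to host-network containers:
//
// func describeDockerPortUser(port int) string {
//     if s := findDockerContainerUsingPort(port); s != "" {
//         return s
//     }
//     return findHostNetworkingProcess(port)
// }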

View File

@@ -1,249 +0,0 @@
#!/usr/bin/env python3
import os
import subprocess
import argparse
import requests
def get_physical_interfaces():
"""
Retrieve a list of physical network interfaces on the system.
This function checks the `/sys/class/net/` directory to identify physical
network interfaces. It determines if an interface is physical by verifying
the presence of a symbolic link to a `device` directory.
Returns:
list: A list of strings, where each string is the name of a physical
network interface.
"""
interfaces_path = '/sys/class/net/'
physical_interfaces = []
for interface in os.listdir(interfaces_path):
if not os.path.islink(os.path.join(interfaces_path, interface, 'device')):
continue
physical_interfaces.append(interface)
return physical_interfaces
def get_virtual_interfaces():
"""
Retrieves a list of virtual network interfaces on the system.
This function scans the network interfaces available in the '/sys/class/net/'
directory and filters out physical interfaces and the loopback interface ('lo').
It identifies virtual interfaces by checking if the 'device' path is not a
symbolic link.
Returns:
list: A list of virtual network interface names as strings.
"""
interfaces_path = '/sys/class/net/'
virtual_interfaces = []
for interface in os.listdir(interfaces_path):
if os.path.islink(os.path.join(interfaces_path, interface, 'device')):
continue
if interface == 'lo':
continue
virtual_interfaces.append(interface)
return virtual_interfaces
def get_up_interfaces(interfaces):
"""
Filters the given list of interfaces to include only those that are up or unknown.
Args:
interfaces (list): A list of interface names.
Returns:
list: A list of interface names that are up or treated as up (e.g., UNKNOWN).
"""
up_interfaces = []
for interface in interfaces:
try:
result = subprocess.run(['ip', 'link', 'show', interface],
capture_output=True, text=True, check=True)
if "state UP" in result.stdout or "state UNKNOWN" in result.stdout:
up_interfaces.append(interface)
except Exception:
continue
return up_interfaces
def get_interface_state(interface):
"""
Retrieve the state and MAC address of a network interface.
Args:
interface (str): The name of the network interface.
Returns:
tuple: A tuple containing the state (str) and MAC address (str) of the interface.
"""
try:
result = subprocess.run(['ip', 'link', 'show', interface],
capture_output=True, text=True, check=True)
lines = result.stdout.splitlines()
state = "UNKNOWN"
mac = "N/A"
if len(lines) > 0:
if "state UP" in lines[0]:
state = "UP"
elif "state DOWN" in lines[0]:
state = "DOWN"
elif "state UNKNOWN" in lines[0]:
state = "UP" # Treat UNKNOWN as UP
if len(lines) > 1:
mac = lines[1].strip().split()[1] if len(lines[1].strip().split()) > 1 else "N/A"
return state, mac
except Exception:
return "UNKNOWN", "N/A"
def get_external_ips():
"""
Fetch both IPv4 and IPv6 external IP addresses of the machine.
This function tries each service in the `services` list below (e.g.
`https://ip.mvl.sh`, `https://ifconfig.co`, `https://api.ipify.org`) until
one responds. If the IP fetched is IPv6, it additionally tries to fetch an
IPv4 address using curl's `-4` option.
Returns:
tuple: A tuple containing the IPv4 and IPv6 addresses as strings. If either
address cannot be fetched, it will be set to "Unavailable".
"""
services = ["https://ip.mvl.sh", "https://ifconfig.co", "https://api.ipify.org", "https://myexternalip.com/raw", "https://ifconfig.io", "https://ifconfig.me"]
headers = {"User-Agent": "curl"}
ipv4, ipv6 = "Unavailable", "Unavailable"
for service in services:
try:
response = requests.get(service, headers=headers, timeout=0.2)
if response.status_code == 200:
ip = response.text.strip()
if ":" in ip: # IPv6 address
ipv6 = ip
# Try to fetch IPv4 explicitly
ipv4_response = subprocess.run(
["curl", "-4", "--silent", service],
capture_output=True,
text=True,
timeout=0.2,
# check=False: a non-zero exit is handled via returncode below;
# check=True would raise CalledProcessError past the except clause.
check=False
)
if ipv4_response.returncode == 0:
ipv4 = ipv4_response.stdout.strip()
else: # IPv4 address
ipv4 = ip
if ipv4 != "Unavailable" and ipv6 != "Unavailable":
break
except (requests.RequestException, subprocess.TimeoutExpired):
continue
return ipv4, ipv6
def display_interface_details(show_physical=False, show_virtual=False, show_all=False, show_external_ip=False, show_ipv6=False):
"""
Display details of network interfaces based on the specified flags.
Args:
show_physical (bool): Show physical interfaces (UP by default unless combined with show_all).
show_virtual (bool): Show virtual interfaces (UP by default unless combined with show_all).
show_all (bool): Include all interfaces (UP, DOWN, UNKNOWN).
show_external_ip (bool): Fetch and display the external IP address.
show_ipv6 (bool): Include IPv6 addresses in the output.
Notes:
- By default, only IPv4 addresses are shown unless `-6` is specified.
- IPv6 addresses are displayed in a separate column if `-6` is specified.
"""
if show_external_ip:
ipv4, ipv6 = get_external_ips()
print(f"External IPv4: {ipv4}")
print(f"External IPv6: {ipv6}")
print("-" * 70)
interfaces = []
if show_all:
if show_physical or not show_virtual: # Default to physical if no `-v`
interfaces.extend(get_physical_interfaces())
if show_virtual:
interfaces.extend(get_virtual_interfaces())
else:
if show_physical or not show_virtual: # Default to physical if no `-v`
interfaces.extend(get_up_interfaces(get_physical_interfaces()))
if show_virtual or not show_physical: # Default to virtual if no `-p`
interfaces.extend(get_up_interfaces(get_virtual_interfaces()))
interfaces.sort()
# Define column widths based on expected maximum content length
col_widths = {
'interface': 15,
'ipv4': 18,
'ipv6': 40 if show_ipv6 else 0, # Hide IPv6 column if not showing IPv6
'subnet': 10,
'state': 10,
'mac': 18
}
# Print header with proper formatting
header = f"{'Interface':<{col_widths['interface']}} {'IPv4 Address':<{col_widths['ipv4']}}"
if show_ipv6:
header += f" {'IPv6 Address':<{col_widths['ipv6']}}"
header += f" {'Subnet':<{col_widths['subnet']}} {'State':<{col_widths['state']}} {'MAC Address':<{col_widths['mac']}}"
print(header)
print("-" * (col_widths['interface'] + col_widths['ipv4'] + (col_widths['ipv6'] if show_ipv6 else 0) + col_widths['subnet'] + col_widths['state'] + col_widths['mac']))
for interface in interfaces:
try:
result = subprocess.run(['ip', '-br', 'addr', 'show', interface],
capture_output=True, text=True, check=True)
state, mac = get_interface_state(interface)
if result.returncode == 0:
lines = result.stdout.strip().splitlines()
ipv4 = "N/A"
ipv6 = "N/A"
subnet = ""
for line in lines:
parts = line.split()
if len(parts) >= 3:
ip_with_mask = parts[2]
# Check if the address is IPv4 or IPv6
if ":" in ip_with_mask: # IPv6
ipv6 = ip_with_mask.split('/')[0]
else: # IPv4
ipv4 = ip_with_mask.split('/')[0]
subnet = ip_with_mask.split('/')[1] if '/' in ip_with_mask else ""
row = f"{interface:<{col_widths['interface']}} {ipv4:<{col_widths['ipv4']}}"
if show_ipv6:
row += f" {ipv6:<{col_widths['ipv6']}}"
row += f" {subnet:<{col_widths['subnet']}} {state:<{col_widths['state']}} {mac:<{col_widths['mac']}}"
print(row)
except Exception as e:
print(f"Error fetching details for {interface}: {e}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Display network interface information')
parser.add_argument('-p', action='store_true', help='Show physical interfaces (UP by default)')
parser.add_argument('-v', action='store_true', help='Show virtual interfaces (UP by default)')
parser.add_argument('-a', action='store_true', help='Include all interfaces (UP, DOWN, UNKNOWN)')
parser.add_argument('-e', action='store_true', help='Fetch and display the external IP address')
parser.add_argument('--ipv6', '-6', action='store_true', help='Include IPv6 addresses in the output')
args = parser.parse_args()
# Default to showing both UP physical and virtual interfaces if no flags are specified
display_interface_details(show_physical=args.p or not (args.p or args.v or args.a or args.e),
show_virtual=args.v or not (args.p or args.v or args.a or args.e),
show_all=args.a,
show_external_ip=args.e,
show_ipv6=args.ipv6)
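# Example invocations (illustrative; assumes the script is on PATH, e.g. as `ifinfo`):
#   ifinfo          # UP physical and virtual interfaces, IPv4 only
#   ifinfo -e       # additionally fetch and print external IPv4/IPv6 first
#   ifinfo -a -6    # include DOWN interfaces and an IPv6 column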

View File

@@ -1,298 +0,0 @@
#!/bin/bash
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Configuration
KOBOLD_PATH="/mnt/data/ai/llm/koboldcpp-linux-x64"
KOBOLD_MODEL="/mnt/data/ai/llm/Mistral-Small-24B-Instruct-2501-Q4_K_M.gguf" # Default model
SILLYTAVERN_SCREEN="sillytavern"
KOBOLD_SCREEN="koboldcpp"
# Function to check if a screen session exists
check_screen() {
screen -ls | grep -q "\.${1}\s"
}
# Function to list available models
list_models() {
echo -e "${BLUE}Available models:${NC}"
ls -1 /mnt/data/ai/llm/*.gguf | nl -w2 -s'. '
}
# Function to select a model
select_model() {
list_models
echo
read -p "Select model number (or press Enter for default): " model_num
if [[ -z "$model_num" ]]; then
echo -e "${YELLOW}Using default model: $(basename "$KOBOLD_MODEL")${NC}"
else
selected_model=$(ls -1 /mnt/data/ai/llm/*.gguf | sed -n "${model_num}p")
if [[ -n "$selected_model" ]]; then
KOBOLD_MODEL="$selected_model"
echo -e "${GREEN}Selected model: $(basename "$KOBOLD_MODEL")${NC}"
else
echo -e "${RED}Invalid selection. Using default model.${NC}"
fi
fi
}
# Function to start SillyTavern
start_sillytavern() {
echo -e "${YELLOW}Starting SillyTavern in screen session '${SILLYTAVERN_SCREEN}'...${NC}"
screen -dmS "$SILLYTAVERN_SCREEN" bash -c "sillytavern --listen 0.0.0.0"
sleep 2
if check_screen "$SILLYTAVERN_SCREEN"; then
echo -e "${GREEN}✓ SillyTavern started successfully!${NC}"
echo -e "${BLUE} Access at: http://0.0.0.0:8000${NC}"
else
echo -e "${RED}✗ Failed to start SillyTavern${NC}"
fi
}
# Function to start KoboldCPP
start_koboldcpp() {
select_model
echo -e "${YELLOW}Starting KoboldCPP in screen session '${KOBOLD_SCREEN}'...${NC}"
screen -dmS "$KOBOLD_SCREEN" bash -c "cd /mnt/data/ai/llm && ./koboldcpp-linux-x64 --model '$KOBOLD_MODEL' --host 0.0.0.0 --port 5001 --contextsize 8192 --gpulayers 999"
sleep 2
if check_screen "$KOBOLD_SCREEN"; then
echo -e "${GREEN}✓ KoboldCPP started successfully!${NC}"
echo -e "${BLUE} Model: $(basename "$KOBOLD_MODEL")${NC}"
echo -e "${BLUE} Access at: http://0.0.0.0:5001${NC}"
else
echo -e "${RED}✗ Failed to start KoboldCPP${NC}"
fi
}
# Function to stop a service
stop_service() {
local service=$1
local screen_name=$2
echo -e "${YELLOW}Stopping ${service}...${NC}"
screen -S "$screen_name" -X quit
sleep 1
if ! check_screen "$screen_name"; then
echo -e "${GREEN}✓ ${service} stopped successfully${NC}"
else
echo -e "${RED}✗ Failed to stop ${service}${NC}"
fi
}
# Function to show service status
show_status() {
echo -e "${CYAN}╔═══════════════════════════════════════╗${NC}"
echo -e "${CYAN}║ Service Status Overview ║${NC}"
echo -e "${CYAN}╚═══════════════════════════════════════╝${NC}"
echo
# Check SillyTavern
if check_screen "$SILLYTAVERN_SCREEN"; then
echo -e " ${GREEN}●${NC} SillyTavern: ${GREEN}Running${NC} (screen: ${SILLYTAVERN_SCREEN})"
echo -e " ${BLUE}→ http://0.0.0.0:8000${NC}"
else
echo -e " ${RED}●${NC} SillyTavern: ${RED}Not running${NC}"
fi
echo
# Check KoboldCPP
if check_screen "$KOBOLD_SCREEN"; then
echo -e " ${GREEN}●${NC} KoboldCPP: ${GREEN}Running${NC} (screen: ${KOBOLD_SCREEN})"
echo -e " ${BLUE}→ http://0.0.0.0:5001${NC}"
else
echo -e " ${RED}●${NC} KoboldCPP: ${RED}Not running${NC}"
fi
echo
}
# Function to handle service management
manage_services() {
local st_running=$(check_screen "$SILLYTAVERN_SCREEN" && echo "true" || echo "false")
local kc_running=$(check_screen "$KOBOLD_SCREEN" && echo "true" || echo "false")
# If both services are running
if [[ "$st_running" == "true" ]] && [[ "$kc_running" == "true" ]]; then
echo -e "${GREEN}Both services are running.${NC}"
echo
echo "1) Attach to SillyTavern"
echo "2) Attach to KoboldCPP"
echo "3) Restart SillyTavern"
echo "4) Restart KoboldCPP"
echo "5) Stop all services"
echo "6) Exit"
read -p "Your choice (1-6): " choice
case $choice in
1)
echo -e "${BLUE}Attaching to SillyTavern... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$SILLYTAVERN_SCREEN"
;;
2)
echo -e "${BLUE}Attaching to KoboldCPP... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$KOBOLD_SCREEN"
;;
3)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
echo
start_sillytavern
;;
4)
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
echo
start_koboldcpp
;;
5)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
;;
6)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
# If only SillyTavern is running
elif [[ "$st_running" == "true" ]]; then
echo -e "${YELLOW}Only SillyTavern is running.${NC}"
echo
echo "1) Attach to SillyTavern"
echo "2) Start KoboldCPP"
echo "3) Restart SillyTavern"
echo "4) Stop SillyTavern"
echo "5) Exit"
read -p "Your choice (1-5): " choice
case $choice in
1)
echo -e "${BLUE}Attaching to SillyTavern... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$SILLYTAVERN_SCREEN"
;;
2)
start_koboldcpp
;;
3)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
echo
start_sillytavern
;;
4)
stop_service "SillyTavern" "$SILLYTAVERN_SCREEN"
;;
5)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
# If only KoboldCPP is running
elif [[ "$kc_running" == "true" ]]; then
echo -e "${YELLOW}Only KoboldCPP is running.${NC}"
echo
echo "1) Attach to KoboldCPP"
echo "2) Start SillyTavern"
echo "3) Restart KoboldCPP"
echo "4) Stop KoboldCPP"
echo "5) Exit"
read -p "Your choice (1-5): " choice
case $choice in
1)
echo -e "${BLUE}Attaching to KoboldCPP... (Use Ctrl+A then D to detach)${NC}"
sleep 1
screen -r "$KOBOLD_SCREEN"
;;
2)
start_sillytavern
;;
3)
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
echo
start_koboldcpp
;;
4)
stop_service "KoboldCPP" "$KOBOLD_SCREEN"
;;
5)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
# If no services are running
else
echo -e "${YELLOW}No services are running.${NC}"
echo
echo "1) Start both services"
echo "2) Start SillyTavern only"
echo "3) Start KoboldCPP only"
echo "4) Exit"
read -p "Your choice (1-4): " choice
case $choice in
1)
start_sillytavern
echo
start_koboldcpp
;;
2)
start_sillytavern
;;
3)
start_koboldcpp
;;
4)
exit 0
;;
*)
echo -e "${RED}Invalid choice${NC}"
;;
esac
fi
}
# Main script
echo -e "${BLUE}╔═══════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ LLM Services Manager ║${NC}"
echo -e "${BLUE}╚═══════════════════════════════════════╝${NC}"
echo
# Show status
show_status
# Show separator and manage services
echo -e "${CYAN}═══════════════════════════════════════${NC}"
manage_services
echo
echo -e "${BLUE}Quick reference:${NC}"
echo "• List sessions: screen -ls"
echo "• Attach: screen -r <name>"
echo "• Detach: Ctrl+A then D"

View File

@@ -1,119 +0,0 @@
# SSH Utility - Smart SSH Connection Manager
A transparent SSH wrapper that automatically chooses between local and remote connections based on network connectivity.
## What it does
This utility acts as a drop-in replacement for the `ssh` command that intelligently routes connections:
- When you type `ssh desktop`, it automatically checks if your local network is available
- If local: connects via `desktop-local` (faster local connection)
- If remote: connects via `desktop` (Tailscale/VPN connection)
- All other SSH usage passes through unchanged (`ssh --help`, `ssh user@host`, etc.)
## Installation
The utility is automatically compiled and installed to `~/.local/bin/ssh` via Ansible when you run your dotfiles setup.
## Configuration
1. Copy the example config:
```bash
mkdir -p ~/.config/ssh-util
cp ~/.dotfiles/config/ssh-util/config.yaml ~/.config/ssh-util/
```
2. Edit `~/.config/ssh-util/config.yaml` to match your setup:
```yaml
smart_aliases:
desktop:
primary: "desktop-local" # SSH config entry for local connection
fallback: "desktop" # SSH config entry for remote connection
check_host: "192.168.86.22" # IP to ping for connectivity test
timeout: "2s" # Ping timeout
```
3. Ensure your `~/.ssh/config` contains the referenced host entries:
```
Host desktop
HostName mennos-desktop
User menno
Port 400
ForwardAgent yes
AddKeysToAgent yes
Host desktop-local
HostName 192.168.86.22
User menno
Port 400
ForwardAgent yes
AddKeysToAgent yes
```
## Usage
Once configured, simply use SSH as normal:
```bash
# Smart connection - automatically chooses local vs remote
ssh desktop
# All other SSH usage works exactly the same
ssh --help
ssh --version
ssh user@example.com
ssh -L 8080:localhost:80 server
```
## How it works
1. When you run `ssh <alias>`, the utility checks if `<alias>` is defined in the smart_aliases config
2. If yes, it pings the `check_host` IP address
3. If ping succeeds: executes `ssh <primary>` instead
4. If ping fails: executes `ssh <fallback>` instead
5. If not a smart alias: passes through to real SSH unchanged
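For illustration, steps 2–4 could be sketched in Go (the language of the ssh-util module) roughly as follows. The function names and hard-coded values here are illustrative only, not the actual implementation:
```go
package main

import (
"os"
"os/exec"
"strings"
"syscall"
)

// resolveAlias returns the primary host when checkHost answers a single
// ping within the timeout, and the fallback host otherwise.
func resolveAlias(primary, fallback, checkHost, timeout string) string {
secs := strings.TrimSuffix(timeout, "s") // "2s" -> "2" for ping -W
if exec.Command("ping", "-c", "1", "-W", secs, checkHost).Run() == nil {
return primary
}
return fallback
}

func main() {
host := resolveAlias("desktop-local", "desktop", "192.168.86.22", "2s")
// Hand off to the real client, replacing this process.
if err := syscall.Exec("/usr/bin/ssh", []string{"ssh", host}, os.Environ()); err != nil {
os.Exit(1)
}
}
```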
## Troubleshooting
### SSH utility not found
Make sure `~/.local/bin` is in your PATH:
```bash
echo $PATH | grep -o ~/.local/bin
```
### Config not loading
Check the config file exists and has correct syntax:
```bash
ls -la ~/.config/ssh-util/config.yaml
cat ~/.config/ssh-util/config.yaml
```
### Connectivity test failing
Test manually:
```bash
ping -c 1 -W 2 192.168.86.22
```
### Falls back to real SSH
If there are any errors loading config or parsing, the utility safely falls back to executing the real SSH binary at `/usr/bin/ssh`.
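To confirm which binary actually runs, a quick check (hypothetical session) is:
```bash
# Should print ~/.local/bin/ssh when the wrapper shadows the system client
command -v ssh
# The real client remains available directly
/usr/bin/ssh -V
```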
## Adding more aliases
To add more smart aliases, just extend the config:
```yaml
smart_aliases:
desktop:
primary: "desktop-local"
fallback: "desktop"
check_host: "192.168.86.22"
timeout: "2s"
server:
primary: "server-local"
fallback: "server-remote"
check_host: "192.168.1.100"
timeout: "1s"
```
Remember to create the corresponding entries in your `~/.ssh/config`.

View File

@@ -1,90 +0,0 @@
# SSH Utility Configuration
# This file defines smart aliases that automatically choose between local and remote connections
# Logging configuration
logging:
enabled: true
# Levels: debug, info, warn, error
level: "info"
# Formats: console, json
format: "console"
smart_aliases:
desktop:
primary: "desktop-local"
fallback: "desktop"
check_host: "192.168.1.254"
timeout: "2s"
laptop:
primary: "laptop-local"
fallback: "laptop"
check_host: "192.168.1.253"
timeout: "2s"
# Background SSH Tunnel Definitions
tunnels:
# Example: Desktop database tunnel
desktop-database:
type: local
local_port: 5432
remote_host: database
remote_port: 5432
ssh_host: desktop # Uses smart alias logic (desktop-local/desktop)
# Example: Development API tunnel
dev-api:
type: local
local_port: 8080
remote_host: api
remote_port: 80
ssh_host: dev-server
# Example: SOCKS proxy tunnel
socks-proxy:
type: dynamic
local_port: 1080
ssh_host: bastion
# Modem web interface tunnel
modem-web:
type: local
local_port: 8443
remote_host: 192.168.1.1
remote_port: 443
ssh_host: desktop
# Tunnel Management Commands:
# ssh --tunnel --open desktop-database (or ssh -TO desktop-database)
# ssh --tunnel --close desktop-database (or ssh -TC desktop-database)
# ssh --tunnel --list (or ssh -TL)
#
# Ad-hoc tunnels (not in config):
# ssh -TO temp-api --local 8080:api:80 --via server
# Logging options:
# - enabled: true/false - whether to show any logs
# - level: debug (verbose), info (normal), warn (warnings only), error (errors only)
# - format: console (human readable), json (structured)
# Logs are written to stderr so they don't interfere with SSH output
# How it works:
# 1. When you run: ssh desktop
# 2. The utility pings the alias's check_host (192.168.1.254 above) with a 2s timeout
# 3. If ping succeeds: runs "ssh desktop-local" instead
# 4. If ping fails: runs "ssh desktop" instead
# 5. All other SSH usage (flags, user@host, etc.) passes through unchanged
# Your SSH config should contain the actual host definitions:
# Host desktop
# HostName mennos-desktop
# User menno
# Port 400
# ForwardAgent yes
# AddKeysToAgent yes
#
# Host desktop-local
# HostName 192.168.86.22
# User menno
# Port 400
# ForwardAgent yes
# AddKeysToAgent yes

View File

@@ -1,20 +0,0 @@
module ssh-util
go 1.21
require (
github.com/jedib0t/go-pretty/v6 v6.4.9
github.com/rs/zerolog v1.31.0
github.com/spf13/cobra v1.8.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/sys v0.12.0 // indirect
)

View File

@@ -1,46 +0,0 @@
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jedib0t/go-pretty/v6 v6.4.9 h1:vZ6bjGg2eBSrJn365qlxGcaWu09Id+LHtrfDWlB2Usc=
github.com/jedib0t/go-pretty/v6 v6.4.9/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.4 h1:wZRexSlwd7ZXfKINDLsO4r7WBt3gTKONc6K/VesHvHM=
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,51 +0,0 @@
---
- name: WSL2 1Password SSH Agent Bridge
block:
- name: Ensure required packages are installed for 1Password sock bridge
ansible.builtin.package:
name:
# 1Password (WSL2 required package for sock bridge)
- socat
state: present
become: true
- name: Ensure .1password directory exists in home
ansible.builtin.file:
path: "{{ ansible_env.HOME }}/.1password"
state: directory
mode: '0700'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
- name: Create .agent-bridge.sh in home directory
ansible.builtin.copy:
dest: "{{ ansible_env.HOME }}/.agent-bridge.sh"
mode: '0755'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
content: |
# Code extracted from https://stuartleeks.com/posts/wsl-ssh-key-forward-to-windows/
# (IMPORTANT) Create the folder on your root for the `agent.sock` (How mentioned by @rfay and @Lochnair in the comments)
mkdir -p ~/.1password
# Configure ssh forwarding
export SSH_AUTH_SOCK=$HOME/.1password/agent.sock
# need `ps -ww` to get non-truncated command for matching
# use square brackets to generate a regex match for the process we want but that doesn't match the grep command running it!
ALREADY_RUNNING=$(ps -auxww | grep -q "[n]piperelay.exe -ei -s //./pipe/openssh-ssh-agent"; echo $?)
if [[ $ALREADY_RUNNING != "0" ]]; then
if [[ -S $SSH_AUTH_SOCK ]]; then
# not expecting the socket to exist as the forwarding command isn't running (http://www.tldp.org/LDP/abs/html/fto.html)
echo "removing previous socket..."
rm $SSH_AUTH_SOCK
fi
echo "Starting SSH-Agent relay..."
# setsid to force new session to keep running
# set socat to listen on $SSH_AUTH_SOCK and forward to npiperelay which then forwards to openssh-ssh-agent on windows
(setsid socat UNIX-LISTEN:$SSH_AUTH_SOCK,fork EXEC:"npiperelay.exe -ei -s //./pipe/openssh-ssh-agent",nofork &) >/dev/null 2>&1
fi
tags:
- wsl
- wsl2

View File

@@ -1,93 +0,0 @@
---
- name: Borg Backup Installation and Configuration
block:
- name: Check if Borg is already installed
ansible.builtin.command: which borg
register: borg_check
ignore_errors: true
changed_when: false
- name: Ensure Borg is installed
ansible.builtin.package:
name: borg
state: present
become: true
when: borg_check.rc != 0
- name: Set Borg backup facts
ansible.builtin.set_fact:
borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
borg_backup_dir: "/mnt/services"
borg_repo_dir: "/mnt/object_storage/borg-repo"
- name: Create Borg directories
ansible.builtin.file:
path: "{{ borg_dir }}"
state: directory
mode: "0755"
loop:
- "{{ borg_config_dir }}"
- "/mnt/object_storage"
loop_control:
loop_var: borg_dir
become: true
- name: Check if Borg repository exists
ansible.builtin.stat:
path: "{{ borg_repo_dir }}/config"
register: borg_repo_check
become: true
- name: Initialize Borg repository
ansible.builtin.command: >
borg init --encryption=repokey {{ borg_repo_dir }}
environment:
BORG_PASSPHRASE: "{{ borg_passphrase }}"
become: true
when: not borg_repo_check.stat.exists
- name: Create Borg backup script
ansible.builtin.template:
src: templates/borg-backup.sh.j2
dest: "{{ borg_config_dir }}/backup.sh"
mode: "0755"
become: true
- name: Create Borg systemd service
ansible.builtin.template:
src: templates/borg-backup.service.j2
dest: /etc/systemd/system/borg-backup.service
mode: "0644"
become: true
register: borg_service
- name: Create Borg systemd timer
ansible.builtin.template:
src: templates/borg-backup.timer.j2
dest: /etc/systemd/system/borg-backup.timer
mode: "0644"
become: true
register: borg_timer
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
when: borg_service.changed or borg_timer.changed
- name: Enable and start Borg backup timer
ansible.builtin.systemd:
name: borg-backup.timer
enabled: true
state: started
become: true
- name: Display Borg backup status
ansible.builtin.debug:
msg: "Borg backup is configured and will run daily at 2 AM. Logs available at /var/log/borg-backup.log"
tags:
- borg-backup
- borg
- backup
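# Note: templates/borg-backup.timer.j2 is not included in this diff. A unit
# consistent with the "daily at 2 AM" message above might look like this
# (assumed sketch, not the actual template):
#
# [Unit]
# Description=Run Borg backup daily
#
# [Timer]
# OnCalendar=*-*-* 02:00:00
# Persistent=true
#
# [Install]
# WantedBy=timers.target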

View File

@@ -1,95 +0,0 @@
---
- name: Borg Local Sync Installation and Configuration
block:
- name: Set Borg backup facts
ansible.builtin.set_fact:
borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
borg_backup_dir: "/mnt/services"
borg_repo_dir: "/mnt/object_storage/borg-repo"
- name: Create Borg local sync script
ansible.builtin.template:
src: borg-local-sync.sh.j2
dest: /usr/local/bin/borg-local-sync.sh
mode: "0755"
owner: root
group: root
become: true
tags:
- borg-local-sync
- name: Create Borg local sync systemd service
ansible.builtin.template:
src: borg-local-sync.service.j2
dest: /etc/systemd/system/borg-local-sync.service
mode: "0644"
owner: root
group: root
become: true
notify:
- reload systemd
tags:
- borg-local-sync
- name: Create Borg local sync systemd timer
ansible.builtin.template:
src: borg-local-sync.timer.j2
dest: /etc/systemd/system/borg-local-sync.timer
mode: "0644"
owner: root
group: root
become: true
notify:
- reload systemd
- restart borg-local-sync-timer
tags:
- borg-local-sync
- name: Create log file for Borg local sync
ansible.builtin.file:
path: /var/log/borg-local-sync.log
state: touch
owner: root
group: root
mode: "0644"
become: true
tags:
- borg-local-sync
- name: Enable and start Borg local sync timer
ansible.builtin.systemd:
name: borg-local-sync.timer
enabled: true
state: started
daemon_reload: true
become: true
tags:
- borg-local-sync
- name: Add logrotate configuration for Borg local sync
ansible.builtin.copy:
content: |
/var/log/borg-local-sync.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 644 root root
}
dest: /etc/logrotate.d/borg-local-sync
mode: "0644"
owner: root
group: root
become: true
tags:
- borg-local-sync
- borg
- backup
tags:
- borg-local-sync
- borg
- backup

View File

@@ -1,88 +0,0 @@
---
- name: Dynamic DNS setup
block:
- name: Create systemd environment file for dynamic DNS
ansible.builtin.template:
src: "{{ playbook_dir }}/templates/dynamic-dns-systemd.env.j2"
dest: "/etc/dynamic-dns-systemd.env"
mode: "0600"
owner: root
group: root
become: true
- name: Create dynamic DNS wrapper script
ansible.builtin.copy:
dest: "/usr/local/bin/dynamic-dns-update.sh"
mode: "0755"
content: |
#!/bin/bash
# Run dynamic DNS update (binary compiled by utils.yml)
{{ ansible_user_dir }}/.local/bin/dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
become: true
- name: Create dynamic DNS systemd timer
ansible.builtin.copy:
dest: "/etc/systemd/system/dynamic-dns.timer"
mode: "0644"
content: |
[Unit]
Description=Dynamic DNS Update Timer
Requires=dynamic-dns.service
[Timer]
OnCalendar=*:0/15
Persistent=true
[Install]
WantedBy=timers.target
become: true
register: ddns_timer
- name: Create dynamic DNS systemd service
ansible.builtin.copy:
dest: "/etc/systemd/system/dynamic-dns.service"
mode: "0644"
content: |
[Unit]
Description=Dynamic DNS Update
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/dynamic-dns-update.sh
EnvironmentFile=/etc/dynamic-dns-systemd.env
User={{ ansible_user }}
Group={{ ansible_user }}
[Install]
WantedBy=multi-user.target
become: true
register: ddns_service
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
when: ddns_timer.changed or ddns_service.changed
- name: Enable and start dynamic DNS timer
ansible.builtin.systemd:
name: dynamic-dns.timer
enabled: true
state: started
become: true
- name: Display setup completion message
ansible.builtin.debug:
msg: |
Dynamic DNS setup complete!
- Systemd timer: sudo systemctl status dynamic-dns.timer
- Check logs: sudo journalctl -u dynamic-dns.service -f
- Manual run: sudo /usr/local/bin/dynamic-dns-update.sh
- Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl
when: inventory_hostname == 'mennos-desktop'
tags:
- dynamic-dns

View File

@@ -1,94 +0,0 @@
---
- name: JuiceFS Installation and Configuration
block:
- name: Check if JuiceFS is already installed
ansible.builtin.command: which juicefs
register: juicefs_check
ignore_errors: true
changed_when: false
- name: Install JuiceFS using the automatic installer
ansible.builtin.shell: curl -sSL https://d.juicefs.com/install | sh -
register: juicefs_installation
when: juicefs_check.rc != 0
become: true
- name: Verify JuiceFS installation
ansible.builtin.command: juicefs version
register: juicefs_version
changed_when: false
when: juicefs_check.rc != 0 or juicefs_installation.changed
- name: Create mount directory
ansible.builtin.file:
path: /mnt/object_storage
state: directory
mode: "0755"
become: true
- name: Create cache directory
ansible.builtin.file:
path: /var/jfsCache
state: directory
mode: "0755"
become: true
- name: Configure JuiceFS network performance optimizations
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
become: true
loop:
- { name: "net.core.rmem_max", value: "16777216" }
- { name: "net.core.wmem_max", value: "16777216" }
- { name: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" }
- { name: "net.ipv4.tcp_wmem", value: "4096 65536 16777216" }
- name: Set JuiceFS facts
ansible.builtin.set_fact:
hetzner_access_key: "{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_ACCESS_KEY_ID') }}"
hetzner_secret_key:
"{{ lookup('community.general.onepassword', 'Hetzner Object Storage Bucket', vault='Dotfiles', field='AWS_SECRET_ACCESS_KEY')
}}"
redis_password: "{{ lookup('community.general.onepassword', 'JuiceFS (Redis)', vault='Dotfiles', field='password') }}"
- name: Create JuiceFS systemd service file
ansible.builtin.template:
src: templates/juicefs.service.j2
dest: /etc/systemd/system/juicefs.service
owner: root
group: root
mode: "0644"
become: true
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
- name: Include JuiceFS Redis tasks
ansible.builtin.include_tasks: services/redis/redis.yml
when: inventory_hostname == 'mennos-desktop'
- name: Enable and start JuiceFS service
ansible.builtin.systemd:
name: juicefs.service
enabled: true
state: started
become: true
- name: Check if JuiceFS is mounted
ansible.builtin.shell: df -h | grep /mnt/object_storage
become: true
register: mount_check
ignore_errors: true
changed_when: false
- name: Display mount status
ansible.builtin.debug:
msg: "JuiceFS is successfully mounted at /mnt/object_storage"
when: mount_check.rc == 0
tags:
- juicefs

View File

@@ -1,157 +0,0 @@
---
- name: Server setup
block:
- name: Ensure openssh-server is installed on Arch-based systems
ansible.builtin.package:
name: openssh
state: present
when: ansible_pkg_mgr == 'pacman'
- name: Ensure openssh-server is installed on non-Arch systems
ansible.builtin.package:
name: openssh-server
state: present
when: ansible_pkg_mgr != 'pacman'
- name: Ensure Borg is installed on Arch-based systems
ansible.builtin.package:
name: borg
state: present
become: true
when: ansible_pkg_mgr == 'pacman'
- name: Ensure Borg is installed on Debian/Ubuntu systems
ansible.builtin.package:
name: borgbackup
state: present
become: true
when: ansible_pkg_mgr != 'pacman'
- name: Include JuiceFS tasks
ansible.builtin.include_tasks: juicefs.yml
tags:
- juicefs
- name: Include Dynamic DNS tasks
ansible.builtin.include_tasks: dynamic-dns.yml
tags:
- dynamic-dns
- name: Include Borg Backup tasks
ansible.builtin.include_tasks: borg-backup.yml
tags:
- borg-backup
- name: Include Borg Local Sync tasks
ansible.builtin.include_tasks: borg-local-sync.yml
tags:
- borg-local-sync
- name: System performance optimizations
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
reload: true
become: true
loop:
- { name: "fs.file-max", value: "2097152" } # Max open files for the entire system
- { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have
- { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory
- { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency to reclaim memory for directory/inode caches
- { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket
- { name: "net.core.netdev_max_backlog", value: "65535" } # Max packets queued on network interface input
- { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state
- { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections
- name: Include service tasks
ansible.builtin.include_tasks: "services/{{ item.name }}/{{ item.name }}.yml"
loop: "{{ services | selectattr('enabled', 'equalto', true) | selectattr('hosts', 'contains', inventory_hostname) | list if specific_service is not defined else services | selectattr('name', 'equalto', specific_service) | selectattr('enabled', 'equalto', true) | selectattr('hosts', 'contains', inventory_hostname) | list }}"
loop_control:
label: "{{ item.name }}"
tags:
- services
- always
vars:
services:
- name: dashy
enabled: true
hosts:
- mennos-desktop
- name: gitea
enabled: true
hosts:
- mennos-desktop
- name: factorio
enabled: true
hosts:
- mennos-desktop
- name: dozzle
enabled: true
hosts:
- mennos-desktop
- name: beszel
enabled: true
hosts:
- mennos-desktop
- name: caddy
enabled: true
hosts:
- mennos-desktop
- name: golink
enabled: true
hosts:
- mennos-desktop
- name: immich
enabled: true
hosts:
- mennos-desktop
- name: plex
enabled: true
hosts:
- mennos-desktop
- name: tautulli
enabled: true
hosts:
- mennos-desktop
- name: stash
enabled: true
hosts:
- mennos-desktop
- name: downloaders
enabled: true
hosts:
- mennos-desktop
- name: wireguard
enabled: true
hosts:
- mennos-desktop
- name: nextcloud
enabled: true
hosts:
- mennos-desktop
- name: echoip
enabled: true
hosts:
- mennos-desktop
- name: arr-stack
enabled: true
hosts:
- mennos-desktop
- name: home-assistant
enabled: true
hosts:
- mennos-desktop
- name: privatebin
enabled: true
hosts:
- mennos-desktop
- name: unifi-network-application
enabled: true
hosts:
- mennos-desktop
- name: avorion
enabled: true
hosts:
- mennos-desktop

View File

@@ -1,38 +0,0 @@
---
- name: Deploy ArrStack service
block:
- name: Set ArrStack directories
ansible.builtin.set_fact:
arr_stack_service_dir: "{{ ansible_env.HOME }}/.services/arr-stack"
arr_stack_data_dir: "/mnt/services/arr-stack"
- name: Create ArrStack directory
ansible.builtin.file:
path: "{{ arr_stack_service_dir }}"
state: directory
mode: "0755"
- name: Create ArrStack data directory
ansible.builtin.file:
path: "{{ arr_stack_data_dir }}"
state: directory
mode: "0755"
- name: Deploy ArrStack docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ arr_stack_service_dir }}/docker-compose.yml"
mode: "0644"
register: arr_stack_template_result
- name: Stop ArrStack service
ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" down --remove-orphans
when: arr_stack_template_result.changed
- name: Start ArrStack service
ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" up -d
when: arr_stack_template_result.changed
tags:
- services
- arr_stack
- arr-stack

View File

@@ -1,181 +0,0 @@
name: arr-stack
services:
radarr:
container_name: radarr
image: lscr.io/linuxserver/radarr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
ports:
- 7878:7878
extra_hosts:
- host.docker.internal:host-gateway
volumes:
- {{ arr_stack_data_dir }}/radarr-config:/config
- /mnt/data:/mnt/data
restart: "unless-stopped"
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 2G
sonarr:
image: linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ arr_stack_data_dir }}/sonarr-config:/config
- /mnt/data:/mnt/data
ports:
- 8989:8989
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 2G
whisparr:
image: ghcr.io/hotio/whisparr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
ports:
- 6969:6969
extra_hosts:
- host.docker.internal:host-gateway
volumes:
- {{ arr_stack_data_dir }}/whisparr-config:/config
- /mnt/data:/mnt/data
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 2G
prowlarr:
container_name: prowlarr
image: linuxserver/prowlarr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ arr_stack_data_dir }}/prowlarr-config:/config
extra_hosts:
- host.docker.internal:host-gateway
ports:
- 9696:9696
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 512M
flaresolverr:
image: ghcr.io/flaresolverr/flaresolverr:latest
container_name: flaresolverr
environment:
- LOG_LEVEL=${LOG_LEVEL:-info}
- LOG_HTML=${LOG_HTML:-false}
- CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
- TZ=Europe/Amsterdam
ports:
- "8191:8191"
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 1G
overseerr:
image: sctx/overseerr:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ arr_stack_data_dir }}/overseerr-config:/app/config
ports:
- 5055:5055
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
networks:
- arr_stack_net
- caddy_network
deploy:
resources:
limits:
memory: 512M
tdarr:
image: ghcr.io/haveagitgat/tdarr:latest
container_name: tdarr
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
- serverIP=0.0.0.0
- serverPort=8266
- webUIPort=8265
- internalNode=true
- inContainer=true
- ffmpegVersion=7
- nodeName=MyInternalNode
- auth=false
- openBrowser=true
- maxLogSizeMB=10
- cronPluginUpdate=
- NVIDIA_DRIVER_CAPABILITIES=all
- NVIDIA_VISIBLE_DEVICES=all
volumes:
- {{ arr_stack_data_dir }}/tdarr-server:/app/server
- {{ arr_stack_data_dir }}/tdarr-config:/app/configs
- {{ arr_stack_data_dir }}/tdarr-logs:/app/logs
- /mnt/data:/media
- {{ arr_stack_data_dir }}/tdarr-cache:/temp
ports:
- 8265:8265
- 8266:8266
extra_hosts:
- host.docker.internal:host-gateway
restart: unless-stopped
runtime: nvidia
devices:
- /dev/dri:/dev/dri
networks:
- arr_stack_net
deploy:
resources:
limits:
memory: 4G
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
networks:
arr_stack_net:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,37 +0,0 @@
---
- name: Deploy Avorion service
block:
- name: Set Avorion directories
ansible.builtin.set_fact:
avorion_service_dir: "{{ ansible_env.HOME }}/.services/avorion"
avorion_data_dir: "/mnt/services/avorion"
- name: Create Avorion directory
ansible.builtin.file:
path: "{{ avorion_service_dir }}"
state: directory
mode: "0755"
- name: Create Avorion data directory
ansible.builtin.file:
path: "{{ avorion_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Avorion docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ avorion_service_dir }}/docker-compose.yml"
mode: "0644"
register: avorion_compose
- name: Stop Avorion service
ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" down --remove-orphans
when: avorion_compose.changed
- name: Start Avorion service
ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" up -d
when: avorion_compose.changed
tags:
- services
- avorion

View File

@@ -1,15 +0,0 @@
services:
avorion:
image: rfvgyhn/avorion:latest
volumes:
- {{ avorion_data_dir }}:/home/steam/.avorion/galaxies/avorion_galaxy
ports:
- 27000:27000
- 27000:27000/udp
- 27003:27003/udp
- 27020:27020/udp
- 27021:27021/udp
deploy:
resources:
limits:
memory: 4G

View File

@@ -1,37 +0,0 @@
---
- name: Deploy Beszel service
block:
- name: Set Beszel directories
ansible.builtin.set_fact:
beszel_service_dir: "{{ ansible_env.HOME }}/.services/beszel"
beszel_data_dir: "/mnt/services/beszel"
- name: Create Beszel directory
ansible.builtin.file:
path: "{{ beszel_service_dir }}"
state: directory
mode: "0755"
- name: Create Beszel data directory
ansible.builtin.file:
path: "{{ beszel_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Beszel docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ beszel_service_dir }}/docker-compose.yml"
mode: "0644"
register: beszel_compose
- name: Stop Beszel service
ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" down --remove-orphans
when: beszel_compose.changed
- name: Start Beszel service
ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" up -d
when: beszel_compose.changed
tags:
- services
- beszel

View File

@@ -1,37 +0,0 @@
services:
beszel:
image: 'henrygd/beszel'
restart: unless-stopped
ports:
- '8090:8090'
volumes:
- {{beszel_data_dir}}/data:/beszel_data
- {{beszel_data_dir}}/socket:/beszel_socket
networks:
- beszel-net
- caddy_network
deploy:
resources:
limits:
memory: 256M
beszel-agent:
image: henrygd/beszel-agent:latest
restart: unless-stopped
network_mode: host
volumes:
- {{beszel_data_dir}}/socket:/beszel_socket
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
LISTEN: /beszel_socket/beszel.sock
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKkSIQDh1vS8lG+2Uw/9dK1eOgCHVCgQfP+Bfk4XPkdn'
deploy:
resources:
limits:
memory: 128M
networks:
beszel-net:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,228 +0,0 @@
# Global configuration for country blocking
{
servers {
protocols h1 h2 h3
}
}
# Country blocking snippet using MaxMind GeoLocation - reusable across all sites
{% if enable_country_blocking | default(false) and allowed_countries_codes | default([]) | length > 0 %}
(country_block) {
@allowed_local {
remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
}
@not_allowed_countries {
not remote_ip 127.0.0.1 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 157.180.41.167 2a01:4f9:c013:1a13::1
not {
maxmind_geolocation {
db_path "/etc/caddy/geoip/GeoLite2-Country.mmdb"
allow_countries {{ allowed_countries_codes | join(' ') }}
}
}
}
respond @not_allowed_countries "Access denied" 403
}
{% else %}
(country_block) {
# Country blocking disabled
}
{% endif %}
{% if inventory_hostname == 'mennos-desktop' %}
git.mvl.sh {
import country_block
reverse_proxy gitea:3000
tls {{ caddy_email }}
}
git.vleeuwen.me {
import country_block
redir https://git.mvl.sh{uri}
tls {{ caddy_email }}
}
df.mvl.sh {
import country_block
redir / https://git.mvl.sh/vleeuwenmenno/dotfiles/raw/branch/master/setup.sh
tls {{ caddy_email }}
}
fsm.mvl.sh {
import country_block
reverse_proxy factorio-server-manager:80
tls {{ caddy_email }}
}
fsm.vleeuwen.me {
import country_block
redir https://fsm.mvl.sh{uri}
tls {{ caddy_email }}
}
beszel.mvl.sh {
import country_block
reverse_proxy beszel:8090
tls {{ caddy_email }}
}
beszel.vleeuwen.me {
import country_block
redir https://beszel.mvl.sh{uri}
tls {{ caddy_email }}
}
photos.mvl.sh {
import country_block
reverse_proxy immich:2283
tls {{ caddy_email }}
}
photos.vleeuwen.me {
import country_block
redir https://photos.mvl.sh{uri}
tls {{ caddy_email }}
}
home.mvl.sh {
import country_block
reverse_proxy host.docker.internal:8123 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
home.vleeuwen.me {
import country_block
reverse_proxy host.docker.internal:8123 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
unifi.mvl.sh {
reverse_proxy unifi-controller:8443 {
transport http {
tls_insecure_skip_verify
}
header_up Host {host}
}
tls {{ caddy_email }}
}
hotspot.mvl.sh {
reverse_proxy unifi-controller:8843 {
transport http {
tls_insecure_skip_verify
}
header_up Host {host}
}
tls {{ caddy_email }}
}
hotspot.mvl.sh:80 {
redir https://hotspot.mvl.sh{uri} permanent
}
bin.mvl.sh {
import country_block
reverse_proxy privatebin:8080
tls {{ caddy_email }}
}
ip.mvl.sh ip.vleeuwen.me {
import country_block
reverse_proxy echoip:8080 {
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
http://ip.mvl.sh http://ip.vleeuwen.me {
import country_block
reverse_proxy echoip:8080 {
header_up X-Real-IP {http.request.remote.host}
}
}
overseerr.mvl.sh {
import country_block
reverse_proxy overseerr:5055
tls {{ caddy_email }}
}
overseerr.vleeuwen.me {
import country_block
redir https://overseerr.mvl.sh{uri}
tls {{ caddy_email }}
}
plex.mvl.sh {
import country_block
reverse_proxy host.docker.internal:32400 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
plex.vleeuwen.me {
import country_block
redir https://plex.mvl.sh{uri}
tls {{ caddy_email }}
}
tautulli.mvl.sh {
import country_block
reverse_proxy host.docker.internal:8181 {
header_up Host {upstream_hostport}
header_up X-Real-IP {http.request.remote.host}
}
tls {{ caddy_email }}
}
tautulli.vleeuwen.me {
import country_block
redir https://tautulli.mvl.sh{uri}
tls {{ caddy_email }}
}
drive.mvl.sh drive.vleeuwen.me {
import country_block
# CalDAV and CardDAV redirects
redir /.well-known/carddav /remote.php/dav/ 301
redir /.well-known/caldav /remote.php/dav/ 301
# Handle other .well-known requests
handle /.well-known/* {
reverse_proxy nextcloud:80 {
header_up Host {host}
header_up X-Real-IP {http.request.remote.host}
}
}
# Main reverse proxy configuration with proper headers
reverse_proxy nextcloud:80 {
header_up Host {host}
header_up X-Real-IP {http.request.remote.host}
}
# Security headers
header {
# HSTS header for enhanced security (required by Nextcloud)
Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
# Additional security headers recommended for Nextcloud
X-Content-Type-Options "nosniff"
X-Frame-Options "SAMEORIGIN"
Referrer-Policy "no-referrer"
X-XSS-Protection "1; mode=block"
X-Permitted-Cross-Domain-Policies "none"
X-Robots-Tag "noindex, nofollow"
}
tls {{ caddy_email }}
}
{% endif %}

View File

@@ -1,15 +0,0 @@
FROM caddy:2.9.1-builder AS builder
RUN xcaddy build \
--with github.com/porech/caddy-maxmind-geolocation
FROM caddy:2.9.1-alpine
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
# Create directory for MaxMind databases and logs
RUN mkdir -p /etc/caddy/geoip /var/log/caddy
EXPOSE 80 443
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]

View File

@@ -1,59 +0,0 @@
---
- name: Deploy Caddy service
block:
- name: Set Caddy directories
ansible.builtin.set_fact:
caddy_service_dir: "{{ ansible_env.HOME }}/.services/caddy"
caddy_data_dir: "/mnt/services/caddy"
geoip_db_path: "/mnt/services/echoip"
caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}"
- name: Create Caddy directory
ansible.builtin.file:
path: "{{ caddy_service_dir }}"
state: directory
mode: "0755"
- name: Setup country blocking
ansible.builtin.include_tasks: country-blocking.yml
- name: Copy Dockerfile for custom Caddy build
ansible.builtin.copy:
src: Dockerfile
dest: "{{ caddy_service_dir }}/Dockerfile"
mode: "0644"
register: caddy_dockerfile
- name: Create Caddy network
ansible.builtin.command: docker network create caddy_default
register: create_caddy_network
failed_when:
- create_caddy_network.rc != 0
- "'already exists' not in create_caddy_network.stderr"
changed_when: create_caddy_network.rc == 0
- name: Deploy Caddy docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ caddy_service_dir }}/docker-compose.yml"
mode: "0644"
register: caddy_compose
- name: Deploy Caddy Caddyfile
ansible.builtin.template:
src: Caddyfile.j2
dest: "{{ caddy_service_dir }}/Caddyfile"
mode: "0644"
register: caddy_file
- name: Stop Caddy service
ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans
when: caddy_compose.changed or caddy_file.changed
- name: Start Caddy service
ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d
when: caddy_compose.changed or caddy_file.changed
tags:
- caddy
- services
- reverse-proxy

View File

@@ -1,50 +0,0 @@
---
- name: Country blocking setup for Caddy with MaxMind GeoLocation
block:
- name: Copy Dockerfile for custom Caddy build with GeoIP
ansible.builtin.copy:
src: Dockerfile
dest: "{{ caddy_service_dir }}/Dockerfile"
mode: "0644"
when: enable_country_blocking | default(false)
- name: Check if MaxMind Country database is available
ansible.builtin.stat:
path: "{{ geoip_db_path }}/GeoLite2-Country.mmdb"
register: maxmind_country_db
when: enable_country_blocking | default(false)
- name: Ensure log directory exists for Caddy
ansible.builtin.file:
path: "{{ caddy_data_dir }}/logs"
state: directory
mode: "0755"
become: true
when: enable_country_blocking | default(false)
- name: Display country blocking configuration
ansible.builtin.debug:
msg:
- "✅ Country blocking enabled: {{ enable_country_blocking | default(false) }}"
- "🛡️ Countries to allow: {{ allowed_countries_codes | default([]) | join(', ') }}"
- "📍 Using MaxMind GeoLocation plugin"
- "💾 Database path: /etc/caddy/geoip/GeoLite2-Country.mmdb"
- "📊 Database available: {{ maxmind_country_db.stat.exists | default(false) }}"
when: enable_country_blocking | default(false)
- name: Warn if MaxMind database not found
ansible.builtin.debug:
msg:
- "⚠️ WARNING: MaxMind Country database not found!"
- "Expected location: {{ geoip_db_path }}/GeoLite2-Country.mmdb"
- "Country blocking will not work until EchoIP service is deployed"
- "Run: dotf update --ansible --tags echoip"
when:
- enable_country_blocking | default(false)
- not maxmind_country_db.stat.exists | default(false)
tags:
- caddy
- security
- country-blocking
- geoip

View File

@@ -1,32 +0,0 @@
services:
caddy:
build:
context: .
dockerfile: Dockerfile
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- {{ caddy_data_dir }}/data:/data
- {{ caddy_data_dir }}/config:/config
- {{ caddy_service_dir }}/Caddyfile:/etc/caddy/Caddyfile
- {{ geoip_db_path }}:/etc/caddy/geoip:ro
- {{ caddy_data_dir }}/logs:/var/log/caddy
environment:
- TZ=Europe/Amsterdam
- PUID=1000
- PGID=100
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- caddy_network
deploy:
resources:
limits:
memory: 512M
networks:
caddy_network:
name: caddy_default
enable_ipv6: true

View File

@@ -1,324 +0,0 @@
pageInfo:
title: Menno's Home
navLinks: []
sections:
- name: Selfhosted
items:
- title: Plex
icon: http://mennos-desktop:4000/assets/plex.svg
url: https://plex.mvl.sh
statusCheckUrl: https://plex.mvl.sh/identity
statusCheck: true
id: 0_1035_plex
- title: Tautulli
icon: http://mennos-desktop:4000/assets/tautulli.svg
url: https://tautulli.mvl.sh
id: 1_1035_tautulli
statusCheck: true
- title: Overseerr
icon: http://mennos-desktop:4000/assets/overseerr.svg
url: https://overseerr.mvl.sh
id: 2_1035_overseerr
statusCheck: true
- title: Immich
icon: http://mennos-desktop:4000/assets/immich.svg
url: https://photos.mvl.sh
id: 3_1035_immich
statusCheck: true
- title: Nextcloud
icon: http://mennos-desktop:4000/assets/nextcloud.svg
url: https://drive.mvl.sh
id: 3_1035_nxtcld
statusCheck: true
- title: ComfyUI
icon: http://mennos-desktop:8188/assets/favicon.ico
url: http://mennos-desktop:8188
statusCheckUrl: http://host.docker.internal:8188/api/system_stats
id: 3_1035_comfyui
statusCheck: true
displayData:
sortBy: default
rows: 1
cols: 2
collapsed: false
hideForGuests: false
- name: Media Management
items:
- title: Sonarr
icon: http://mennos-desktop:4000/assets/sonarr.svg
url: http://go/sonarr
id: 0_1533_sonarr
- title: Radarr
icon: http://mennos-desktop:4000/assets/radarr.svg
url: http://go/radarr
id: 1_1533_radarr
- title: Prowlarr
icon: http://mennos-desktop:4000/assets/prowlarr.svg
url: http://go/prowlarr
id: 2_1533_prowlarr
- title: Tdarr
icon: http://mennos-desktop:4000/assets/tdarr.png
url: http://go/tdarr
id: 3_1533_tdarr
- name: Kagi
items:
- title: Kagi Search
icon: favicon
url: https://kagi.com/
id: 0_380_kagisearch
- title: Kagi Translate
icon: favicon
url: https://translate.kagi.com/
id: 1_380_kagitranslate
- title: Kagi Assistant
icon: favicon
url: https://kagi.com/assistant
id: 2_380_kagiassistant
- name: News
items:
- title: Nu.nl
icon: http://mennos-desktop:4000/assets/nunl.svg
url: https://www.nu.nl/
id: 0_380_nu
- title: Tweakers.net
icon: favicon
url: https://www.tweakers.net/
id: 1_380_tweakers
- title: NL Times
icon: favicon
url: https://www.nltimes.nl/
id: 2_380_nl_times
- name: Downloaders
items:
- title: qBittorrent
icon: http://mennos-desktop:4000/assets/qbittorrent.svg
url: http://go/qbit
id: 0_1154_qbittorrent
tags:
- download
- torrent
- yarr
- title: Sabnzbd
icon: http://mennos-desktop:4000/assets/sabnzbd.svg
url: http://go/sabnzbd
id: 1_1154_sabnzbd
tags:
- download
- nzb
- yarr
- name: Git
items:
- title: GitHub
icon: http://mennos-desktop:4000/assets/github.svg
url: https://github.com/vleeuwenmenno
id: 0_292_github
tags:
- repo
- git
- hub
- title: Gitea
icon: http://mennos-desktop:4000/assets/gitea.svg
url: http://git.mvl.sh/vleeuwenmenno
id: 1_292_gitea
tags:
- repo
- git
- tea
- name: Server Monitoring
items:
- title: Beszel
icon: http://mennos-desktop:4000/assets/beszel.svg
url: http://go/beszel
tags:
- monitoring
- logs
id: 0_1725_beszel
- title: Dozzle
icon: http://mennos-desktop:4000/assets/dozzle.svg
url: http://go/dozzle
id: 1_1725_dozzle
tags:
- monitoring
- logs
- title: UpDown.io Status
icon: far fa-signal
url: http://go/status
id: 2_1725_updowniostatus
tags:
- monitoring
- logs
- name: Tools
items:
- title: Home Assistant
icon: http://mennos-desktop:4000/assets/home-assistant.svg
url: http://go/homeassistant
id: 0_529_homeassistant
- title: Tailscale
icon: http://mennos-desktop:4000/assets/tailscale.svg
url: http://go/tailscale
id: 1_529_tailscale
- title: GliNet KVM
icon: http://mennos-desktop:4000/assets/glinet.svg
url: http://go/glkvm
id: 2_529_glinetkvm
- title: Unifi Network Controller
icon: http://mennos-desktop:4000/assets/unifi.svg
url: http://go/unifi
id: 3_529_unifinetworkcontroller
- title: Dashboard Icons
icon: favicon
url: https://dashboardicons.com/
id: 4_529_dashboardicons
- name: Weather
items:
- title: Buienradar
icon: favicon
url: https://www.buienradar.nl/weer/Beverwijk/NL/2758998
id: 0_529_buienradar
- title: ClearOutside
icon: favicon
url: https://clearoutside.com/forecast/52.49/4.66
id: 1_529_clearoutside
- title: Windy
icon: favicon
url: https://www.windy.com/
id: 2_529_windy
- title: Meteoblue
icon: favicon
url: https://www.meteoblue.com/en/country/weather/radar/the-netherlands_the-netherlands_2750405
id: 2_529_meteoblue
- name: DiscountOffice
displayData:
sortBy: default
rows: 1
cols: 3
collapsed: false
hideForGuests: false
items:
- title: DiscountOffice.nl
icon: favicon
url: https://discountoffice.nl/
id: 0_1429_discountofficenl
tags:
- do
- discount
- work
- title: DiscountOffice.be
icon: favicon
url: https://discountoffice.be/
id: 1_1429_discountofficebe
tags:
- do
- discount
- work
- title: Admin NL
icon: favicon
url: https://discountoffice.nl/administrator
id: 2_1429_adminnl
tags:
- do
- discount
- work
- title: Admin BE
icon: favicon
url: https://discountoffice.be/administrator
id: 3_1429_adminbe
tags:
- do
- discount
- work
- title: Subsites
icon: favicon
url: https://elastomappen.nl
id: 4_1429_subsites
tags:
- do
- discount
- work
- title: Proxmox
icon: http://mennos-desktop:4000/assets/proxmox.svg
url: https://www.transip.nl/cp/vps/prm/350680/
id: 5_1429_proxmox
tags:
- do
- discount
- work
- title: Transip
icon: favicon
url: https://www.transip.nl/cp/vps/prm/350680/
id: 6_1429_transip
tags:
- do
- discount
- work
- title: Kibana
icon: http://mennos-desktop:4000/assets/kibana.svg
url: http://go/kibana
id: 7_1429_kibana
tags:
- do
- discount
- work
- name: Other
items:
- title: Whisparr
icon: http://mennos-desktop:4000/assets/whisparr.svg
url: http://go/whisparr
id: 0_514_whisparr
- title: Stash
icon: http://mennos-desktop:4000/assets/stash.svg
url: http://go/stash
id: 1_514_stash
displayData:
sortBy: default
rows: 1
cols: 1
collapsed: true
hideForGuests: true
appConfig:
layout: auto
iconSize: large
theme: nord
startingView: default
defaultOpeningMethod: sametab
statusCheck: false
statusCheckInterval: 0
routingMode: history
enableMultiTasking: false
widgetsAlwaysUseProxy: false
webSearch:
disableWebSearch: false
searchEngine: https://kagi.com/search?q=
openingMethod: newtab
searchBangs: {}
enableFontAwesome: true
enableMaterialDesignIcons: false
hideComponents:
hideHeading: false
hideNav: true
hideSearch: false
hideSettings: true
hideFooter: false
auth:
enableGuestAccess: false
users: []
enableOidc: false
oidc:
adminRole: "false"
adminGroup: "false"
enableHeaderAuth: false
headerAuth:
userHeader: REMOTE_USER
proxyWhitelist: []
enableKeycloak: false
showSplashScreen: false
preventWriteToDisk: false
preventLocalSave: false
disableConfiguration: false
disableConfigurationForNonAdmin: false
allowConfigEdit: true
enableServiceWorker: false
disableContextMenu: false
disableUpdateChecks: false
disableSmartSort: false
enableErrorReporting: false

View File

@@ -1,44 +0,0 @@
---
- name: Deploy Dashy service
block:
- name: Set Dashy directories
ansible.builtin.set_fact:
dashy_service_dir: "{{ ansible_env.HOME }}/.services/dashy"
dashy_data_dir: "/mnt/services/dashy"
- name: Create Dashy directory
ansible.builtin.file:
path: "{{ dashy_service_dir }}"
state: directory
mode: "0755"
- name: Create Dashy data directory
ansible.builtin.file:
path: "{{ dashy_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Dashy docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ dashy_service_dir }}/docker-compose.yml"
mode: "0644"
register: dashy_compose
- name: Deploy Dashy config.yml
ansible.builtin.template:
src: conf.yml.j2
dest: "{{ dashy_data_dir }}/conf.yml"
mode: "0644"
register: dashy_config
- name: Stop Dashy service
ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" down --remove-orphans
when: dashy_compose.changed or dashy_config.changed
- name: Start Dashy service
ansible.builtin.command: docker compose -f "{{ dashy_service_dir }}/docker-compose.yml" up -d
when: dashy_compose.changed or dashy_config.changed
tags:
- services
- dashy

View File

@@ -1,21 +0,0 @@
services:
dashy:
image: lissy93/dashy:latest
restart: unless-stopped
ports:
- 4000:8080
volumes:
- {{ dashy_data_dir }}:/app/user-data
networks:
- caddy_network
extra_hosts:
- host.docker.internal:host-gateway
deploy:
resources:
limits:
memory: 2G
networks:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,71 +0,0 @@
name: downloaders
services:
gluetun:
image: qmcgaw/gluetun:latest
privileged: true
cap_add:
- NET_ADMIN
networks:
- arr_stack_net
ports:
- 6881:6881
- 6881:6881/udp
- 8085:8085 # Qbittorrent
devices:
- /dev/net/tun:/dev/net/tun
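# The NET_ADMIN capability and the tun device are what gluetun needs to create its VPN tunnel inside the container.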
volumes:
- {{ downloaders_data_dir }}/gluetun-config:/gluetun
environment:
- PUID=1000
- PGID=100
- VPN_SERVICE_PROVIDER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='VPN_SERVICE_PROVIDER') }}
- OPENVPN_USER={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_USER') }}
- OPENVPN_PASSWORD={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='OPENVPN_PASSWORD') }}
- SERVER_COUNTRIES={{ lookup('community.general.onepassword', 'Gluetun', vault='Dotfiles', field='SERVER_COUNTRIES') }}
restart: always
deploy:
resources:
limits:
memory: 512M
sabnzbd:
image: lscr.io/linuxserver/sabnzbd:latest
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
volumes:
- {{ downloaders_data_dir }}/sabnzbd-config:/config
- {{ local_data_dir }}:{{ local_data_dir }}
restart: unless-stopped
ports:
- 7788:8080
deploy:
resources:
limits:
memory: 1G
qbittorrent:
image: lscr.io/linuxserver/qbittorrent
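# Shares gluetun's network namespace so all torrent traffic is forced through the VPN; this is why the WebUI port (8085) is published on the gluetun service above.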
network_mode: "service:gluetun"
environment:
- PUID=1000
- PGID=100
- WEBUI_PORT=8085
- TZ=Europe/Amsterdam
volumes:
- {{ downloaders_data_dir }}/qbit-config:/config
- {{ local_data_dir }}:{{ local_data_dir }}
depends_on:
gluetun:
condition: service_healthy
restart: always
deploy:
resources:
limits:
memory: 1G
networks:
arr_stack_net:
external: true
name: arr_stack_net

View File

@@ -1,47 +0,0 @@
---
- name: Deploy Downloaders service
block:
- name: Set Downloaders directories
ansible.builtin.set_fact:
local_data_dir: "/mnt/data"
downloaders_service_dir: "{{ ansible_env.HOME }}/.services/downloaders"
downloaders_data_dir: "/mnt/services/downloaders"
- name: Create Downloaders directory
ansible.builtin.file:
path: "{{ downloaders_data_dir }}"
state: directory
mode: "0755"
- name: Create Downloaders service directory
ansible.builtin.file:
path: "{{ downloaders_service_dir }}"
state: directory
mode: "0755"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: true
- name: Deploy Downloaders docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ downloaders_service_dir }}/docker-compose.yml"
mode: "0644"
register: downloaders_compose
- name: Ensure arr_stack_net Docker network exists
community.docker.docker_network:
name: arr_stack_net
driver: bridge
state: present
- name: Stop Downloaders service
ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" down --remove-orphans
when: downloaders_compose.changed
- name: Start Downloaders service
ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" up -d
when: downloaders_compose.changed
tags:
- services
- downloaders

View File

@@ -1,23 +0,0 @@
services:
dozzle:
image: amir20/dozzle:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8800:8080
environment:
- DOZZLE_NO_ANALYTICS=true
restart: unless-stopped
networks:
- dozzle-net
- caddy_network
deploy:
resources:
limits:
memory: 256M
networks:
dozzle-net:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,37 +0,0 @@
---
- name: Deploy Dozzle service
block:
- name: Set Dozzle directories
ansible.builtin.set_fact:
dozzle_service_dir: "{{ ansible_env.HOME }}/.services/dozzle"
dozzle_data_dir: "/mnt/services/dozzle"
- name: Create Dozzle directory
ansible.builtin.file:
path: "{{ dozzle_service_dir }}"
state: directory
mode: "0755"
- name: Create Dozzle data directory
ansible.builtin.file:
path: "{{ dozzle_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Dozzle docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ dozzle_service_dir }}/docker-compose.yml"
mode: "0644"
register: dozzle_compose
- name: Stop Dozzle service
ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" down --remove-orphans
when: dozzle_compose.changed
- name: Start Dozzle service
ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" up -d
when: dozzle_compose.changed
tags:
- services
- dozzle

View File

@@ -1,27 +0,0 @@
services:
echoip:
container_name: 'echoip'
image: 'mpolden/echoip:latest'
restart: unless-stopped
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- caddy_network
volumes:
- {{ echoip_data_dir }}/GeoLite2-ASN.mmdb:/opt/echoip/GeoLite2-ASN.mmdb:ro
- {{ echoip_data_dir }}/GeoLite2-City.mmdb:/opt/echoip/GeoLite2-City.mmdb:ro
- {{ echoip_data_dir }}/GeoLite2-Country.mmdb:/opt/echoip/GeoLite2-Country.mmdb:ro
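# Flags (per the upstream echoip README): -l sets the listen address, -H trusts X-Forwarded-For from the reverse proxy, -a/-c/-f load the ASN/City/Country databases, -p/-r enable port and reverse-DNS lookups.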
command: >
-p -r -H "X-Forwarded-For" -l ":8080"
-a /opt/echoip/GeoLite2-ASN.mmdb
-c /opt/echoip/GeoLite2-City.mmdb
-f /opt/echoip/GeoLite2-Country.mmdb
deploy:
resources:
limits:
memory: 128M
networks:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,169 +0,0 @@
---
- name: Deploy EchoIP service
block:
- name: Set EchoIP directories
ansible.builtin.set_fact:
echoip_service_dir: "{{ ansible_env.HOME }}/.services/echoip"
echoip_data_dir: "/mnt/services/echoip"
maxmind_account_id:
"{{ lookup('community.general.onepassword', 'MaxMind',
vault='Dotfiles', field='account_id') | regex_replace('\\s+', '') }}"
maxmind_license_key:
"{{ lookup('community.general.onepassword', 'MaxMind',
vault='Dotfiles', field='license_key') | regex_replace('\\s+', '') }}"
# Requires: gather_facts: true in playbook
- name: Check last update marker file
ansible.builtin.stat:
path: "{{ echoip_data_dir }}/.last_update"
register: echoip_update_marker
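# 86400 seconds = 24 hours; the marker file's mtime records the last refresh attempt, so the databases are fetched at most once per day.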
- name: Determine if update is needed (older than 24h or missing)
ansible.builtin.set_fact:
update_needed: "{{ (not echoip_update_marker.stat.exists) or ((ansible_date_time.epoch | int) - (echoip_update_marker.stat.mtime | default(0) | int) > 86400) }}"
- name: Create EchoIP directory
ansible.builtin.file:
path: "{{ echoip_service_dir }}"
state: directory
mode: "0755"
- name: Create EchoIP data directory
ansible.builtin.file:
path: "{{ echoip_data_dir }}"
state: directory
mode: "0755"
# Only update databases if needed (max once per 24h)
- block:
# Touch the marker file BEFORE attempting download to prevent repeated attempts on failure
- name: Update last update marker file (pre-download)
ansible.builtin.file:
path: "{{ echoip_data_dir }}/.last_update"
state: touch
# Create directories for extracted databases
- name: Create directory for ASN database extraction
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-ASN"
state: directory
mode: "0755"
- name: Create directory for City database extraction
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-City"
state: directory
mode: "0755"
- name: Create directory for Country database extraction
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-Country"
state: directory
mode: "0755"
# Download all databases
- name: Download GeoLite2 ASN database
ansible.builtin.get_url:
url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key={{ maxmind_license_key }}&suffix=tar.gz"
dest: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
mode: "0644"
- name: Download GeoLite2 City database
ansible.builtin.get_url:
url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={{ maxmind_license_key }}&suffix=tar.gz"
dest: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
mode: "0644"
- name: Download GeoLite2 Country database
ansible.builtin.get_url:
url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key={{ maxmind_license_key }}&suffix=tar.gz"
dest: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
mode: "0644"
# Extract all databases
- name: Extract GeoLite2 ASN database
ansible.builtin.unarchive:
src: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
dest: "{{ echoip_data_dir }}/GeoLite2-ASN"
remote_src: true
register: asn_extracted
- name: Extract GeoLite2 City database
ansible.builtin.unarchive:
src: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
dest: "{{ echoip_data_dir }}/GeoLite2-City"
remote_src: true
register: city_extracted
- name: Extract GeoLite2 Country database
ansible.builtin.unarchive:
src: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
dest: "{{ echoip_data_dir }}/GeoLite2-Country"
remote_src: true
register: country_extracted
# Move all databases to the correct locations
- name: Move ASN database to correct location
ansible.builtin.command:
cmd: "find {{ echoip_data_dir }}/GeoLite2-ASN -name GeoLite2-ASN.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-ASN.mmdb \\;"
when: asn_extracted.changed
- name: Move City database to correct location
ansible.builtin.command:
cmd: "find {{ echoip_data_dir }}/GeoLite2-City -name GeoLite2-City.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-City.mmdb \\;"
when: city_extracted.changed
- name: Move Country database to correct location
ansible.builtin.command:
cmd: "find {{ echoip_data_dir }}/GeoLite2-Country -name GeoLite2-Country.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-Country.mmdb \\;"
when: country_extracted.changed
# Clean up unnecessary files
- name: Remove downloaded tar.gz files
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
state: absent
- name: Remove extracted ASN folder
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-ASN"
state: absent
- name: Remove downloaded City tar.gz file
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
state: absent
- name: Remove extracted City folder
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-City"
state: absent
- name: Remove downloaded Country tar.gz file
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
state: absent
- name: Remove extracted Country folder
ansible.builtin.file:
path: "{{ echoip_data_dir }}/GeoLite2-Country"
state: absent
# The marker file was already touched before the downloads, so no further update is needed here
when: update_needed
# Deploy and restart the EchoIP service
- name: Deploy EchoIP docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ echoip_service_dir }}/docker-compose.yml"
mode: "0644"
register: echoip_compose
- name: Stop EchoIP service
ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" down --remove-orphans
when: echoip_compose.changed
- name: Start EchoIP service
ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" up -d
when: echoip_compose.changed
tags:
- services
- echoip

View File

@@ -1,31 +0,0 @@
services:
factorio-server-manager:
image: "ofsm/ofsm:latest"
restart: "unless-stopped"
environment:
- PUID=1000
- PGID=100
- "FACTORIO_VERSION=stable"
- "RCON_PASS=458fc84534"
ports:
- "5080:80"
- "34197:34197/udp"
volumes:
- {{ factorio_data_dir }}/fsm-data:/opt/fsm-data
- {{ factorio_data_dir }}/factorio-data/saves:/opt/factorio/saves
- {{ factorio_data_dir }}/factorio-data/mods:/opt/factorio/mods
- {{ factorio_data_dir }}/factorio-data/config:/opt/factorio/config
- {{ factorio_data_dir }}/factorio-data/mod_packs:/opt/fsm/mod_packs
networks:
- factorio
- caddy_network
deploy:
resources:
limits:
memory: 2G
networks:
factorio:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,31 +0,0 @@
---
- name: Deploy Factorio service
block:
- name: Set Factorio directories
ansible.builtin.set_fact:
factorio_service_dir: "{{ ansible_env.HOME }}/.services/factorio"
factorio_data_dir: "/mnt/services/factorio"
- name: Create Factorio directory
ansible.builtin.file:
path: "{{ factorio_service_dir }}"
state: directory
mode: "0755"
- name: Deploy Factorio docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ factorio_service_dir }}/docker-compose.yml"
mode: "0644"
register: factorio_compose
- name: Stop Factorio service
ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" down --remove-orphans
when: factorio_compose.changed
- name: Start Factorio service
ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" up -d
when: factorio_compose.changed
tags:
- services
- factorio

View File

@@ -1,98 +0,0 @@
# Example configuration file; it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# How many tasks to execute concurrently.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# Whether to skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, you will be asked to input labels.
# If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not the address to listen on, but the address job containers use to connect.
# So 0.0.0.0 is a bad choice; leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when `enabled` is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than starting a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (e.g., --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# Overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent: /tmp/act_runner

View File

@@ -1,66 +0,0 @@
services:
gitea:
image: gitea/gitea:latest
restart: always
environment:
- PUID=1000
- PGID=100
volumes:
- {{ gitea_data_dir }}/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3030:3000"
- "22:22"
networks:
- gitea
- caddy_network
deploy:
resources:
limits:
memory: 1G
postgres:
image: postgres:15-alpine
restart: always
environment:
- PUID=1000
- PGID=100
- POSTGRES_USER=gitea
- POSTGRES_PASSWORD={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='POSTGRES_PASSWORD') }}
- POSTGRES_DB=gitea
volumes:
- {{ gitea_data_dir }}/postgres:/var/lib/postgresql/data
networks:
- gitea
deploy:
resources:
limits:
memory: 1G
act_runner:
image: gitea/act_runner:latest
volumes:
- {{ gitea_service_dir }}/act-runner-config.yaml:/config.yaml
- /var/run/docker.sock:/var/run/docker.sock
- /tmp/act_runner:/tmp/act_runner
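# Mounting the host Docker socket lets the runner start job containers as siblings on the host rather than nesting Docker inside the container.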
environment:
- PUID=1000
- PGID=100
- GITEA_INSTANCE_URL=https://git.mvl.sh
- GITEA_RUNNER_REGISTRATION_TOKEN={{ lookup('community.general.onepassword', 'Gitea', vault='Dotfiles', field='GITEA_RUNNER_REGISTRATION_TOKEN') }}
- GITEA_RUNNER_NAME=act-worker
- CONFIG_FILE=/config.yaml
restart: always
networks:
- gitea
deploy:
resources:
limits:
memory: 2G
networks:
gitea:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,44 +0,0 @@
---
- name: Deploy Gitea service
block:
- name: Set Gitea directories
ansible.builtin.set_fact:
gitea_data_dir: "/mnt/services/gitea"
gitea_service_dir: "{{ ansible_env.HOME }}/.services/gitea"
- name: Create Gitea directories
ansible.builtin.file:
path: "{{ gitea_dir }}"
state: directory
mode: "0755"
loop:
- "{{ gitea_data_dir }}"
- "{{ gitea_service_dir }}"
loop_control:
loop_var: gitea_dir
- name: Deploy Gitea docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ gitea_service_dir }}/docker-compose.yml"
mode: "0644"
register: gitea_compose
- name: Deploy Gitea act-runner-config.yaml
ansible.builtin.template:
src: act-runner-config.yaml.j2
dest: "{{ gitea_service_dir }}/act-runner-config.yaml"
mode: "0644"
register: gitea_act_runner_config
- name: Stop Gitea service
ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" down --remove-orphans
when: gitea_compose.changed or gitea_act_runner_config.changed
- name: Start Gitea service
ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" up -d
when: gitea_compose.changed or gitea_act_runner_config.changed
tags:
- services
- gitea

View File

@@ -1,14 +0,0 @@
name: golink
services:
server:
image: ghcr.io/tailscale/golink:main
user: root
environment:
- TS_AUTHKEY={{ lookup('community.general.onepassword', 'GoLink', vault='Dotfiles', field='TS_AUTHKEY') }}
volumes:
- {{ golink_data_dir }}:/home/nonroot
restart: "unless-stopped"
deploy:
resources:
limits:
memory: 256M

View File

@@ -1,36 +0,0 @@
---
- name: Deploy GoLink service
block:
- name: Set GoLink directories
ansible.builtin.set_fact:
golink_data_dir: "/mnt/services/golink"
golink_service_dir: "{{ ansible_env.HOME }}/.services/golink"
- name: Create GoLink directories
ansible.builtin.file:
path: "{{ golink_dir }}"
state: directory
mode: "0755"
loop:
- "{{ golink_data_dir }}"
- "{{ golink_service_dir }}"
loop_control:
loop_var: golink_dir
- name: Deploy GoLink docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ golink_service_dir }}/docker-compose.yml"
mode: "0644"
register: golink_compose
- name: Stop GoLink service
ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" down --remove-orphans
when: golink_compose.changed
- name: Start GoLink service
ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" up -d
when: golink_compose.changed
tags:
- services
- golink

View File

@@ -1,21 +0,0 @@
services:
homeassistant:
container_name: homeassistant
image: "ghcr.io/home-assistant/home-assistant:stable"
volumes:
- "/var/run/dbus:/run/dbus:ro"
- {{ homeassistant_data_dir }}:/config
- /var/run/docker.sock:/var/run/docker.sock
environment:
- TZ=Europe/Amsterdam
- PUID=1000
- PGID=1000
restart: unless-stopped
privileged: true
network_mode: host
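# Host networking enables mDNS/SSDP device discovery; the USB passthrough below is presumably a Zigbee/Z-Wave coordinator stick.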
devices:
- /dev/ttyUSB0:/dev/ttyUSB0
deploy:
resources:
limits:
memory: 2G

View File

@@ -1,36 +0,0 @@
---
- name: Deploy Home Assistant service
block:
- name: Set Home Assistant directories
ansible.builtin.set_fact:
homeassistant_data_dir: "/mnt/services/homeassistant"
homeassistant_service_dir: "{{ ansible_env.HOME }}/.services/homeassistant"
- name: Create Home Assistant directories
ansible.builtin.file:
path: "{{ homeassistant_dir }}"
state: directory
mode: "0755"
loop:
- "{{ homeassistant_data_dir }}"
- "{{ homeassistant_service_dir }}"
loop_control:
loop_var: homeassistant_dir
- name: Deploy Home Assistant docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ homeassistant_service_dir }}/docker-compose.yml"
mode: "0644"
register: homeassistant_compose
- name: Stop Home Assistant service
ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" down --remove-orphans
when: homeassistant_compose.changed
- name: Start Home Assistant service
ansible.builtin.command: docker compose -f "{{ homeassistant_service_dir }}/docker-compose.yml" up -d
when: homeassistant_compose.changed
tags:
- services
- homeassistant

View File

@@ -1,123 +0,0 @@
services:
immich:
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
volumes:
- {{ immich_data_dir }}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
environment:
- TZ=Europe/Amsterdam
- PUID=1000
- PGID=100
- NVIDIA_VISIBLE_DEVICES=all
- NVIDIA_DRIVER_CAPABILITIES=all
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich
- caddy_network
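# GPU access assumes the NVIDIA Container Toolkit is installed on the host; 'runtime: nvidia' plus the device reservation below expose the GPU for hardware transcoding.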
runtime: nvidia
deploy:
resources:
limits:
memory: 4G
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
machine-learning:
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
volumes:
- model-cache:/cache
env_file:
- .env
environment:
- NVIDIA_VISIBLE_DEVICES=all
- NVIDIA_DRIVER_CAPABILITIES=all
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich
runtime: nvidia
deploy:
resources:
limits:
memory: 8G
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
healthcheck:
test: redis-cli ping || exit 1
restart: unless-stopped
networks:
- immich
deploy:
resources:
limits:
memory: 1G
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
environment:
PUID: 1000
PGID: 1000
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
- {{ immich_database_dir }}:/var/lib/postgresql/data
healthcheck:
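# Verifies the database accepts connections and that pg_stat_database reports zero checksum failures (data checksums were enabled via POSTGRES_INITDB_ARGS).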
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command:
[
'postgres',
'-c',
'shared_preload_libraries=vectors.so',
'-c',
'search_path="$$user", public, vectors',
'-c',
'logging_collector=on',
'-c',
'max_wal_size=2GB',
'-c',
'shared_buffers=512MB',
'-c',
'wal_compression=on',
]
restart: unless-stopped
networks:
- immich
deploy:
resources:
limits:
memory: 2G
volumes:
model-cache:
networks:
immich:
caddy_network:
external: true
name: caddy_default

View File

@@ -1,10 +0,0 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Europe/Amsterdam
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
DB_USERNAME=postgres
DB_PASSWORD=postgres
DB_DATABASE_NAME=immich

View File

@@ -1,44 +0,0 @@
---
- name: Deploy Immich service
block:
- name: Set Immich directories
ansible.builtin.set_fact:
immich_data_dir: "/mnt/data/photos/immich-library"
immich_database_dir: "/mnt/services/immich/postgres"
immich_service_dir: "{{ ansible_env.HOME }}/.services/immich"
- name: Create Immich directories
ansible.builtin.file:
path: "{{ immich_dir }}"
state: directory
mode: "0755"
loop:
- "{{ immich_data_dir }}"
- "{{ immich_service_dir }}"
loop_control:
loop_var: immich_dir
- name: Deploy Immich docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ immich_service_dir }}/docker-compose.yml"
mode: "0644"
register: immich_compose
- name: Deploy Immich .env
ansible.builtin.template:
src: dotenv.j2
dest: "{{ immich_service_dir }}/.env"
mode: "0644"
register: immich_env
- name: Stop Immich service
ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" down --remove-orphans
when: immich_compose.changed or immich_env.changed
- name: Start Immich service
ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" up -d
when: immich_compose.changed or immich_env.changed
tags:
- services
- immich

View File

@@ -1,73 +0,0 @@
services:
nextcloud:
image: nextcloud
container_name: nextcloud
restart: unless-stopped
networks:
- nextcloud
- caddy_network
depends_on:
- nextclouddb
- redis
ports:
- 8081:80
volumes:
- {{ nextcloud_data_dir }}/nextcloud/html:/var/www/html
- {{ nextcloud_data_dir }}/nextcloud/custom_apps:/var/www/html/custom_apps
- {{ nextcloud_data_dir }}/nextcloud/config:/var/www/html/config
- {{ nextcloud_data_dir }}/nextcloud/data:/var/www/html/data
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
- MYSQL_HOST=nextclouddb
- REDIS_HOST=redis
deploy:
resources:
limits:
memory: 2G
nextclouddb:
image: mariadb:11.4.7
container_name: nextcloud-db
restart: unless-stopped
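# READ-COMMITTED isolation and ROW-based binlogs are the MariaDB settings Nextcloud's documentation calls for.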
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
networks:
- nextcloud
volumes:
- {{ nextcloud_data_dir }}/database:/var/lib/mysql
environment:
- PUID=1000
- PGID=100
- TZ=Europe/Amsterdam
- MYSQL_RANDOM_ROOT_PASSWORD=true
- MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
deploy:
resources:
limits:
memory: 1G
redis:
image: redis:alpine
container_name: redis
volumes:
- {{ nextcloud_data_dir }}/redis:/data
networks:
- nextcloud
deploy:
resources:
limits:
memory: 512M
networks:
nextcloud:
name: nextcloud
driver: bridge
caddy_network:
name: caddy_default
external: true

View File

@@ -1,31 +0,0 @@
---
- name: Deploy Nextcloud service
block:
- name: Set Nextcloud directories
ansible.builtin.set_fact:
nextcloud_service_dir: "{{ ansible_env.HOME }}/.services/nextcloud"
nextcloud_data_dir: "/mnt/services/nextcloud"
- name: Create Nextcloud directory
ansible.builtin.file:
path: "{{ nextcloud_service_dir }}"
state: directory
mode: "0755"
- name: Deploy Nextcloud docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ nextcloud_service_dir }}/docker-compose.yml"
mode: "0644"
register: nextcloud_compose
- name: Stop Nextcloud service
ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" down --remove-orphans
when: nextcloud_compose.changed
- name: Start Nextcloud service
ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" up -d
when: nextcloud_compose.changed
tags:
- services
- nextcloud

Some files were not shown because too many files have changed in this diff.