Merge branch 'master' of ssh://git.mvl.sh/vleeuwenmenno/dotfiles
@@ -255,8 +255,53 @@ def check_service_running(service_name):
     return len(containers)


+def get_systemd_timer_status(timer_name):
+    """Check if a systemd timer is active and enabled, and get next run time"""
+    # Check if timer is active (running/waiting)
+    active_result = subprocess.run(
+        ["sudo", "systemctl", "is-active", timer_name],
+        capture_output=True,
+        text=True
+    )
+
+    # Check if timer is enabled (will start on boot)
+    enabled_result = subprocess.run(
+        ["sudo", "systemctl", "is-enabled", timer_name],
+        capture_output=True,
+        text=True
+    )
+
+    # Check corresponding service status
+    service_name = timer_name.replace('.timer', '.service')
+    service_result = subprocess.run(
+        ["sudo", "systemctl", "is-active", service_name],
+        capture_output=True,
+        text=True
+    )
+
+    # Get next run time
+    list_result = subprocess.run(
+        ["sudo", "systemctl", "list-timers", timer_name, "--no-legend"],
+        capture_output=True,
+        text=True
+    )
+
+    is_active = active_result.returncode == 0
+    is_enabled = enabled_result.returncode == 0
+    service_status = service_result.stdout.strip() if service_result else "unknown"
+
+    next_run = "unknown"
+    if list_result.returncode == 0 and list_result.stdout.strip():
+        parts = list_result.stdout.strip().split()
+        if len(parts) >= 4:
+            next_run = f"{parts[0]} {parts[1]} {parts[2]}"
+
+    return is_active, is_enabled, next_run, service_status
+
+
 def cmd_list(args):
-    """List available Docker services"""
+    """List available Docker services and systemd services"""
+    # Docker services section
     if not os.path.exists(SERVICES_DIR):
         printfe("red", f"Error: Services directory not found at {SERVICES_DIR}")
         return 1
@@ -270,21 +315,46 @@ def cmd_list(args):
     if not services:
         printfe("yellow", "No Docker services found")
-        return 0
-
-    println("Available Docker services:", "blue")
-    for service in sorted(services):
-        container_count = check_service_running(service)
-        is_running = container_count > 0
-
-        if is_running:
-            status = f"[RUNNING - {container_count} container{'s' if container_count > 1 else ''}]"
-            color = "green"
-        else:
-            status = "[STOPPED]"
-            color = "red"
-
-        printfe(color, f" - {service:<20} {status}")
-
-    return 0
+    else:
+        println("Available Docker services:", "blue")
+        for service in sorted(services):
+            container_count = check_service_running(service)
+            is_running = container_count > 0
+
+            if is_running:
+                status = f"[RUNNING - {container_count} container{'s' if container_count > 1 else ''}]"
+                color = "green"
+            else:
+                status = "[STOPPED]"
+                color = "red"
+
+            printfe(color, f" - {service:<20} {status}")
+
+    # Systemd services section
+    print()
+    println("System services:", "blue")
+
+    systemd_timers = ["borg-backup.timer", "borg-local-sync.timer", "dynamic-dns.timer"]
+
+    for timer in systemd_timers:
+        is_active, is_enabled, next_run, service_status = get_systemd_timer_status(timer)
+        service_name = timer.replace('.timer', '')
+
+        if service_status in ["activating", "active"]:
+            # Service is currently running
+            status = f"[🔄 RUNNING - next: {next_run}]"
+            color = "yellow"
+        elif is_active and is_enabled:
+            status = f"[TIMER ACTIVE - next: {next_run}]"
+            color = "green"
+        elif is_enabled:
+            status = "[TIMER ENABLED - INACTIVE]"
+            color = "yellow"
+        else:
+            status = "[TIMER DISABLED]"
+            color = "red"
+
+        printfe(color, f" - {service_name:<20} {status}")
+
+    return 0
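Note: the next_run parsing in get_systemd_timer_status assumes the first three whitespace-separated columns of `systemctl list-timers --no-legend` are the day, date and time of the next activation. A minimal Python sketch of that parsing step; the sample line is hypothetical and the real column layout can vary between systemd versions:

    # Hypothetical `systemctl list-timers --no-legend` line, for illustration only.
    sample = "Mon 2025-01-06 02:00:00 CET 8h left Sun 2025-01-05 02:00:14 CET 15h ago borg-backup.timer borg-backup.service"

    parts = sample.split()
    next_run = "unknown"
    if len(parts) >= 4:
        # Day-of-week, date and time of the next trigger
        next_run = f"{parts[0]} {parts[1]} {parts[2]}"

    print(next_run)  # -> Mon 2025-01-06 02:00:00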
81 bin/actions/source.py Executable file
@@ -0,0 +1,81 @@
#!/usr/bin/env python3

import os
import sys
import subprocess

# Add the helpers directory to the path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'helpers'))
from functions import printfe


def get_borg_passphrase():
    """Get Borg passphrase from 1Password"""
    try:
        result = subprocess.run(
            ["op", "item", "get", "Borg Backup", "--vault=Dotfiles", "--fields=password", "--reveal"],
            capture_output=True,
            text=True,
            check=True
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError:
        printfe("red", "Error: Failed to retrieve Borg passphrase from 1Password")
        return None


def main():
    """Generate export commands for Borg environment variables"""
    args = sys.argv[1:] if len(sys.argv) > 1 else []

    # Get passphrase from 1Password
    passphrase = get_borg_passphrase()
    if not passphrase:
        return 1

    # Generate the export commands
    exports = [
        f'export BORG_REPO="/mnt/object_storage/borg-repo"',
        f'export BORG_PASSPHRASE="{passphrase}"',
        f'export BORG_CACHE_DIR="/home/menno/.config/borg/cache"',
        f'export BORG_CONFIG_DIR="/home/menno/.config/borg/config"',
        f'export BORG_SECURITY_DIR="/home/menno/.config/borg/security"',
        f'export BORG_KEYS_DIR="/home/menno/.config/borg/keys"'
    ]

    # Check if we're being eval'd (no arguments and stdout is a pipe)
    if not args and not os.isatty(sys.stdout.fileno()):
        # Just output the export commands for eval
        for export_cmd in exports:
            print(export_cmd)
        return 0

    # Print instructions and examples
    printfe("cyan", "🔧 Borg Environment Setup")
    print()
    printfe("yellow", "Run the following command to setup your shell:")
    print()
    printfe("green", "eval $(dotf source)")
    print()
    printfe("red", "⚠️ Repository Permission Issue:")
    printfe("white", "The Borg repository was created by root, so you need sudo:")
    print()
    printfe("green", "sudo -E borg list")
    printfe("green", "sudo -E borg info")
    print()
    printfe("yellow", "Or copy and paste these exports:")
    print()

    # Output the export commands
    for export_cmd in exports:
        print(export_cmd)

    print()
    printfe("cyan", "📋 Borg commands (use with sudo -E):")
    printfe("white", " sudo -E borg list # List all backups")
    printfe("white", " sudo -E borg info # Repository info")
    printfe("white", " sudo -E borg list ::archive-name # List files in backup")
    printfe("white", " sudo -E borg mount . ~/borg-mount # Mount as filesystem")

    return 0


if __name__ == "__main__":
    sys.exit(main())
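Note: the "being eval'd" check above relies on stdout not being a terminal when the script runs inside `eval $(dotf source)`. A minimal sketch of that detection, assuming nothing beyond the Python standard library:

    import os
    import sys

    # When stdout is a pipe (e.g. inside `eval $(...)`), emit machine-readable
    # export lines only; otherwise print human-readable instructions.
    if not os.isatty(sys.stdout.fileno()):
        print('export EXAMPLE_VAR="value"')  # EXAMPLE_VAR is a placeholder, not a real variable
    else:
        print("Run: eval $(dotf source)")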
122 bin/actions/timers.py Executable file
@@ -0,0 +1,122 @@
#!/usr/bin/env python3

import os
import subprocess
import sys

# Add the helpers directory to the path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'helpers'))
from functions import printfe


def run_command(cmd, capture_output=True):
    """Run a command and return the result"""
    try:
        result = subprocess.run(cmd, shell=True, capture_output=capture_output, text=True)
        return result
    except Exception as e:
        printfe("red", f"Error running command: {e}")
        return None


def show_timer_status(timer_name, system_level=True):
    """Show concise status for a specific timer"""
    cmd_prefix = "sudo systemctl" if system_level else "systemctl --user"

    # Get timer status
    status_cmd = f"{cmd_prefix} is-active {timer_name}"
    status_result = run_command(status_cmd)
    timer_status = "active" if status_result and status_result.returncode == 0 else "inactive"

    # Get corresponding service status
    service_name = timer_name.replace('.timer', '.service')
    service_cmd = f"{cmd_prefix} is-active {service_name}"
    service_result = run_command(service_cmd)
    service_status = service_result.stdout.strip() if service_result else "unknown"

    # Get next run time
    list_cmd = f"{cmd_prefix} list-timers {timer_name} --no-legend"
    list_result = run_command(list_cmd)
    next_run = "unknown"

    if list_result and list_result.returncode == 0 and list_result.stdout.strip():
        parts = list_result.stdout.strip().split()
        if len(parts) >= 4:
            next_run = f"{parts[0]} {parts[1]} {parts[2]} ({parts[3]})"

    # Format output based on service status
    service_short = service_name.replace('.service', '')

    if service_status in ["activating", "active"]:
        # Service is currently running
        status_color = "yellow"
        status_text = f"RUNNING next: {next_run}"
        symbol = "🔄"
    elif timer_status == "active":
        # Timer is active but service is not running
        status_color = "green"
        status_text = f"active next: {next_run}"
        symbol = "●"
    else:
        # Timer is inactive
        status_color = "red"
        status_text = f"inactive next: {next_run}"
        symbol = "●"

    printfe(status_color, f"{symbol} {service_short:<12} {status_text}")


def show_examples():
    """Show example commands for checking services and logs"""
    printfe("cyan", "=== Useful Commands ===")
    print()

    printfe("yellow", "Check service status:")
    print(" sudo systemctl status borg-backup.service")
    print(" sudo systemctl status borg-local-sync.service")
    print(" sudo systemctl status dynamic-dns.service")
    print()

    printfe("yellow", "View logs:")
    print(" sudo journalctl -u borg-backup.service -f")
    print(" sudo journalctl -u borg-local-sync.service -f")
    print(" sudo journalctl -u dynamic-dns.service -f")
    print(" tail -f /var/log/borg-backup.log")
    print(" tail -f /var/log/borg-local-sync.log")
    print()

    printfe("yellow", "Manual trigger:")
    print(" sudo systemctl start borg-backup.service")
    print(" sudo systemctl start borg-local-sync.service")
    print(" sudo systemctl start dynamic-dns.service")
    print()

    printfe("yellow", "List all timers:")
    print(" sudo systemctl list-timers")
    print()


def main():
    """Main timers action"""
    args = sys.argv[1:] if len(sys.argv) > 1 else []

    printfe("cyan", "🕐 System Timers")
    print()

    # Show timer statuses
    timers = [
        ("borg-backup.timer", True),
        ("borg-local-sync.timer", True),
        ("dynamic-dns.timer", True)
    ]

    for timer_name, system_level in timers:
        if os.path.exists(f"/etc/systemd/system/{timer_name}"):
            show_timer_status(timer_name, system_level)
        else:
            printfe("yellow", f" {timer_name.replace('.timer', ''):<12} not found")

    print()
    # Show helpful examples
    show_examples()

    return 0


if __name__ == "__main__":
    sys.exit(main())
14 bin/dotf
@@ -27,7 +27,7 @@ def run_script(script_path, args):
     if not os.path.isfile(script_path) or not os.access(script_path, os.X_OK):
         printfe("red", f"Error: Script not found or not executable: {script_path}")
         return 1

     result = subprocess.run([script_path] + args, env={**os.environ, "DOTFILES_PATH": DOTFILES_PATH})
     return result.returncode

@@ -59,6 +59,14 @@ def lint(args):
     """Run the lint action"""
     return run_script(f"{DOTFILES_BIN}/actions/lint.py", args)


+def timers(args):
+    """Run the timers action"""
+    return run_script(f"{DOTFILES_BIN}/actions/timers.py", args)
+
+
+def source(args):
+    """Run the source action"""
+    return run_script(f"{DOTFILES_BIN}/actions/source.py", args)
+
+
 def ensure_git_hooks():
     """Ensure git hooks are correctly set up"""
     hooks_dir = os.path.join(DOTFILES_ROOT, ".git/hooks")
@@ -114,7 +122,9 @@ def main():
         "secrets": secrets,
         "auto-start": auto_start,
         "service": service,
-        "lint": lint
+        "lint": lint,
+        "timers": timers,
+        "source": source
     }

     if command in commands:
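Note: the new "timers" and "source" entries hook into the same name-to-function dispatch that bin/dotf already uses for its other actions. A stripped-down sketch of that pattern (the helper body here is illustrative, not the actual dotf code):

    import sys

    def timers(args):
        # Stand-in for run_script(f"{DOTFILES_BIN}/actions/timers.py", args)
        print("timers action", args)
        return 0

    commands = {"timers": timers}

    def main():
        command = sys.argv[1] if len(sys.argv) > 1 else ""
        if command in commands:
            return commands[command](sys.argv[2:])
        print(f"Unknown command: {command}")
        return 1

    if __name__ == "__main__":
        sys.exit(main())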
@@ -10,3 +10,21 @@
     name: ssh
     state: restarted
     enabled: true
+
+- name: reload systemd
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+
+- name: restart borg-local-sync
+  become: true
+  ansible.builtin.systemd:
+    name: borg-local-sync.service
+    enabled: true
+
+- name: restart borg-local-sync-timer
+  become: true
+  ansible.builtin.systemd:
+    name: borg-local-sync.timer
+    state: restarted
+    enabled: true
93 config/ansible/tasks/servers/borg-backup.yml Normal file
@@ -0,0 +1,93 @@
---
- name: Borg Backup Installation and Configuration
  block:
    - name: Check if Borg is already installed
      ansible.builtin.command: which borg
      register: borg_check
      ignore_errors: true
      changed_when: false

    - name: Ensure Borg is installed
      ansible.builtin.package:
        name: borg
        state: present
      become: true
      when: borg_check.rc != 0

    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg directories
      ansible.builtin.file:
        path: "{{ borg_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ borg_config_dir }}"
        - "/mnt/object_storage"
      loop_control:
        loop_var: borg_dir
      become: true

    - name: Check if Borg repository exists
      ansible.builtin.stat:
        path: "{{ borg_repo_dir }}/config"
      register: borg_repo_check
      become: true

    - name: Initialize Borg repository
      ansible.builtin.command: >
        borg init --encryption=repokey {{ borg_repo_dir }}
      environment:
        BORG_PASSPHRASE: "{{ borg_passphrase }}"
      become: true
      when: not borg_repo_check.stat.exists

    - name: Create Borg backup script
      ansible.builtin.template:
        src: templates/borg-backup.sh.j2
        dest: "{{ borg_config_dir }}/backup.sh"
        mode: "0755"
      become: true

    - name: Create Borg systemd service
      ansible.builtin.template:
        src: templates/borg-backup.service.j2
        dest: /etc/systemd/system/borg-backup.service
        mode: "0644"
      become: true
      register: borg_service

    - name: Create Borg systemd timer
      ansible.builtin.template:
        src: templates/borg-backup.timer.j2
        dest: /etc/systemd/system/borg-backup.timer
        mode: "0644"
      become: true
      register: borg_timer

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
      become: true
      when: borg_service.changed or borg_timer.changed

    - name: Enable and start Borg backup timer
      ansible.builtin.systemd:
        name: borg-backup.timer
        enabled: true
        state: started
      become: true

    - name: Display Borg backup status
      ansible.builtin.debug:
        msg: "Borg backup is configured and will run daily at 2 AM. Logs available at /var/log/borg-backup.log"

  tags:
    - borg-backup
    - borg
    - backup
95 config/ansible/tasks/servers/borg-local-sync.yml Normal file
@@ -0,0 +1,95 @@
---
- name: Borg Local Sync Installation and Configuration
  block:
    - name: Set Borg backup facts
      ansible.builtin.set_fact:
        borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
        borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
        borg_backup_dir: "/mnt/services"
        borg_repo_dir: "/mnt/object_storage/borg-repo"

    - name: Create Borg local sync script
      template:
        src: borg-local-sync.sh.j2
        dest: /usr/local/bin/borg-local-sync.sh
        mode: "0755"
        owner: root
        group: root
      become: yes
      tags:
        - borg-local-sync

    - name: Create Borg local sync systemd service
      template:
        src: borg-local-sync.service.j2
        dest: /etc/systemd/system/borg-local-sync.service
        mode: "0644"
        owner: root
        group: root
      become: yes
      notify:
        - reload systemd
      tags:
        - borg-local-sync

    - name: Create Borg local sync systemd timer
      template:
        src: borg-local-sync.timer.j2
        dest: /etc/systemd/system/borg-local-sync.timer
        mode: "0644"
        owner: root
        group: root
      become: yes
      notify:
        - reload systemd
        - restart borg-local-sync-timer
      tags:
        - borg-local-sync

    - name: Create log file for Borg local sync
      file:
        path: /var/log/borg-local-sync.log
        state: touch
        owner: root
        group: root
        mode: "0644"
      become: yes
      tags:
        - borg-local-sync

    - name: Enable and start Borg local sync timer
      systemd:
        name: borg-local-sync.timer
        enabled: yes
        state: started
        daemon_reload: yes
      become: yes
      tags:
        - borg-local-sync

    - name: Add logrotate configuration for Borg local sync
      copy:
        content: |
          /var/log/borg-local-sync.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 root root
          }
        dest: /etc/logrotate.d/borg-local-sync
        mode: "0644"
        owner: root
        group: root
      become: yes
      tags:
        - borg-local-sync
        - borg
        - backup

  tags:
    - borg-local-sync
    - borg
    - backup
@@ -1,47 +1,29 @@
 ---
 - name: Dynamic DNS setup
   block:
-    - name: Create environment file for dynamic DNS
+    - name: Create systemd environment file for dynamic DNS
       ansible.builtin.template:
-        src: "{{ playbook_dir }}/templates/dynamic-dns.env.j2"
-        dest: "{{ ansible_user_dir }}/.local/bin/dynamic-dns.env"
+        src: "{{ playbook_dir }}/templates/dynamic-dns-systemd.env.j2"
+        dest: "/etc/dynamic-dns-systemd.env"
         mode: "0600"
+        owner: root
+        group: root
+      become: true

     - name: Create dynamic DNS wrapper script
       ansible.builtin.copy:
-        dest: "{{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh"
+        dest: "/usr/local/bin/dynamic-dns-update.sh"
         mode: "0755"
         content: |
           #!/bin/bash
-
-          # Load environment variables
-          source {{ ansible_user_dir }}/.local/bin/dynamic-dns.env
-
-          # Change to the directory containing the binary
-          cd {{ ansible_user_dir }}/.local/bin
-
           # Run dynamic DNS update (binary compiled by utils.yml)
-          dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
-
-    - name: Setup cron job for dynamic DNS updates (fallback)
-      ansible.builtin.cron:
-        name: "Dynamic DNS Update"
-        minute: "*/15"
-        job: "{{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh"
-        user: "{{ ansible_user }}"
-        state: present
-      ignore_errors: true
-      tags: [cron]
-
-    - name: Create systemd user directory
-      ansible.builtin.file:
-        path: "{{ ansible_user_dir }}/.config/systemd/user"
-        state: directory
-        mode: "0755"
+          {{ ansible_user_dir }}/.local/bin/dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
+      become: true

     - name: Create dynamic DNS systemd timer
       ansible.builtin.copy:
-        dest: "{{ ansible_user_dir }}/.config/systemd/user/dynamic-dns.timer"
+        dest: "/etc/systemd/system/dynamic-dns.timer"
         mode: "0644"
         content: |
           [Unit]
@@ -54,10 +36,12 @@
 
           [Install]
           WantedBy=timers.target
+      become: true
+      register: ddns_timer

     - name: Create dynamic DNS systemd service
       ansible.builtin.copy:
-        dest: "{{ ansible_user_dir }}/.config/systemd/user/dynamic-dns.service"
+        dest: "/etc/systemd/system/dynamic-dns.service"
         mode: "0644"
         content: |
           [Unit]
@@ -67,31 +51,36 @@
 
           [Service]
           Type=oneshot
-          ExecStart={{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh
-          EnvironmentFile={{ ansible_user_dir }}/.local/bin/dynamic-dns.env
+          ExecStart=/usr/local/bin/dynamic-dns-update.sh
+          EnvironmentFile=/etc/dynamic-dns-systemd.env
+          User={{ ansible_user }}
+          Group={{ ansible_user }}

           [Install]
-          WantedBy=default.target
+          WantedBy=multi-user.target
+      become: true
+      register: ddns_service

-    - name: Reload systemd user daemon
+    - name: Reload systemd daemon
       ansible.builtin.systemd:
         daemon_reload: true
-        scope: user
+      become: true
+      when: ddns_timer.changed or ddns_service.changed

     - name: Enable and start dynamic DNS timer
       ansible.builtin.systemd:
         name: dynamic-dns.timer
         enabled: true
         state: started
-        scope: user
+      become: true

     - name: Display setup completion message
       ansible.builtin.debug:
         msg: |
           Dynamic DNS setup complete!
-          - Systemd timer: systemctl --user status dynamic-dns.timer
-          - Check logs: journalctl --user -u dynamic-dns.service -f
-          - Manual run: ~/.local/bin/dynamic-dns-update.sh
+          - Systemd timer: sudo systemctl status dynamic-dns.timer
+          - Check logs: sudo journalctl -u dynamic-dns.service -f
+          - Manual run: sudo /usr/local/bin/dynamic-dns-update.sh
           - Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl

   when: inventory_hostname == 'mennos-cachyos-desktop'
@@ -30,6 +30,16 @@
   tags:
     - dynamic-dns

+- name: Include Borg Backup tasks
+  ansible.builtin.include_tasks: borg-backup.yml
+  tags:
+    - borg-backup
+
+- name: Include Borg Local Sync tasks
+  ansible.builtin.include_tasks: borg-local-sync.yml
+  tags:
+    - borg-local-sync
+
 - name: System performance optimizations
   ansible.posix.sysctl:
     name: "{{ item.name }}"
@@ -130,3 +140,7 @@
     enabled: true
     hosts:
       - mennos-cachyos-desktop
+  - name: avorion
+    enabled: true
+    hosts:
+      - mennos-cachyos-desktop
37 config/ansible/tasks/servers/services/avorion/avorion.yml Normal file
@@ -0,0 +1,37 @@
---
- name: Deploy Avorion service
  block:
    - name: Set Avorion directories
      ansible.builtin.set_fact:
        avorion_service_dir: "{{ ansible_env.HOME }}/.services/avorion"
        avorion_data_dir: "/mnt/services/avorion"

    - name: Create Avorion directory
      ansible.builtin.file:
        path: "{{ avorion_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Avorion data directory
      ansible.builtin.file:
        path: "{{ avorion_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Avorion docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ avorion_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: avorion_compose

    - name: Stop Avorion service
      ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" down --remove-orphans
      when: avorion_compose.changed

    - name: Start Avorion service
      ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" up -d
      when: avorion_compose.changed

  tags:
    - services
    - avorion
@@ -0,0 +1,11 @@
services:
  avorion:
    image: rfvgyhn/avorion:2.5.8.42638
    volumes:
      - {{ avorion_data_dir }}:/home/steam/.avorion/galaxies/avorion_galaxy
    ports:
      - 27000:27000
      - 27000:27000/udp
      - 27003:27003/udp
      - 27020:27020/udp
      - 27021:27021/udp
31 config/ansible/templates/borg-backup.service.j2 Normal file
@@ -0,0 +1,31 @@
[Unit]
Description=Borg Backup Service
After=network.target

[Service]
Type=oneshot
User=root
Group=root
ExecStart={{ borg_config_dir }}/backup.sh
StandardOutput=journal
StandardError=journal
Environment="BORG_PASSPHRASE={{ borg_passphrase }}"
Environment="BORG_REPO={{ borg_repo_dir }}"
Environment="BORG_CACHE_DIR={{ borg_config_dir }}/cache"
Environment="BORG_CONFIG_DIR={{ borg_config_dir }}/config"
Environment="BORG_SECURITY_DIR={{ borg_config_dir }}/security"
Environment="BORG_KEYS_DIR={{ borg_config_dir }}/keys"

# Security settings
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ReadWritePaths=/mnt/services /mnt/object_storage /var/log {{ borg_config_dir }}
ProtectHome=read-only
ProtectControlGroups=true
RestrictRealtime=true
SystemCallFilter=@system-service
SystemCallErrorNumber=EPERM

[Install]
WantedBy=multi-user.target
157 config/ansible/templates/borg-backup.sh.j2 Normal file
@@ -0,0 +1,157 @@
#!/bin/bash

# Borg backup script for /mnt/services
# This script creates incremental backups of the services directory

# Set environment variables
export BORG_REPO="{{ borg_repo_dir }}"
export BORG_PASSPHRASE="{{ borg_passphrase }}"
export BORG_CACHE_DIR="{{ borg_config_dir }}/cache"
export BORG_CONFIG_DIR="{{ borg_config_dir }}/config"
export BORG_SECURITY_DIR="{{ borg_config_dir }}/security"
export BORG_KEYS_DIR="{{ borg_config_dir }}/keys"

# Telegram notification variables
export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='password') }}"
export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='chat_id') }}"

# Backup name with timestamp
BACKUP_NAME="services-$(date +%Y%m%d-%H%M%S)"

# Log function
log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a /var/log/borg-backup.log
}

# Telegram notification function
send_telegram() {
    local message="$1"
    local silent="${2:-false}"

    if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
        log "Telegram credentials not configured, skipping notification"
        return
    fi

    local payload=$(cat <<EOF
{
    "chat_id": "$TELEGRAM_CHAT_ID",
    "text": "$message",
    "parse_mode": "HTML",
    "disable_notification": $silent
}
EOF
)

    curl -s -X POST \
        -H "Content-Type: application/json" \
        -d "$payload" \
        "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" > /dev/null 2>&1

    if [ $? -eq 0 ]; then
        log "Telegram notification sent successfully"
    else
        log "Failed to send Telegram notification"
    fi
}

# Ensure all Borg directories exist
mkdir -p "$BORG_CACHE_DIR"
mkdir -p "$BORG_CONFIG_DIR"
mkdir -p "$BORG_SECURITY_DIR"
mkdir -p "$BORG_KEYS_DIR"

# Start backup
log "Starting Borg backup: $BACKUP_NAME"

# Create backup
borg create \
    --verbose \
    --filter AME \
    --list \
    --stats \
    --show-rc \
    --compression lz4 \
    --exclude-caches \
    --exclude '*.tmp' \
    --exclude '*.temp' \
    --exclude '*.log' \
    --exclude '*/.cache' \
    --exclude '*/cache' \
    --exclude '*/logs' \
    --exclude '*/tmp' \
    --exclude '*/node_modules' \
    --exclude '*/__pycache__' \
    "::$BACKUP_NAME" \
    {{ borg_backup_dir }}

backup_exit=$?

log "Backup finished with exit code: $backup_exit"

# Prune old backups (keep last 7 daily, 4 weekly, 6 monthly)
log "Pruning old backups"

# Check if there are any archives to prune first
archive_count=$(borg list --short --prefix 'services-' 2>/dev/null | wc -l)

if [ "$archive_count" -gt 1 ]; then
    borg prune \
        --list \
        --prefix 'services-' \
        --show-rc \
        --keep-daily 7 \
        --keep-weekly 4 \
        --keep-monthly 6
    prune_exit=$?
else
    log "Only one or no archives found, skipping prune"
    prune_exit=0
fi

log "Prune finished with exit code: $prune_exit"

# Compact repository
log "Compacting repository"
borg compact

compact_exit=$?

log "Compact finished with exit code: $compact_exit"

# Global exit status
global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit ))
global_exit=$(( compact_exit > global_exit ? compact_exit : global_exit ))

if [ $global_exit -eq 0 ]; then
    log "Backup completed successfully"
    send_telegram "🔒 <b>Borg Backup Success</b>

✅ Backup: $BACKUP_NAME completed successfully
📊 Repository: {{ borg_repo_dir }}
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')

All operations completed without errors." "true"
elif [ $global_exit -eq 1 ]; then
    log "Backup completed with warnings (exit code: $global_exit)"
    send_telegram "⚠️ <b>Borg Backup Warning</b>

⚠️ Backup: $BACKUP_NAME completed with warnings
📊 Repository: {{ borg_repo_dir }}
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')

Exit code: $global_exit
Check logs for details: /var/log/borg-backup.log"
else
    log "Backup completed with warnings or errors (exit code: $global_exit)"
    send_telegram "❌ <b>Borg Backup Failed</b>

❌ Backup: $BACKUP_NAME failed
📊 Repository: {{ borg_repo_dir }}
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

Exit code: $global_exit
Check logs immediately: /var/log/borg-backup.log"
fi

exit $global_exit
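Note: the script's overall exit status is the worst (highest) of the three stages, which is what the two ternary assignments above compute. The same logic as a small Python sketch with example values:

    # 0 = success, 1 = warnings, >1 = errors (Borg's convention)
    backup_exit, prune_exit, compact_exit = 0, 1, 0  # example values, not real results
    global_exit = max(backup_exit, prune_exit, compact_exit)
    print(global_exit)  # -> 1, so the "warnings" notification branch fires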
12 config/ansible/templates/borg-backup.timer.j2 Normal file
@@ -0,0 +1,12 @@
[Unit]
Description=Run Borg Backup Daily
Requires=borg-backup.service

[Timer]
# Run daily at 2 AM
OnCalendar=daily
Persistent=true
RandomizedDelaySec=1800

[Install]
WantedBy=timers.target
48 config/ansible/templates/borg-local-sync.service.j2 Normal file
@@ -0,0 +1,48 @@
[Unit]
Description=Borg Local Sync - Copy Borg repository to local storage
Documentation=man:borg(1)
After=network-online.target
Wants=network-online.target
# Ensure this runs after the main backup has completed
After=borg-backup.service

[Service]
Type=oneshot
User=root
Group=root

# Set up environment
Environment="PATH=/usr/local/bin:/usr/bin:/bin"
Environment="LANG=en_US.UTF-8"
Environment="LC_ALL=en_US.UTF-8"

# Security settings
ProtectSystem=strict
ProtectHome=read-only
ReadWritePaths=/var/log /mnt/borg-backups {{ borg_config_dir }}
PrivateTmp=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes

# Resource limits
MemoryMax=2G
CPUQuota=80%
IOWeight=200

# Timeout settings (local sync might take a while for initial copy)
TimeoutStartSec=3600
TimeoutStopSec=300

# Execute the sync script
ExecStart=/usr/local/bin/borg-local-sync.sh

# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=borg-local-sync

[Install]
WantedBy=multi-user.target
227 config/ansible/templates/borg-local-sync.sh.j2 Normal file
@@ -0,0 +1,227 @@
#!/bin/bash

# Borg local sync script for creating local copies of cloud backups
# This script syncs the Borg repository from JuiceFS/S3 to local ZFS storage

# Set environment variables
export BORG_REPO_SOURCE="{{ borg_repo_dir }}"
export BORG_REPO_LOCAL="/mnt/borg-backups"
export ZFS_POOL="datapool"
export ZFS_DATASET="datapool/borg-backups"
export MOUNT_POINT="/mnt/borg-backups"

# Telegram notification variables
export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='password') }}"
export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='chat_id') }}"

# Log function
log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a /var/log/borg-local-sync.log
}

# Telegram notification function
send_telegram() {
    local message="$1"
    local silent="${2:-false}"

    if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
        log "Telegram credentials not configured, skipping notification"
        return
    fi

    local payload=$(cat <<EOF
{
    "chat_id": "$TELEGRAM_CHAT_ID",
    "text": "$message",
    "parse_mode": "HTML",
    "disable_notification": $silent
}
EOF
)

    curl -s -X POST \
        -H "Content-Type: application/json" \
        -d "$payload" \
        "https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" > /dev/null 2>&1

    if [ $? -eq 0 ]; then
        log "Telegram notification sent successfully"
    else
        log "Failed to send Telegram notification"
    fi
}

# Check if ZFS pool is available
check_zfs_pool() {
    if ! zpool status "$ZFS_POOL" > /dev/null 2>&1; then
        log "ERROR: ZFS pool $ZFS_POOL is not available"
        send_telegram "❌ <b>Borg Local Sync Failed</b>

❌ ZFS pool not available: $ZFS_POOL
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

The 20TB USB drive may not be connected or the ZFS pool is not imported.
Please check the physical connection and run: sudo zpool import $ZFS_POOL"
        return 1
    fi

    # Check if the specific ZFS dataset exists
    if ! zfs list "$ZFS_DATASET" > /dev/null 2>&1; then
        log "ERROR: ZFS dataset $ZFS_DATASET is not available"
        send_telegram "❌ <b>Borg Local Sync Failed</b>

❌ ZFS dataset not available: $ZFS_DATASET
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

The ZFS dataset may not exist or be mounted.
Please check: sudo zfs create $ZFS_DATASET"
        return 1
    fi
    return 0
}

# Check if mount point is available
check_mount_point() {
    if ! mountpoint -q "$MOUNT_POINT"; then
        log "ERROR: Mount point $MOUNT_POINT is not mounted"
        send_telegram "❌ <b>Borg Local Sync Failed</b>

❌ Mount point not available: $MOUNT_POINT
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

The ZFS dataset may not be mounted.
Please check: sudo zfs mount $ZFS_DATASET"
        return 1
    fi
    return 0
}

# Check if source repository is available
check_source_repo() {
    if [ ! -d "$BORG_REPO_SOURCE" ]; then
        log "ERROR: Source Borg repository not found: $BORG_REPO_SOURCE"
        send_telegram "❌ <b>Borg Local Sync Failed</b>

❌ Source repository not found: $BORG_REPO_SOURCE
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

JuiceFS may not be mounted or the source repository path is incorrect."
        return 1
    fi
    return 0
}

# Check available space
check_space() {
    local source_size=$(sudo du -sb "$BORG_REPO_SOURCE" 2>/dev/null | cut -f1)
    local available_space=$(df -B1 "$MOUNT_POINT" | tail -1 | awk '{print $4}')

    if [ -z "$source_size" ]; then
        log "WARNING: Could not determine source repository size"
        return 0
    fi

    # Add 20% buffer for safety
    local required_space=$((source_size * 120 / 100))

    if [ "$available_space" -lt "$required_space" ]; then
        local source_gb=$((source_size / 1024 / 1024 / 1024))
        local available_gb=$((available_space / 1024 / 1024 / 1024))
        local required_gb=$((required_space / 1024 / 1024 / 1024))

        log "ERROR: Insufficient space. Source: ${source_gb}GB, Available: ${available_gb}GB, Required: ${required_gb}GB"
        send_telegram "❌ <b>Borg Local Sync Failed</b>

❌ Insufficient disk space
📊 Source size: ${source_gb}GB
💾 Available: ${available_gb}GB
⚠️ Required: ${required_gb}GB (with 20% buffer)
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

Please free up space on the local backup drive."
        return 1
    fi

    return 0
}

# Perform the sync
sync_repository() {
    log "Starting rsync of Borg repository"

    # Get initial sizes for reporting
    local source_size_before=$(sudo du -sh "$BORG_REPO_SOURCE" 2>/dev/null | cut -f1)
    local dest_size_before="0B"
    if [ -d "$BORG_REPO_LOCAL" ]; then
        dest_size_before=$(sudo du -sh "$BORG_REPO_LOCAL" 2>/dev/null | cut -f1)
    fi

    # Perform the sync with detailed logging
    sudo rsync -avh --delete --progress \
        --exclude="lock.exclusive" \
        --exclude="lock.roster" \
        "$BORG_REPO_SOURCE/" "$BORG_REPO_LOCAL/" 2>&1 | while read line; do
        log "rsync: $line"
    done

    local rsync_exit=${PIPESTATUS[0]}

    # Get final sizes for reporting
    local dest_size_after=$(sudo du -sh "$BORG_REPO_LOCAL" 2>/dev/null | cut -f1)

    if [ $rsync_exit -eq 0 ]; then
        log "Rsync completed successfully"
        send_telegram "🔒 <b>Borg Local Sync Success</b>

✅ Local backup sync completed successfully
📂 Source: $BORG_REPO_SOURCE (${source_size_before})
💾 Destination: $BORG_REPO_LOCAL (${dest_size_after})
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')

Local backup copy is now up to date." "true"
        return 0
    else
        log "Rsync failed with exit code: $rsync_exit"
        send_telegram "❌ <b>Borg Local Sync Failed</b>

❌ Rsync failed during repository sync
📂 Source: $BORG_REPO_SOURCE
💾 Destination: $BORG_REPO_LOCAL
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')

Exit code: $rsync_exit
Check logs: /var/log/borg-local-sync.log"
        return 1
    fi
}

# Main execution
log "Starting Borg local sync process"

# Run all pre-flight checks
if ! check_zfs_pool; then
    exit 1
fi

if ! check_mount_point; then
    exit 1
fi

if ! check_source_repo; then
    exit 1
fi

if ! check_space; then
    exit 1
fi

# All checks passed, proceed with sync
log "All pre-flight checks passed, starting sync"

if sync_repository; then
    log "Local sync completed successfully"
    exit 0
else
    log "Local sync failed"
    exit 1
fi
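Note: check_space requires the source repository size plus a 20% safety buffer to fit in the free space at the destination. The same arithmetic as a small Python sketch with made-up sizes:

    # Example values only; the script reads real sizes via `du` and `df`.
    source_size = 500 * 1024**3          # 500 GiB repository
    available_space = 550 * 1024**3      # 550 GiB free at the destination
    required_space = source_size * 120 // 100
    print(available_space >= required_space)  # -> False, so the sync would abort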
17 config/ansible/templates/borg-local-sync.timer.j2 Normal file
@@ -0,0 +1,17 @@
[Unit]
Description=Run Borg Local Sync daily
Documentation=man:borg(1)
Requires=borg-local-sync.service

[Timer]
# Run daily at 3:00 AM (1 hour after main backup at 2:00 AM)
OnCalendar=*-*-* 03:00:00
# Add randomization to prevent conflicts if multiple systems exist
RandomizedDelaySec=300
# Ensure timer persists across reboots
Persistent=true
# Wake system from suspend if needed
WakeSystem=false

[Install]
WantedBy=timers.target
12 config/ansible/templates/dynamic-dns-systemd.env.j2 Normal file
@@ -0,0 +1,12 @@
# Dynamic DNS Environment Configuration for SystemD
# This file contains sensitive credentials and should be kept secure
# Credentials are automatically retrieved from OnePassword

# CloudFlare API Token (required)
# Retrieved from OnePassword: CloudFlare API Token
CLOUDFLARE_API_TOKEN={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}

# Telegram Bot Credentials (for notifications when IP changes)
# Retrieved from OnePassword: Telegram DynDNS Bot
TELEGRAM_BOT_TOKEN={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}
TELEGRAM_CHAT_ID={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}
@@ -1,12 +0,0 @@
-# Dynamic DNS Environment Configuration
-# This file contains sensitive credentials and should be kept secure
-# Credentials are automatically retrieved from OnePassword
-
-# CloudFlare API Token (required)
-# Retrieved from OnePassword: CloudFlare API Token
-export CLOUDFLARE_API_TOKEN="{{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}"
-
-# Telegram Bot Credentials (for notifications when IP changes)
-# Retrieved from OnePassword: Telegram DynDNS Bot
-export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}"
-export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}"
@@ -35,24 +35,24 @@ useNewBigFolderSizeLimit=true
 0\Folders\2\version=2
 0\Folders\2\virtualFilesMode=off
 0\Folders\3\ignoreHiddenFiles=false
-0\Folders\3\journalPath=.sync_65289e64a490.db
+0\Folders\3\journalPath=.sync_886cca272fe5.db
-0\Folders\3\localPath=/home/menno/Documents/
+0\Folders\3\localPath=/home/menno/Pictures/
 0\Folders\3\paused=false
-0\Folders\3\targetPath=/Documents
+0\Folders\3\targetPath=/Pictures
 0\Folders\3\version=2
 0\Folders\3\virtualFilesMode=off
 0\Folders\4\ignoreHiddenFiles=false
-0\Folders\4\journalPath=.sync_886cca272fe5.db
+0\Folders\4\journalPath=.sync_90ea5e3c7a33.db
-0\Folders\4\localPath=/home/menno/Pictures/
+0\Folders\4\localPath=/home/menno/Videos/
 0\Folders\4\paused=false
-0\Folders\4\targetPath=/Pictures
+0\Folders\4\targetPath=/Videos
 0\Folders\4\version=2
 0\Folders\4\virtualFilesMode=off
 0\Folders\5\ignoreHiddenFiles=false
-0\Folders\5\journalPath=.sync_90ea5e3c7a33.db
+0\Folders\5\journalPath=.sync_65289e64a490.db
-0\Folders\5\localPath=/home/menno/Videos/
+0\Folders\5\localPath=/home/menno/Documents/
 0\Folders\5\paused=false
-0\Folders\5\targetPath=/Videos
+0\Folders\5\targetPath=/Documents
 0\Folders\5\version=2
 0\Folders\5\virtualFilesMode=off
 0\Folders\6\ignoreHiddenFiles=false
@@ -92,4 +92,4 @@ useDownloadLimit=0
 useUploadLimit=0

 [Settings]
-geometry=@ByteArray(\x1\xd9\xd0\xcb\0\x3\0\0\0\0\n\0\0\0\0\0\0\0\f7\0\0\x2\x8a\0\0\n\0\0\0\0\0\0\0\f7\0\0\x2\x8a\0\0\0\x1\0\0\0\0\x14\0\0\0\n\0\0\0\0\0\0\0\f7\0\0\x2\x8a)
+geometry=@ByteArray(\x1\xd9\xd0\xcb\0\x3\0\0\0\0\0\0\0\0\x4\xe0\0\0\x2\x37\0\0\aj\0\0\0\0\0\0\x4\xe0\0\0\x2\x37\0\0\aj\0\0\0\x1\0\0\0\0\x14\0\0\0\0\0\0\0\x4\xe0\0\0\x2\x37\0\0\aj)