Merge branch 'master' of ssh://git.mvl.sh/vleeuwenmenno/dotfiles
Some checks failed
Ansible Lint Check / check-ansible (push) Failing after 23s
Nix Format Check / check-format (push) Failing after 26s
Python Lint Check / check-python (push) Failing after 6s

2025-07-29 16:10:23 +02:00
20 changed files with 1104 additions and 72 deletions

View File

@@ -10,3 +10,21 @@
name: ssh
state: restarted
enabled: true
- name: reload systemd
become: true
ansible.builtin.systemd:
daemon_reload: true
- name: restart borg-local-sync
become: true
ansible.builtin.systemd:
name: borg-local-sync.service
state: restarted
enabled: true
- name: restart borg-local-sync-timer
become: true
ansible.builtin.systemd:
name: borg-local-sync.timer
state: restarted
enabled: true

View File

@@ -0,0 +1,93 @@
---
- name: Borg Backup Installation and Configuration
block:
- name: Check if Borg is already installed
ansible.builtin.command: which borg
register: borg_check
failed_when: false
changed_when: false
- name: Ensure Borg is installed
ansible.builtin.package:
name: borg
state: present
become: true
when: borg_check.rc != 0
- name: Set Borg backup facts
ansible.builtin.set_fact:
borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
borg_backup_dir: "/mnt/services"
borg_repo_dir: "/mnt/object_storage/borg-repo"
- name: Create Borg directories
ansible.builtin.file:
path: "{{ borg_dir }}"
state: directory
mode: "0755"
loop:
- "{{ borg_config_dir }}"
- "/mnt/object_storage"
loop_control:
loop_var: borg_dir
become: true
- name: Check if Borg repository exists
ansible.builtin.stat:
path: "{{ borg_repo_dir }}/config"
register: borg_repo_check
become: true
- name: Initialize Borg repository
ansible.builtin.command: >
borg init --encryption=repokey {{ borg_repo_dir }}
environment:
BORG_PASSPHRASE: "{{ borg_passphrase }}"
become: true
changed_when: true
when: not borg_repo_check.stat.exists
- name: Create Borg backup script
ansible.builtin.template:
src: templates/borg-backup.sh.j2
dest: "{{ borg_config_dir }}/backup.sh"
mode: "0755"
become: true
- name: Create Borg systemd service
ansible.builtin.template:
src: templates/borg-backup.service.j2
dest: /etc/systemd/system/borg-backup.service
mode: "0644"
become: true
register: borg_service
- name: Create Borg systemd timer
ansible.builtin.template:
src: templates/borg-backup.timer.j2
dest: /etc/systemd/system/borg-backup.timer
mode: "0644"
become: true
register: borg_timer
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
become: true
when: borg_service.changed or borg_timer.changed
- name: Enable and start Borg backup timer
ansible.builtin.systemd:
name: borg-backup.timer
enabled: true
state: started
become: true
- name: Display Borg backup status
ansible.builtin.debug:
msg: "Borg backup is configured and will run daily at 2 AM. Logs available at /var/log/borg-backup.log"
tags:
- borg-backup
- borg
- backup

View File

@@ -0,0 +1,95 @@
---
- name: Borg Local Sync Installation and Configuration
block:
- name: Set Borg backup facts
ansible.builtin.set_fact:
borg_passphrase: "{{ lookup('community.general.onepassword', 'Borg Backup', vault='Dotfiles', field='password') }}"
borg_config_dir: "{{ ansible_env.HOME }}/.config/borg"
borg_backup_dir: "/mnt/services"
borg_repo_dir: "/mnt/object_storage/borg-repo"
- name: Create Borg local sync script
ansible.builtin.template:
src: borg-local-sync.sh.j2
dest: /usr/local/bin/borg-local-sync.sh
mode: "0755"
owner: root
group: root
become: true
tags:
- borg-local-sync
- name: Create Borg local sync systemd service
ansible.builtin.template:
src: borg-local-sync.service.j2
dest: /etc/systemd/system/borg-local-sync.service
mode: "0644"
owner: root
group: root
become: true
notify:
- reload systemd
tags:
- borg-local-sync
- name: Create Borg local sync systemd timer
ansible.builtin.template:
src: borg-local-sync.timer.j2
dest: /etc/systemd/system/borg-local-sync.timer
mode: "0644"
owner: root
group: root
become: true
notify:
- reload systemd
- restart borg-local-sync-timer
tags:
- borg-local-sync
- name: Create log file for Borg local sync
ansible.builtin.file:
path: /var/log/borg-local-sync.log
state: touch
owner: root
group: root
mode: "0644"
become: true
tags:
- borg-local-sync
- name: Enable and start Borg local sync timer
ansible.builtin.systemd:
name: borg-local-sync.timer
enabled: true
state: started
daemon_reload: true
become: true
tags:
- borg-local-sync
- name: Add logrotate configuration for Borg local sync
ansible.builtin.copy:
content: |
/var/log/borg-local-sync.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 644 root root
}
dest: /etc/logrotate.d/borg-local-sync
mode: "0644"
owner: root
group: root
become: true
tags:
- borg-local-sync
- borg
- backup
tags:
- borg-local-sync
- borg
- backup

View File

@@ -1,47 +1,29 @@
---
- name: Dynamic DNS setup
block:
- name: Create environment file for dynamic DNS
- name: Create systemd environment file for dynamic DNS
ansible.builtin.template:
src: "{{ playbook_dir }}/templates/dynamic-dns.env.j2"
dest: "{{ ansible_user_dir }}/.local/bin/dynamic-dns.env"
src: "{{ playbook_dir }}/templates/dynamic-dns-systemd.env.j2"
dest: "/etc/dynamic-dns-systemd.env"
mode: "0600"
owner: root
group: root
become: true
- name: Create dynamic DNS wrapper script
ansible.builtin.copy:
dest: "{{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh"
dest: "/usr/local/bin/dynamic-dns-update.sh"
mode: "0755"
content: |
#!/bin/bash
# Load environment variables
source {{ ansible_user_dir }}/.local/bin/dynamic-dns.env
# Change to the directory containing the binary
cd {{ ansible_user_dir }}/.local/bin
# Run dynamic DNS update (binary compiled by utils.yml)
dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
- name: Setup cron job for dynamic DNS updates (fallback)
ansible.builtin.cron:
name: "Dynamic DNS Update"
minute: "*/15"
job: "{{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh"
user: "{{ ansible_user }}"
state: present
ignore_errors: true
tags: [cron]
- name: Create systemd user directory
ansible.builtin.file:
path: "{{ ansible_user_dir }}/.config/systemd/user"
state: directory
mode: "0755"
{{ ansible_user_dir }}/.local/bin/dynamic-dns-cf -record "vleeuwen.me,mvl.sh,mennovanleeuwen.nl" 2>&1 | logger -t dynamic-dns
become: true
- name: Create dynamic DNS systemd timer
ansible.builtin.copy:
dest: "{{ ansible_user_dir }}/.config/systemd/user/dynamic-dns.timer"
dest: "/etc/systemd/system/dynamic-dns.timer"
mode: "0644"
content: |
[Unit]
@@ -54,10 +36,12 @@
[Install]
WantedBy=timers.target
become: true
register: ddns_timer
- name: Create dynamic DNS systemd service
ansible.builtin.copy:
dest: "{{ ansible_user_dir }}/.config/systemd/user/dynamic-dns.service"
dest: "/etc/systemd/system/dynamic-dns.service"
mode: "0644"
content: |
[Unit]
@@ -67,31 +51,36 @@
[Service]
Type=oneshot
ExecStart={{ ansible_user_dir }}/.local/bin/dynamic-dns-update.sh
EnvironmentFile={{ ansible_user_dir }}/.local/bin/dynamic-dns.env
ExecStart=/usr/local/bin/dynamic-dns-update.sh
EnvironmentFile=/etc/dynamic-dns-systemd.env
User={{ ansible_user }}
Group={{ ansible_user }}
[Install]
WantedBy=default.target
WantedBy=multi-user.target
become: true
register: ddns_service
- name: Reload systemd user daemon
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true
scope: user
become: true
when: ddns_timer.changed or ddns_service.changed
- name: Enable and start dynamic DNS timer
ansible.builtin.systemd:
name: dynamic-dns.timer
enabled: true
state: started
scope: user
become: true
- name: Display setup completion message
ansible.builtin.debug:
msg: |
Dynamic DNS setup complete!
- Systemd timer: systemctl --user status dynamic-dns.timer
- Check logs: journalctl --user -u dynamic-dns.service -f
- Manual run: ~/.local/bin/dynamic-dns-update.sh
- Systemd timer: sudo systemctl status dynamic-dns.timer
- Check logs: sudo journalctl -u dynamic-dns.service -f
- Manual run: sudo /usr/local/bin/dynamic-dns-update.sh
- Domains: vleeuwen.me, mvl.sh, mennovanleeuwen.nl
when: inventory_hostname == 'mennos-cachyos-desktop'

View File

@@ -30,6 +30,16 @@
tags:
- dynamic-dns
- name: Include Borg Backup tasks
ansible.builtin.include_tasks: borg-backup.yml
tags:
- borg-backup
- name: Include Borg Local Sync tasks
ansible.builtin.include_tasks: borg-local-sync.yml
tags:
- borg-local-sync
- name: System performance optimizations
ansible.posix.sysctl:
name: "{{ item.name }}"
@@ -130,3 +140,7 @@
enabled: true
hosts:
- mennos-cachyos-desktop
- name: avorion
enabled: true
hosts:
- mennos-cachyos-desktop

View File

@@ -0,0 +1,37 @@
---
- name: Deploy Avorion service
block:
- name: Set Avorion directories
ansible.builtin.set_fact:
avorion_service_dir: "{{ ansible_env.HOME }}/.services/avorion"
avorion_data_dir: "/mnt/services/avorion"
- name: Create Avorion directory
ansible.builtin.file:
path: "{{ avorion_service_dir }}"
state: directory
mode: "0755"
- name: Create Avorion data directory
ansible.builtin.file:
path: "{{ avorion_data_dir }}"
state: directory
mode: "0755"
- name: Deploy Avorion docker-compose.yml
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ avorion_service_dir }}/docker-compose.yml"
mode: "0644"
register: avorion_compose
- name: Stop Avorion service
ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" down --remove-orphans
changed_when: true
when: avorion_compose.changed
- name: Start Avorion service
ansible.builtin.command: docker compose -f "{{ avorion_service_dir }}/docker-compose.yml" up -d
changed_when: true
when: avorion_compose.changed
tags:
- services
- avorion

View File

@@ -0,0 +1,11 @@
services:
avorion:
image: rfvgyhn/avorion:2.5.8.42638
volumes:
- {{ avorion_data_dir }}:/home/steam/.avorion/galaxies/avorion_galaxy
ports:
- 27000:27000
- 27000:27000/udp
- 27003:27003/udp
- 27020:27020/udp
- 27021:27021/udp

View File

@@ -0,0 +1,31 @@
[Unit]
Description=Borg Backup Service
After=network.target
[Service]
Type=oneshot
User=root
Group=root
ExecStart={{ borg_config_dir }}/backup.sh
StandardOutput=journal
StandardError=journal
Environment="BORG_PASSPHRASE={{ borg_passphrase }}"
Environment="BORG_REPO={{ borg_repo_dir }}"
Environment="BORG_CACHE_DIR={{ borg_config_dir }}/cache"
Environment="BORG_CONFIG_DIR={{ borg_config_dir }}/config"
Environment="BORG_SECURITY_DIR={{ borg_config_dir }}/security"
Environment="BORG_KEYS_DIR={{ borg_config_dir }}/keys"
# Security settings
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ReadWritePaths=/mnt/services /mnt/object_storage /var/log {{ borg_config_dir }}
ProtectHome=read-only
ProtectControlGroups=true
RestrictRealtime=true
SystemCallFilter=@system-service
SystemCallErrorNumber=EPERM
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,157 @@
#!/bin/bash
# Borg backup script for /mnt/services
# This script creates incremental backups of the services directory
# Set environment variables
export BORG_REPO="{{ borg_repo_dir }}"
export BORG_PASSPHRASE="{{ borg_passphrase }}"
export BORG_CACHE_DIR="{{ borg_config_dir }}/cache"
export BORG_CONFIG_DIR="{{ borg_config_dir }}/config"
export BORG_SECURITY_DIR="{{ borg_config_dir }}/security"
export BORG_KEYS_DIR="{{ borg_config_dir }}/keys"
# Telegram notification variables
export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='password') }}"
export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='chat_id') }}"
# Backup name with timestamp
BACKUP_NAME="services-$(date +%Y%m%d-%H%M%S)"
# Log function
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a /var/log/borg-backup.log
}
# Telegram notification function
send_telegram() {
local message="$1"
local silent="${2:-false}"
if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
log "Telegram credentials not configured, skipping notification"
return
fi
local payload=$(cat <<EOF
{
"chat_id": "$TELEGRAM_CHAT_ID",
"text": "$message",
"parse_mode": "HTML",
"disable_notification": $silent
}
EOF
)
curl -s -X POST \
-H "Content-Type: application/json" \
-d "$payload" \
"https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" > /dev/null 2>&1
if [ $? -eq 0 ]; then
log "Telegram notification sent successfully"
else
log "Failed to send Telegram notification"
fi
}
# Ensure all Borg directories exist
mkdir -p "$BORG_CACHE_DIR"
mkdir -p "$BORG_CONFIG_DIR"
mkdir -p "$BORG_SECURITY_DIR"
mkdir -p "$BORG_KEYS_DIR"
# Start backup
log "Starting Borg backup: $BACKUP_NAME"
# Create backup
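# --filter AME limits the --list output to Added, Modified and Error files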
borg create \
--verbose \
--filter AME \
--list \
--stats \
--show-rc \
--compression lz4 \
--exclude-caches \
--exclude '*.tmp' \
--exclude '*.temp' \
--exclude '*.log' \
--exclude '*/.cache' \
--exclude '*/cache' \
--exclude '*/logs' \
--exclude '*/tmp' \
--exclude '*/node_modules' \
--exclude '*/__pycache__' \
"::$BACKUP_NAME" \
{{ borg_backup_dir }}
backup_exit=$?
log "Backup finished with exit code: $backup_exit"
# Prune old backups (keep last 7 daily, 4 weekly, 6 monthly)
log "Pruning old backups"
# Check if there are any archives to prune first
archive_count=$(borg list --short --prefix 'services-' 2>/dev/null | wc -l)
if [ "$archive_count" -gt 1 ]; then
borg prune \
--list \
--prefix 'services-' \
--show-rc \
--keep-daily 7 \
--keep-weekly 4 \
--keep-monthly 6
prune_exit=$?
else
log "Only one or no archives found, skipping prune"
prune_exit=0
fi
log "Prune finished with exit code: $prune_exit"
# Compact repository
log "Compacting repository"
borg compact
compact_exit=$?
log "Compact finished with exit code: $compact_exit"
# Global exit status
global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit ))
global_exit=$(( compact_exit > global_exit ? compact_exit : global_exit ))
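# Borg exit codes: 0 = success, 1 = warning, >1 = error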
if [ $global_exit -eq 0 ]; then
log "Backup completed successfully"
send_telegram "🔒 <b>Borg Backup Success</b>
✅ Backup: $BACKUP_NAME completed successfully
📊 Repository: {{ borg_repo_dir }}
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')
All operations completed without errors." "true"
elif [ $global_exit -eq 1 ]; then
log "Backup completed with warnings (exit code: $global_exit)"
send_telegram "⚠️ <b>Borg Backup Warning</b>
⚠️ Backup: $BACKUP_NAME completed with warnings
📊 Repository: {{ borg_repo_dir }}
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')
Exit code: $global_exit
Check logs for details: /var/log/borg-backup.log"
else
log "Backup completed with warnings or errors (exit code: $global_exit)"
send_telegram "❌ <b>Borg Backup Failed</b>
❌ Backup: $BACKUP_NAME failed
📊 Repository: {{ borg_repo_dir }}
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
Exit code: $global_exit
Check logs immediately: /var/log/borg-backup.log"
fi
exit $global_exit

View File

@@ -0,0 +1,12 @@
[Unit]
Description=Run Borg Backup Daily
Requires=borg-backup.service
[Timer]
# Run daily at 2 AM
OnCalendar=*-*-* 02:00:00
Persistent=true
RandomizedDelaySec=1800
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,48 @@
[Unit]
Description=Borg Local Sync - Copy Borg repository to local storage
Documentation=man:borg(1)
After=network-online.target
Wants=network-online.target
# Ensure this runs after the main backup has completed
After=borg-backup.service
[Service]
Type=oneshot
User=root
Group=root
# Set up environment
Environment="PATH=/usr/local/bin:/usr/bin:/bin"
Environment="LANG=en_US.UTF-8"
Environment="LC_ALL=en_US.UTF-8"
# Security settings
ProtectSystem=strict
ProtectHome=read-only
ReadWritePaths=/var/log /mnt/borg-backups {{ borg_config_dir }}
PrivateTmp=yes
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
# Resource limits
MemoryMax=2G
CPUQuota=80%
IOWeight=200
# Timeout settings (local sync might take a while for initial copy)
TimeoutStartSec=3600
TimeoutStopSec=300
# Execute the sync script
ExecStart=/usr/local/bin/borg-local-sync.sh
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=borg-local-sync
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,227 @@
#!/bin/bash
# Borg local sync script for creating local copies of cloud backups
# This script syncs the Borg repository from JuiceFS/S3 to local ZFS storage
# Set environment variables
export BORG_REPO_SOURCE="{{ borg_repo_dir }}"
export BORG_REPO_LOCAL="/mnt/borg-backups"
export ZFS_POOL="datapool"
export ZFS_DATASET="datapool/borg-backups"
export MOUNT_POINT="/mnt/borg-backups"
# Telegram notification variables
export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='password') }}"
export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram Home Server Bot', vault='Dotfiles', field='chat_id') }}"
# Log function
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a /var/log/borg-local-sync.log
}
# Telegram notification function
send_telegram() {
local message="$1"
local silent="${2:-false}"
if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
log "Telegram credentials not configured, skipping notification"
return
fi
local payload=$(cat <<EOF
{
"chat_id": "$TELEGRAM_CHAT_ID",
"text": "$message",
"parse_mode": "HTML",
"disable_notification": $silent
}
EOF
)
curl -s -X POST \
-H "Content-Type: application/json" \
-d "$payload" \
"https://api.telegram.org/bot$TELEGRAM_BOT_TOKEN/sendMessage" > /dev/null 2>&1
if [ $? -eq 0 ]; then
log "Telegram notification sent successfully"
else
log "Failed to send Telegram notification"
fi
}
# Check if ZFS pool is available
check_zfs_pool() {
if ! zpool status "$ZFS_POOL" > /dev/null 2>&1; then
log "ERROR: ZFS pool $ZFS_POOL is not available"
send_telegram "❌ <b>Borg Local Sync Failed</b>
❌ ZFS pool not available: $ZFS_POOL
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
The 20TB USB drive may not be connected or the ZFS pool is not imported.
Please check the physical connection and run: sudo zpool import $ZFS_POOL"
return 1
fi
# Check if the specific ZFS dataset exists
if ! zfs list "$ZFS_DATASET" > /dev/null 2>&1; then
log "ERROR: ZFS dataset $ZFS_DATASET is not available"
send_telegram "❌ <b>Borg Local Sync Failed</b>
❌ ZFS dataset not available: $ZFS_DATASET
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
The ZFS dataset may not exist or be mounted.
Please check: sudo zfs create $ZFS_DATASET"
return 1
fi
return 0
}
# Check if mount point is available
check_mount_point() {
if ! mountpoint -q "$MOUNT_POINT"; then
log "ERROR: Mount point $MOUNT_POINT is not mounted"
send_telegram "❌ <b>Borg Local Sync Failed</b>
❌ Mount point not available: $MOUNT_POINT
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
The ZFS dataset may not be mounted.
Please check: sudo zfs mount $ZFS_DATASET"
return 1
fi
return 0
}
# Check if source repository is available
check_source_repo() {
if [ ! -d "$BORG_REPO_SOURCE" ]; then
log "ERROR: Source Borg repository not found: $BORG_REPO_SOURCE"
send_telegram "❌ <b>Borg Local Sync Failed</b>
❌ Source repository not found: $BORG_REPO_SOURCE
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
JuiceFS may not be mounted or the source repository path is incorrect."
return 1
fi
return 0
}
# Check available space
check_space() {
local source_size=$(sudo du -sb "$BORG_REPO_SOURCE" 2>/dev/null | cut -f1)
local available_space=$(df -B1 "$MOUNT_POINT" | tail -1 | awk '{print $4}')
if [ -z "$source_size" ]; then
log "WARNING: Could not determine source repository size"
return 0
fi
# Add 20% buffer for safety
local required_space=$((source_size * 120 / 100))
if [ "$available_space" -lt "$required_space" ]; then
local source_gb=$((source_size / 1024 / 1024 / 1024))
local available_gb=$((available_space / 1024 / 1024 / 1024))
local required_gb=$((required_space / 1024 / 1024 / 1024))
log "ERROR: Insufficient space. Source: ${source_gb}GB, Available: ${available_gb}GB, Required: ${required_gb}GB"
send_telegram "❌ <b>Borg Local Sync Failed</b>
❌ Insufficient disk space
📊 Source size: ${source_gb}GB
💾 Available: ${available_gb}GB
⚠️ Required: ${required_gb}GB (with 20% buffer)
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
Please free up space on the local backup drive."
return 1
fi
return 0
}
# Perform the sync
sync_repository() {
log "Starting rsync of Borg repository"
# Get initial sizes for reporting
local source_size_before=$(sudo du -sh "$BORG_REPO_SOURCE" 2>/dev/null | cut -f1)
local dest_size_before="0B"
if [ -d "$BORG_REPO_LOCAL" ]; then
dest_size_before=$(sudo du -sh "$BORG_REPO_LOCAL" 2>/dev/null | cut -f1)
fi
# Perform the sync with detailed logging
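# Borg lock files (lock.exclusive, lock.roster) are transient and excluded from the local copy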
sudo rsync -avh --delete --progress \
--exclude="lock.exclusive" \
--exclude="lock.roster" \
"$BORG_REPO_SOURCE/" "$BORG_REPO_LOCAL/" 2>&1 | while read line; do
log "rsync: $line"
done
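# PIPESTATUS[0] holds rsync's exit status; plain $? would only reflect the logging loop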
local rsync_exit=${PIPESTATUS[0]}
# Get final sizes for reporting
local dest_size_after=$(sudo du -sh "$BORG_REPO_LOCAL" 2>/dev/null | cut -f1)
if [ $rsync_exit -eq 0 ]; then
log "Rsync completed successfully"
send_telegram "🔒 <b>Borg Local Sync Success</b>
✅ Local backup sync completed successfully
📂 Source: $BORG_REPO_SOURCE (${source_size_before})
💾 Destination: $BORG_REPO_LOCAL (${dest_size_after})
🕐 Completed: $(date '+%Y-%m-%d %H:%M:%S')
Local backup copy is now up to date." "true"
return 0
else
log "Rsync failed with exit code: $rsync_exit"
send_telegram "❌ <b>Borg Local Sync Failed</b>
❌ Rsync failed during repository sync
📂 Source: $BORG_REPO_SOURCE
💾 Destination: $BORG_REPO_LOCAL
🕐 Failed: $(date '+%Y-%m-%d %H:%M:%S')
Exit code: $rsync_exit
Check logs: /var/log/borg-local-sync.log"
return 1
fi
}
# Main execution
log "Starting Borg local sync process"
# Run all pre-flight checks
if ! check_zfs_pool; then
exit 1
fi
if ! check_mount_point; then
exit 1
fi
if ! check_source_repo; then
exit 1
fi
if ! check_space; then
exit 1
fi
# All checks passed, proceed with sync
log "All pre-flight checks passed, starting sync"
if sync_repository; then
log "Local sync completed successfully"
exit 0
else
log "Local sync failed"
exit 1
fi

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Run Borg Local Sync daily
Documentation=man:borg(1)
Requires=borg-local-sync.service
[Timer]
# Run daily at 3:00 AM (1 hour after main backup at 2:00 AM)
OnCalendar=*-*-* 03:00:00
# Add randomization to prevent conflicts if multiple systems exist
RandomizedDelaySec=300
# Ensure timer persists across reboots
Persistent=true
# Wake system from suspend if needed
WakeSystem=false
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,12 @@
# Dynamic DNS Environment Configuration for SystemD
# This file contains sensitive credentials and should be kept secure
# Credentials are automatically retrieved from OnePassword
# CloudFlare API Token (required)
# Retrieved from OnePassword: CloudFlare API Token
CLOUDFLARE_API_TOKEN={{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}
# Telegram Bot Credentials (for notifications when IP changes)
# Retrieved from OnePassword: Telegram DynDNS Bot
TELEGRAM_BOT_TOKEN={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}
TELEGRAM_CHAT_ID={{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}

View File

@@ -1,12 +0,0 @@
# Dynamic DNS Environment Configuration
# This file contains sensitive credentials and should be kept secure
# Credentials are automatically retrieved from OnePassword
# CloudFlare API Token (required)
# Retrieved from OnePassword: CloudFlare API Token
export CLOUDFLARE_API_TOKEN="{{ lookup('community.general.onepassword', 'CloudFlare API Token', vault='Dotfiles', field='password') }}"
# Telegram Bot Credentials (for notifications when IP changes)
# Retrieved from OnePassword: Telegram DynDNS Bot
export TELEGRAM_BOT_TOKEN="{{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='password') }}"
export TELEGRAM_CHAT_ID="{{ lookup('community.general.onepassword', 'Telegram DynDNS Bot', vault='Dotfiles', field='chat_id') }}"