feat: adds nextcloud and plex
fix: caddy stuff
.bashrc
@@ -25,14 +25,6 @@ fi
 # Docker Compose Alias (Mostly for old shell scripts)
 alias docker-compose='docker compose'
 
-# tatool aliases
-alias tls='tatool ls -g'
-alias tps='tls'
-alias ti='tatool doctor'
-alias td='tatool doctor'
-alias tr='tatool restart'
-alias tsrc='tatool source'
-
 # Modern tools aliases
 alias l="eza --header --long --git --group-directories-first --group --icons --color=always --sort=name --hyperlink -o --no-permissions"
 alias ll='l'
@@ -75,6 +67,292 @@ alias kubectl="minikube kubectl --"
 # netstat port in use check
 alias port='netstat -atupn | grep LISTEN'
 
+# Check if a specific port is in use with detailed process information
+inuse() {
+    # Color definitions
+    local RED='\033[0;31m'
+    local GREEN='\033[0;32m'
+    local YELLOW='\033[1;33m'
+    local BLUE='\033[0;34m'
+    local CYAN='\033[0;36m'
+    local BOLD='\033[1m'
+    local NC='\033[0m' # No Color
+
+    # Input validation
+    if [ $# -eq 0 ]; then
+        echo -e "${RED}Usage:${NC} inuse <port_number>"
+        echo -e "${YELLOW} inuse --list${NC}"
+        echo -e "${YELLOW} inuse --help${NC}"
+        echo -e "${YELLOW}Example:${NC} inuse 80"
+        echo -e "${YELLOW} inuse --list${NC}"
+        return 1
+    fi
+
+    # Handle --help option
+    if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+        echo -e "${CYAN}${BOLD}inuse - Check if a port is in use${NC}"
+        echo
+        echo -e "${BOLD}USAGE:${NC}"
+        echo -e " inuse <port_number> Check if a specific port is in use"
+        echo -e " inuse --list, -l List all Docker services with listening ports"
+        echo -e " inuse --help, -h Show this help message"
+        echo
+        echo -e "${BOLD}EXAMPLES:${NC}"
+        echo -e " ${GREEN}inuse 80${NC} Check if port 80 is in use"
+        echo -e " ${GREEN}inuse 3000${NC} Check if port 3000 is in use"
+        echo -e " ${GREEN}inuse --list${NC} Show all Docker services with ports"
+        echo
+        echo -e "${BOLD}DESCRIPTION:${NC}"
+        echo -e " The inuse function checks if a specific port is in use and identifies"
+        echo -e " the process using it. It can detect regular processes, Docker containers"
+        echo -e " with published ports, and containers using host networking."
+        echo
+        echo -e "${BOLD}OUTPUT:${NC}"
+        echo -e " ${GREEN}✓${NC} Port is in use - shows process name, PID, and Docker info if applicable"
+        echo -e " ${RED}✗${NC} Port is free"
+        echo -e " ${YELLOW}⚠${NC} Port is in use but process cannot be identified"
+        echo
+        return 0
+    fi
+
+    # Handle --list option
+    if [ "$1" = "--list" ] || [ "$1" = "-l" ]; then
+        if ! command -v docker >/dev/null 2>&1; then
+            echo -e "${RED}Error:${NC} Docker is not available"
+            return 1
+        fi
+
+        echo -e "${CYAN}${BOLD}Docker Services with Listening Ports:${NC}"
+        echo
+
+        # Get all running containers
+        local containers=$(docker ps --format "{{.Names}}" 2>/dev/null)
+        if [ -z "$containers" ]; then
+            echo -e "${YELLOW}No running Docker containers found${NC}"
+            return 0
+        fi
+
+        local found_services=false
+        while IFS= read -r container; do
+            # Get port mappings for this container
+            local ports=$(docker port "$container" 2>/dev/null)
+            if [ -n "$ports" ]; then
+                # Get container image name (clean it up)
+                local image=$(docker inspect "$container" 2>/dev/null | grep -o '"Image": *"[^"]*"' | cut -d'"' -f4 | head -1)
+                local clean_image=$(echo "$image" | sed 's/sha256:[a-f0-9]*/[image-hash]/' | sed 's/^.*\///')
+
+                echo -e "${GREEN}📦 ${BOLD}$container${NC} ${CYAN}($clean_image)${NC}"
+
+                # Parse and display ports nicely
+                echo "$ports" | while IFS= read -r port_line; do
+                    if [[ "$port_line" =~ ([0-9]+)/(tcp|udp).*0\.0\.0\.0:([0-9]+) ]]; then
+                        local container_port="${BASH_REMATCH[1]}"
+                        local protocol="${BASH_REMATCH[2]}"
+                        local host_port="${BASH_REMATCH[3]}"
+                        echo -e "${CYAN} ├─ Port ${BOLD}$host_port${NC}${CYAN} → $container_port ($protocol)${NC}"
+                    elif [[ "$port_line" =~ ([0-9]+)/(tcp|udp).*\[::\]:([0-9]+) ]]; then
+                        local container_port="${BASH_REMATCH[1]}"
+                        local protocol="${BASH_REMATCH[2]}"
+                        local host_port="${BASH_REMATCH[3]}"
+                        echo -e "${CYAN} ├─ Port ${BOLD}$host_port${NC}${CYAN} → $container_port ($protocol) [IPv6]${NC}"
+                    fi
+                done
+                echo
+                found_services=true
+            fi
+        done <<< "$containers"
+
+        # Also check for host networking containers
+        local host_containers=$(docker ps --format "{{.Names}}" --filter "network=host" 2>/dev/null)
+        if [ -n "$host_containers" ]; then
+            echo -e "${YELLOW}${BOLD}Host Networking Containers:${NC}"
+            while IFS= read -r container; do
+                local image=$(docker inspect "$container" 2>/dev/null | grep -o '"Image": *"[^"]*"' | cut -d'"' -f4 | head -1)
+                local clean_image=$(echo "$image" | sed 's/sha256:[a-f0-9]*/[image-hash]/' | sed 's/^.*\///')
+                echo -e "${YELLOW}🌐 ${BOLD}$container${NC} ${CYAN}($clean_image)${NC} ${YELLOW}- uses host networking${NC}"
+            done <<< "$host_containers"
+            echo
+            found_services=true
+        fi
+
+        if [ "$found_services" = false ]; then
+            echo -e "${YELLOW}No Docker services with exposed ports found${NC}"
+        fi
+
+        return 0
+    fi
+
+    local port="$1"
+
+    # Validate port number
+    if ! [[ "$port" =~ ^[0-9]+$ ]] || [ "$port" -lt 1 ] || [ "$port" -gt 65535 ]; then
+        echo -e "${RED}Error:${NC} Invalid port number. Must be between 1 and 65535."
+        return 1
+    fi
+
+    # Check if port is in use first
+    local port_in_use=false
+    if command -v ss >/dev/null 2>&1; then
+        if ss -tulpn 2>/dev/null | grep -q ":$port "; then
+            port_in_use=true
+        fi
+    elif command -v netstat >/dev/null 2>&1; then
+        if netstat -tulpn 2>/dev/null | grep -q ":$port "; then
+            port_in_use=true
+        fi
+    fi
+
+    if [ "$port_in_use" = false ]; then
+        echo -e "${RED}✗ Port $port is FREE${NC}"
+        return 1
+    fi
+
+    # Port is in use, now find what's using it
+    local found_process=false
+
+    # Method 1: Try netstat first (most reliable for PID info)
+    if command -v netstat >/dev/null 2>&1; then
+        local netstat_result=$(netstat -tulpn 2>/dev/null | grep ":$port ")
+        if [ -n "$netstat_result" ]; then
+            while IFS= read -r line; do
+                local pid=$(echo "$line" | awk '{print $7}' | cut -d'/' -f1)
+                local process_name=$(echo "$line" | awk '{print $7}' | cut -d'/' -f2)
+                local protocol=$(echo "$line" | awk '{print $1}')
+
+                if [[ "$pid" =~ ^[0-9]+$ ]] && [ -n "$process_name" ]; then
+                    # Check if it's a Docker container
+                    local docker_info=""
+                    if command -v docker >/dev/null 2>&1; then
+                        # Check for docker-proxy
+                        if [ "$process_name" = "docker-proxy" ]; then
+                            local container_name=$(docker ps --format "{{.Names}}" --filter "publish=$port" 2>/dev/null | head -1)
+                            if [ -n "$container_name" ]; then
+                                docker_info=" ${CYAN}(Docker: $container_name)${NC}"
+                            else
+                                docker_info=" ${CYAN}(Docker proxy)${NC}"
+                            fi
+                        else
+                            # Check if process is in a container by examining cgroup
+                            if [ -f "/proc/$pid/cgroup" ] && grep -q docker "/proc/$pid/cgroup" 2>/dev/null; then
+                                local container_id=$(cat "/proc/$pid/cgroup" 2>/dev/null | grep docker | grep -o '[a-f0-9]\{64\}' | head -1)
+                                if [ -n "$container_id" ]; then
+                                    local container_name=$(docker inspect "$container_id" 2>/dev/null | grep -o '"Name": *"[^"]*"' | cut -d'"' -f4 | sed 's/^\/*//' | head -1)
+                                    if [ -n "$container_name" ]; then
+                                        docker_info=" ${CYAN}(Docker: $container_name)${NC}"
+                                    else
+                                        docker_info=" ${CYAN}(Docker: ${container_id:0:12})${NC}"
+                                    fi
+                                fi
+                            fi
+                        fi
+                    fi
+
+                    echo -e "${GREEN}✓ Port $port ($protocol) in use by ${BOLD}$process_name${NC} ${GREEN}as PID ${BOLD}$pid${NC}$docker_info"
+                    found_process=true
+                fi
+            done <<< "$netstat_result"
+        fi
+    fi
+
+    # Method 2: Try ss if netstat didn't work
+    if [ "$found_process" = false ] && command -v ss >/dev/null 2>&1; then
+        local ss_result=$(ss -tulpn 2>/dev/null | grep ":$port ")
+        if [ -n "$ss_result" ]; then
+            while IFS= read -r line; do
+                local pid=$(echo "$line" | grep -o 'pid=[0-9]*' | cut -d'=' -f2)
+                local protocol=$(echo "$line" | awk '{print $1}')
+
+                if [[ "$pid" =~ ^[0-9]+$ ]]; then
+                    local process_name=$(ps -p "$pid" -o comm= 2>/dev/null)
+                    if [ -n "$process_name" ]; then
+                        # Check for Docker container
+                        local docker_info=""
+                        if command -v docker >/dev/null 2>&1; then
+                            if [ "$process_name" = "docker-proxy" ]; then
+                                local container_name=$(docker ps --format "{{.Names}}" --filter "publish=$port" 2>/dev/null | head -1)
+                                if [ -n "$container_name" ]; then
+                                    docker_info=" ${CYAN}(Docker: $container_name)${NC}"
+                                else
+                                    docker_info=" ${CYAN}(Docker proxy)${NC}"
+                                fi
+                            elif [ -f "/proc/$pid/cgroup" ] && grep -q docker "/proc/$pid/cgroup" 2>/dev/null; then
+                                local container_id=$(cat "/proc/$pid/cgroup" 2>/dev/null | grep docker | grep -o '[a-f0-9]\{64\}' | head -1)
+                                if [ -n "$container_id" ]; then
+                                    local container_name=$(docker inspect "$container_id" 2>/dev/null | grep -o '"Name": *"[^"]*"' | cut -d'"' -f4 | sed 's/^\/*//' | head -1)
+                                    if [ -n "$container_name" ]; then
+                                        docker_info=" ${CYAN}(Docker: $container_name)${NC}"
+                                    else
+                                        docker_info=" ${CYAN}(Docker: ${container_id:0:12})${NC}"
+                                    fi
+                                fi
+                            fi
+                        fi
+
+                        echo -e "${GREEN}✓ Port $port ($protocol) in use by ${BOLD}$process_name${NC} ${GREEN}as PID ${BOLD}$pid${NC}$docker_info"
+                        found_process=true
+                    fi
+                fi
+            done <<< "$ss_result"
+        fi
+    fi
+
+    # Method 3: Try fuser as last resort
+    if [ "$found_process" = false ] && command -v fuser >/dev/null 2>&1; then
+        local fuser_pids=$(fuser "$port/tcp" 2>/dev/null)
+        if [ -n "$fuser_pids" ]; then
+            for pid in $fuser_pids; do
+                if [[ "$pid" =~ ^[0-9]+$ ]]; then
+                    local process_name=$(ps -p "$pid" -o comm= 2>/dev/null)
+                    if [ -n "$process_name" ]; then
+                        echo -e "${GREEN}✓ Port $port (tcp) in use by ${BOLD}$process_name${NC} ${GREEN}as PID ${BOLD}$pid${NC}"
+                        found_process=true
+                        break
+                    fi
+                fi
+            done
+        fi
+    fi
+
+    # Method 4: Check for Docker containers more accurately
+    if [ "$found_process" = false ] && command -v docker >/dev/null 2>&1; then
+        # First, try to find containers with published ports matching our port
+        local container_with_port=$(docker ps --format "{{.Names}}" --filter "publish=$port" 2>/dev/null | head -1)
+        if [ -n "$container_with_port" ]; then
+            local image=$(docker inspect "$container_with_port" 2>/dev/null | grep -o '"Image": *"[^"]*"' | cut -d'"' -f4 | head -1)
+            echo -e "${GREEN}✓ Port $port in use by Docker container ${BOLD}$container_with_port${NC} ${CYAN}(published port, image: $image)${NC}"
+            found_process=true
+        else
+            # Only check host networking containers if we haven't found anything else
+            local host_containers=$(docker ps --format "{{.Names}}" --filter "network=host" 2>/dev/null)
+            if [ -n "$host_containers" ]; then
+                local host_container_count=$(echo "$host_containers" | wc -l)
+                if [ "$host_container_count" -eq 1 ]; then
+                    # Only one host networking container, likely candidate
+                    local image=$(docker inspect "$host_containers" 2>/dev/null | grep -o '"Image": *"[^"]*"' | cut -d'"' -f4 | head -1)
+                    echo -e "${YELLOW}⚠ Port $port possibly in use by Docker container ${BOLD}$host_containers${NC} ${CYAN}(host networking, image: $image)${NC}"
+                    found_process=true
+                else
+                    # Multiple host networking containers, can't determine which one
+                    echo -e "${YELLOW}⚠ Port $port is in use, multiple Docker containers using host networking:${NC}"
+                    while IFS= read -r container; do
+                        local image=$(docker inspect "$container" 2>/dev/null | grep -o '"Image": *"[^"]*"' | cut -d'"' -f4 | head -1)
+                        echo -e "${CYAN} - $container (image: $image)${NC}"
+                    done <<< "$host_containers"
+                    found_process=true
+                fi
+            fi
+        fi
+    fi
+
+    # If we still haven't found the process, show a generic message
+    if [ "$found_process" = false ]; then
+        echo -e "${YELLOW}⚠ Port $port is in use but unable to identify the process${NC}"
+        echo -e "${CYAN} This might be due to insufficient permissions or the process being in a different namespace${NC}"
+    fi
+
+    return 0
+}
+
 # random string (Syntax: random <length>)
 alias random='openssl rand -base64'
 
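A quick way to exercise the new helper once the updated .bashrc has been sourced (a minimal sketch, not part of the commit; it assumes Docker is running and that something is listening on port 80):

    # reload the aliases and functions into the current shell
    source ~/.bashrc

    # check a single port; prints owning process, PID and Docker container if any
    inuse 80

    # list every running container together with its published ports
    inuse --list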
@@ -213,6 +213,30 @@ def ensure_ansible_collections():
     return True


+def get_sudo_password_from_1password(username, hostname):
+    """Fetches the sudo password from 1Password using the op CLI tool."""
+    printfe("cyan", "Attempting to fetch sudo password from 1Password...")
+    try:
+        op_command = [
+            "op",
+            "read",
+            f"op://Dotfiles/sudo/{username} {hostname}",
+        ]
+        result = subprocess.run(op_command, capture_output=True, text=True, check=True)
+        password = result.stdout.strip()
+        printfe("green", "Successfully fetched sudo password from 1Password.")
+        return password
+    except subprocess.CalledProcessError as e:
+        printfe("red", f"Failed to fetch password from 1Password: {e.stderr.strip()}")
+        return None
+    except FileNotFoundError:
+        printfe("red", "Error: 'op' command not found. Please ensure 1Password CLI is installed and in your PATH.")
+        return None
+    except Exception as e:
+        printfe("red", f"An unexpected error occurred while fetching password: {e}")
+        return None
+
+
 def main():
     # Parse arguments
     parser = argparse.ArgumentParser(add_help=False)
@@ -353,13 +377,7 @@ def main():
         return 1

     printfe("cyan", "Running Ansible playbook...")
-    # Determine which playbook to use based on tags
-    if args.tags and any(tag.strip() in ['caddy', 'country-blocking', 'caddyfile', 'config'] for tag in args.tags.split(',')):
-        playbook_path = f"{dotfiles_path}/config/ansible/caddy-playbook.yml"
-        printfe("cyan", f"Using dedicated Caddy playbook for tags: {args.tags}")
-    else:
-        playbook_path = f"{dotfiles_path}/config/ansible/playbook.yml"
+    playbook_path = f"{dotfiles_path}/config/ansible/playbook.yml"

     ansible_cmd = [
         "/usr/bin/env",
         "ansible-playbook",
@@ -372,9 +390,20 @@ def main():
         f"ansible_user={username}",
         "--limit",
         hostname,
-        "--ask-become-pass",
     ]
+
+    sudo_password = None
+    if not os.isatty(sys.stdin.fileno()):
+        printfe("yellow", "Warning: Not running in an interactive terminal. Cannot fetch password from 1Password.")
+        ansible_cmd.append("--ask-become-pass")
+    else:
+        sudo_password = get_sudo_password_from_1password(username, hostname)
+        if sudo_password:
+            ansible_cmd.extend(["--become-pass-file", "-"])
+        else:
+            printfe("yellow", "Could not fetch password from 1Password. Falling back to --ask-become-pass.")
+            ansible_cmd.append("--ask-become-pass")

     if args.tags:
         ansible_cmd.extend(["--tags", args.tags])
@@ -384,7 +413,12 @@ def main():
     # Debug: Show the command being executed
     printfe("yellow", f"Debug: Executing command: {' '.join(ansible_cmd)}")

-    result = subprocess.run(ansible_cmd)
+    # Execute the Ansible command, passing password via stdin if available
+    if sudo_password:
+        result = subprocess.run(ansible_cmd, input=sudo_password.encode('utf-8'))
+    else:
+        result = subprocess.run(ansible_cmd)

     if result.returncode != 0:
         printfe("red", "Failed to upgrade Ansible packages.")
         return 1
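At the shell level, the new become-password flow amounts to roughly the following pipeline (a hand-rolled sketch, not part of the commit; it assumes a 1Password item at op://Dotfiles/sudo/<user> <host>, and an ansible-playbook new enough to accept --become-pass-file, the short alias of --become-password-file, with "-" meaning stdin as the Python code relies on):

    # fetch the sudo password from 1Password and feed it to ansible-playbook on stdin
    op read "op://Dotfiles/sudo/$USER $(hostname)" |
        ansible-playbook config/ansible/playbook.yml \
            --limit "$(hostname)" \
            --become-pass-file -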
@@ -1,104 +0,0 @@
----
-- name: Configure Caddy service
-  hosts: all
-  handlers:
-    - name: Import handler tasks
-      ansible.builtin.import_tasks: handlers/main.yml
-  gather_facts: true
-
-  tasks:
-    - name: Set Caddy directories (basic)
-      ansible.builtin.set_fact:
-        caddy_service_dir: "{{ ansible_env.HOME }}/services/caddy"
-        caddy_data_dir: "/mnt/object_storage/services/caddy"
-      tags:
-        - caddy
-        - setup
-        - country-blocking
-        - always
-
-    - name: Get Caddy email from 1Password
-      ansible.builtin.set_fact:
-        caddy_email: "{{ lookup('community.general.onepassword', 'Caddy (Proxy)', vault='Dotfiles', field='email') }}"
-      ignore_errors: true
-      tags:
-        - caddy
-        - config
-        - caddyfile
-        - country-blocking
-
-    - name: Set fallback email if 1Password failed
-      ansible.builtin.set_fact:
-        caddy_email: "admin@example.com"
-      when: caddy_email is not defined
-      tags:
-        - caddy
-        - config
-        - caddyfile
-        - country-blocking
-
-    - name: Setup country blocking
-      ansible.builtin.include_tasks: tasks/servers/services/caddy/country-blocking.yml
-      tags:
-        - caddy
-        - country-blocking
-        - security
-
-    - name: Create Caddy directory
-      ansible.builtin.file:
-        path: "{{ caddy_service_dir }}"
-        state: directory
-        mode: "0755"
-      tags:
-        - caddy
-        - setup
-
-    - name: Create Caddy network
-      ansible.builtin.command: docker network create caddy_default
-      register: create_caddy_network
-      failed_when:
-        - create_caddy_network.rc != 0
-        - "'already exists' not in create_caddy_network.stderr"
-      changed_when: create_caddy_network.rc == 0
-      tags:
-        - caddy
-        - docker
-        - network
-
-    - name: Deploy Caddy docker-compose.yml
-      ansible.builtin.template:
-        src: tasks/servers/services/caddy/docker-compose.yml.j2
-        dest: "{{ caddy_service_dir }}/docker-compose.yml"
-        mode: "0644"
-      register: caddy_compose
-      tags:
-        - caddy
-        - docker
-        - config
-
-    - name: Deploy Caddy Caddyfile
-      ansible.builtin.template:
-        src: tasks/servers/services/caddy/Caddyfile.j2
-        dest: "{{ caddy_service_dir }}/Caddyfile"
-        mode: "0644"
-      register: caddy_file
-      tags:
-        - caddy
-        - config
-        - caddyfile
-
-    - name: Stop Caddy service
-      ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans
-      when: caddy_compose.changed or caddy_file.changed
-      tags:
-        - caddy
-        - docker
-        - service
-
-    - name: Start Caddy service
-      ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d
-      when: caddy_compose.changed or caddy_file.changed
-      tags:
-        - caddy
-        - docker
-        - service
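With the dedicated playbook removed, the Caddy tasks are reached through the main playbook and its tags instead; a sketch of the equivalent invocation (the playbook path comes from the Python changes above, the tag names from the removed selection logic, and the host name is a placeholder):

    ansible-playbook config/ansible/playbook.yml \
        --limit my-hostname \
        --tags caddy,caddyfile,country-blocking \
        --ask-become-pass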
@@ -18,6 +18,23 @@
       tags:
         - juicefs
 
+    - name: System performance optimizations
+      ansible.posix.sysctl:
+        name: "{{ item.name }}"
+        value: "{{ item.value }}"
+        state: present
+        reload: true
+      become: true
+      loop:
+        - { name: "fs.file-max", value: "2097152" } # Max open files for the entire system
+        - { name: "vm.max_map_count", value: "16777216" } # Max memory map areas a process can have
+        - { name: "vm.swappiness", value: "10" } # Controls how aggressively the kernel swaps out memory
+        - { name: "vm.vfs_cache_pressure", value: "50" } # Controls kernel's tendency to reclaim memory for directory/inode caches
+        - { name: "net.core.somaxconn", value: "65535" } # Max pending connections for a listening socket
+        - { name: "net.core.netdev_max_backlog", value: "65535" } # Max packets queued on network interface input
+        - { name: "net.ipv4.tcp_fin_timeout", value: "30" } # How long sockets stay in FIN-WAIT-2 state
+        - { name: "net.ipv4.tcp_tw_reuse", value: "1" } # Allows reusing TIME_WAIT sockets for new outgoing connections
+
     - name: Include service tasks
       ansible.builtin.include_tasks: "services/{{ item.name }}/{{ item.name }}.yml"
       loop: "{{ services | selectattr('enabled', 'equalto', true) | selectattr('hosts', 'contains', inventory_hostname) | list if specific_service is not defined else services | selectattr('name', 'equalto', specific_service) | selectattr('enabled', 'equalto', true) | selectattr('hosts', 'contains', inventory_hostname) | list }}"
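After this task has run, the applied kernel parameters can be spot-checked on the target host (a small sketch; ansible.posix.sysctl also persists the values to the sysctl configuration, so runtime and on-disk settings should agree):

    # current runtime values
    sysctl vm.swappiness net.core.somaxconn fs.file-max

    # re-apply and print everything from the persisted sysctl configuration
    sysctl --system | grep -E 'swappiness|somaxconn|file-max'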
@@ -25,6 +42,7 @@
         label: "{{ item.name }}"
       tags:
         - services
+        - always
 
   vars:
     services:
@@ -49,10 +67,17 @@
         enabled: true
         hosts:
           - mennos-cloud-server
-      - name: jellyfin
+      - name: plex
+        enabled: true
+        hosts:
+          - mennos-cachyos-desktop
+      - name: tautulli
+        enabled: true
+        hosts:
+          - mennos-cachyos-desktop
+      - name: stash
         enabled: true
         hosts:
-          - mennos-cloud-server
           - mennos-cachyos-desktop
       - name: seafile
         enabled: true
@@ -82,6 +107,10 @@
         enabled: true
        hosts:
           - mennos-cloud-server
+      - name: nextcloud
+        enabled: true
+        hosts:
+          - mennos-cachyos-desktop
       - name: echoip
         enabled: true
         hosts:
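The include loop above honours an optional specific_service variable, so a single service can be redeployed without touching the rest; a hedged sketch of how that might be passed on the command line (the entry point and become handling are assumptions, the host and service names come from the list above):

    # deploy only the new plex service definition on the desktop host
    ansible-playbook config/ansible/playbook.yml \
        --limit mennos-cachyos-desktop \
        --tags services \
        -e specific_service=plex \
        --ask-become-pass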
@@ -36,8 +36,8 @@ services:
     networks:
       - arr_stack_net
 
-  lidarr:
-    image: linuxserver/lidarr:latest
+  whisparr:
+    image: ghcr.io/hotio/whisparr:latest
     environment:
       - PUID=1000
       - PGID=100
@@ -47,7 +47,7 @@ services:
     extra_hosts:
       - host.docker.internal:host-gateway
     volumes:
-      - {{ arr_stack_data_dir }}/lidarr-config:/config
+      - {{ arr_stack_data_dir }}/whisparr-config:/config
       - /mnt/object_storage:/storage
     restart: unless-stopped
     networks:
@@ -98,23 +98,6 @@ df.mvl.sh {
     tls {{ caddy_email }}
 }
 
-overseerr.mvl.sh jellyseerr.mvl.sh overseerr.vleeuwen.me jellyseerr.vleeuwen.me {
-    import country_block
-    reverse_proxy mennos-cachyos-desktop:5555
-    tls {{ caddy_email }}
-}
-
-anime.mvl.sh anime.vleeuwen.me {
-    import country_block
-    reverse_proxy jellyfin:8096
-    tls {{ caddy_email }}
-}
-
-fladder.mvl.sh {
-    import country_block
-    reverse_proxy fladder:80
-    tls {{ caddy_email }}
-}
-
 {% elif inventory_hostname == 'mennos-cachyos-desktop' %}
 home.vleeuwen.me {
     import country_block
@@ -127,16 +110,12 @@ home.vleeuwen.me {
     }
     tls {{ caddy_email }}
 }
 
 bin.mvl.sh {
     import country_block
     reverse_proxy privatebin:8080
     tls {{ caddy_email }}
 }
-jellyfin.mvl.sh jellyfin.vleeuwen.me {
-    import country_block
-    reverse_proxy jellyfin:8096
-    tls {{ caddy_email }}
-}
 
 ip.mvl.sh ip.vleeuwen.me {
     import country_block
@@ -158,4 +137,66 @@ http://ip.mvl.sh http://ip.vleeuwen.me {
         header_up X-Forwarded-Host {host}
     }
 }
+
+overseerr.mvl.sh overseerr.vleeuwen.me {
+    import country_block
+    reverse_proxy host.docker.internal:5555
+    tls {{ caddy_email }}
+}
+
+plex.mvl.sh plex.vleeuwen.me {
+    import country_block
+    reverse_proxy host.docker.internal:32400 {
+        header_up Host {upstream_hostport}
+        header_up X-Real-IP {http.request.remote.host}
+        header_up X-Forwarded-For {http.request.remote.host}
+        header_up X-Forwarded-Proto {scheme}
+        header_up X-Forwarded-Host {host}
+    }
+    tls {{ caddy_email }}
+}
+
+drive.mvl.sh drive.vleeuwen.me {
+    import country_block
+
+    # CalDAV and CardDAV redirects
+    redir /.well-known/carddav /remote.php/dav/ 301
+    redir /.well-known/caldav /remote.php/dav/ 301
+
+    # Handle other .well-known requests
+    handle /.well-known/* {
+        reverse_proxy nextcloud:80 {
+            header_up Host {host}
+            header_up X-Real-IP {http.request.remote.host}
+            header_up X-Forwarded-For {http.request.remote.host}
+            header_up X-Forwarded-Proto {scheme}
+            header_up X-Forwarded-Host {host}
+        }
+    }
+
+    # Main reverse proxy configuration with proper headers
+    reverse_proxy nextcloud:80 {
+        header_up Host {host}
+        header_up X-Real-IP {http.request.remote.host}
+        header_up X-Forwarded-For {http.request.remote.host}
+        header_up X-Forwarded-Proto {scheme}
+        header_up X-Forwarded-Host {host}
+    }
+
+    # Security headers
+    header {
+        # HSTS header for enhanced security (required by Nextcloud)
+        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
+        # Additional security headers recommended for Nextcloud
+        X-Content-Type-Options "nosniff"
+        X-Frame-Options "SAMEORIGIN"
+        Referrer-Policy "no-referrer"
+        X-XSS-Protection "1; mode=block"
+        X-Permitted-Cross-Domain-Policies "none"
+        X-Robots-Tag "noindex, nofollow"
+    }
+
+    tls {{ caddy_email }}
+}
+
 {% endif %}
|
|||||||
services:
|
|
||||||
jellyfin:
|
|
||||||
image: lscr.io/linuxserver/jellyfin:latest
|
|
||||||
container_name: jellyfin
|
|
||||||
environment:
|
|
||||||
- PUID=1000
|
|
||||||
- PGID=100
|
|
||||||
- TZ=Europe/Amsterdam
|
|
||||||
volumes:
|
|
||||||
- {{ jellyfin_data_dir }}/jellyfin-config:/config
|
|
||||||
- {{ '/mnt/data/movies' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/movies' }}:/movies
|
|
||||||
- {{ '/mnt/data/tvshows' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/tvshows' }}:/tvshows
|
|
||||||
- {{ '/mnt/data/music' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/music' }}:/music
|
|
||||||
ports:
|
|
||||||
- 8096:8096
|
|
||||||
- 8920:8920
|
|
||||||
- 7359:7359/udp
|
|
||||||
- 1901:1900/udp
|
|
||||||
restart: unless-stopped
|
|
||||||
group_add:
|
|
||||||
- "992"
|
|
||||||
- "44"
|
|
||||||
networks:
|
|
||||||
- caddy_network
|
|
||||||
|
|
||||||
fladder:
|
|
||||||
image: ghcr.io/donutware/fladder:latest
|
|
||||||
ports:
|
|
||||||
- 5423:80
|
|
||||||
environment:
|
|
||||||
- PUID=1000
|
|
||||||
- PGID=100
|
|
||||||
- TZ=Europe/Amsterdam
|
|
||||||
- BASE_URL=https://jellyfin.mvl.sh
|
|
||||||
networks:
|
|
||||||
- caddy_network
|
|
||||||
|
|
||||||
networks:
|
|
||||||
caddy_network:
|
|
||||||
external: true
|
|
||||||
name: caddy_default
|
|
||||||
|
@@ -1,36 +0,0 @@
----
-- name: Deploy Jellyfin service
-  block:
-    - name: Set Jellyfin directories
-      ansible.builtin.set_fact:
-        jellyfin_data_dir: "{{ '/mnt/services/jellyfin' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/services/jellyfin' }}"
-        jellyfin_service_dir: "{{ ansible_env.HOME }}/services/jellyfin"
-
-    - name: Create Jellyfin directories
-      ansible.builtin.file:
-        path: "{{ jellyfin_dir }}"
-        state: directory
-        mode: "0755"
-      loop:
-        - "{{ jellyfin_data_dir }}"
-        - "{{ jellyfin_service_dir }}"
-      loop_control:
-        loop_var: jellyfin_dir
-
-    - name: Deploy Jellyfin docker-compose.yml
-      ansible.builtin.template:
-        src: docker-compose.yml.j2
-        dest: "{{ jellyfin_service_dir }}/docker-compose.yml"
-        mode: "0644"
-      register: jellyfin_compose
-
-    - name: Stop Jellyfin service
-      ansible.builtin.command: docker compose -f "{{ jellyfin_service_dir }}/docker-compose.yml" down --remove-orphans
-      when: jellyfin_compose.changed
-
-    - name: Start Jellyfin service
-      ansible.builtin.command: docker compose -f "{{ jellyfin_service_dir }}/docker-compose.yml" up -d
-      when: jellyfin_compose.changed
-  tags:
-    - services
-    - jellyfin
@@ -0,0 +1,61 @@
+services:
+  nextcloud:
+    image: nextcloud
+    container_name: nextcloud
+    restart: unless-stopped
+    networks:
+      - nextcloud
+      - caddy_network
+    depends_on:
+      - nextclouddb
+      - redis
+    ports:
+      - 8081:80
+    volumes:
+      - {{ nextcloud_data_dir }}/nextcloud/html:/var/www/html
+      - {{ nextcloud_data_dir }}/nextcloud/custom_apps:/var/www/html/custom_apps
+      - {{ nextcloud_data_dir }}/nextcloud/config:/var/www/html/config
+      - {{ nextcloud_data_dir }}/nextcloud/data:/var/www/html/data
+    environment:
+      - PUID=1000
+      - PGID=100
+      - TZ=Europe/Amsterdam
+      - MYSQL_DATABASE=nextcloud
+      - MYSQL_USER=nextcloud
+      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
+      - MYSQL_HOST=nextclouddb
+      - REDIS_HOST=redis
+
+  nextclouddb:
+    image: mariadb:11.4.7
+    container_name: nextcloud-db
+    restart: unless-stopped
+    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
+    networks:
+      - nextcloud
+    volumes:
+      - {{ nextcloud_data_dir }}/database:/var/lib/mysql
+    environment:
+      - PUID=1000
+      - PGID=100
+      - TZ=Europe/Amsterdam
+      - MYSQL_RANDOM_ROOT_PASSWORD=true
+      - MYSQL_PASSWORD={{ lookup('community.general.onepassword', 'Nextcloud', vault='Dotfiles', field='MYSQL_NEXTCLOUD_PASSWORD') }}
+      - MYSQL_DATABASE=nextcloud
+      - MYSQL_USER=nextcloud
+
+  redis:
+    image: redis:alpine
+    container_name: redis
+    volumes:
+      - {{ nextcloud_data_dir }}/redis:/data
+    networks:
+      - nextcloud
+
+networks:
+  nextcloud:
+    name: nextcloud
+    driver: bridge
+  caddy_network:
+    name: caddy_default
+    external: true
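First-run Nextcloud configuration still happens inside the container; for instance the hostname proxied by Caddy has to be allowed as a trusted domain. A sketch using the stock occ tool, assuming the compose file above is deployed to ~/services/nextcloud as in the task file that follows:

    cd ~/services/nextcloud
    # allow the proxied hostname and force https behind the reverse proxy
    docker compose exec -u www-data nextcloud php occ config:system:set trusted_domains 1 --value=drive.mvl.sh
    docker compose exec -u www-data nextcloud php occ config:system:set overwriteprotocol --value=https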
@@ -0,0 +1,31 @@
+---
+- name: Deploy Nextcloud service
+  block:
+    - name: Set Nextcloud directories
+      ansible.builtin.set_fact:
+        nextcloud_service_dir: "{{ ansible_env.HOME }}/services/nextcloud"
+        nextcloud_data_dir: "/mnt/services/nextcloud"
+
+    - name: Create Nextcloud directory
+      ansible.builtin.file:
+        path: "{{ nextcloud_service_dir }}"
+        state: directory
+        mode: "0755"
+
+    - name: Deploy Nextcloud docker-compose.yml
+      ansible.builtin.template:
+        src: docker-compose.yml.j2
+        dest: "{{ nextcloud_service_dir }}/docker-compose.yml"
+        mode: "0644"
+      register: nextcloud_compose
+
+    - name: Stop Nextcloud service
+      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" down --remove-orphans
+      when: nextcloud_compose.changed
+
+    - name: Start Nextcloud service
+      ansible.builtin.command: docker compose -f "{{ nextcloud_service_dir }}/docker-compose.yml" up -d
+      when: nextcloud_compose.changed
+  tags:
+    - services
+    - nextcloud
@@ -0,0 +1,26 @@
+services:
+  plex:
+    image: lscr.io/linuxserver/plex:latest
+    network_mode: host
+    restart: unless-stopped
+    runtime: nvidia
+    environment:
+      - PUID=1000
+      - PGID=100
+      - TZ=Europe/Amsterdam
+      - VERSION=docker
+      - NVIDIA_VISIBLE_DEVICES=all
+      - NVIDIA_DRIVER_CAPABILITIES=all
+    volumes:
+      - {{ plex_data_dir }}/config:/config
+      - {{ plex_data_dir }}/transcode:/transcode
+      - {{ '/mnt/data/movies' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/movies' }}:/movies
+      - {{ '/mnt/data/tvshows' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/tvshows' }}:/tvshows
+      - {{ '/mnt/data/music' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/music' }}:/music
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: all
+              capabilities: [gpu]
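runtime: nvidia and the GPU reservation only work when the NVIDIA Container Toolkit is installed and registered with Docker on the host; a quick sanity check (a sketch, not part of the commit):

    # the nvidia runtime must appear in Docker's runtime list
    docker info | grep -i runtimes

    # a throwaway container should be able to see the GPU via the injected nvidia-smi
    docker run --rm --gpus all ubuntu nvidia-smi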
config/ansible/tasks/servers/services/plex/plex.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
+---
+- name: Deploy Plex service
+  block:
+    - name: Set Plex directories
+      ansible.builtin.set_fact:
+        plex_data_dir: "{{ '/mnt/services/plex' if inventory_hostname == 'mennos-cachyos-desktop' else '/mnt/object_storage/services/plex' }}"
+        plex_service_dir: "{{ ansible_env.HOME }}/services/plex"
+
+    - name: Create Plex directories
+      ansible.builtin.file:
+        path: "{{ plex_dir }}"
+        state: directory
+        mode: "0755"
+      loop:
+        - "{{ plex_data_dir }}"
+        - "{{ plex_service_dir }}"
+      loop_control:
+        loop_var: plex_dir
+
+    - name: Deploy Plex docker-compose.yml
+      ansible.builtin.template:
+        src: docker-compose.yml.j2
+        dest: "{{ plex_service_dir }}/docker-compose.yml"
+        mode: "0644"
+      register: plex_compose
+
+    - name: Stop Plex service
+      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" down --remove-orphans
+      when: plex_compose.changed
+
+    - name: Start Plex service
+      ansible.builtin.command: docker compose -f "{{ plex_service_dir }}/docker-compose.yml" up -d
+      when: plex_compose.changed
+  tags:
+    - services
+    - plex
@@ -0,0 +1,37 @@
+services:
+  stash:
+    image: stashapp/stash:latest
+    container_name: stash
+    restart: unless-stopped
+    ports:
+      - "9999:9999"
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - STASH_STASH=/data/
+      - STASH_GENERATED=/generated/
+      - STASH_METADATA=/metadata/
+      - STASH_CACHE=/cache/
+      - STASH_PORT=9999
+    volumes:
+      - /etc/localtime:/etc/localtime:ro
+      ## Point this at your collection.
+      - {{ stash_data_dir }}:/data
+
+      ## Keep configs, scrapers, and plugins here.
+      - {{ stash_config_dir }}/config:/root/.stash
+      ## This is where your stash's metadata lives
+      - {{ stash_config_dir }}/metadata:/metadata
+      ## Any other cache content.
+      - {{ stash_config_dir }}/cache:/cache
+      ## Where to store binary blob data (scene covers, images)
+      - {{ stash_config_dir }}/blobs:/blobs
+      ## Where to store generated content (screenshots,previews,transcodes,sprites)
+      - {{ stash_config_dir }}/generated:/generated
+    networks:
+      - caddy_network
+
+networks:
+  caddy_network:
+    external: true
+    name: caddy_default
config/ansible/tasks/servers/services/stash/stash.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
+---
+- name: Deploy Stash service
+  block:
+    - name: Set Stash directories
+      ansible.builtin.set_fact:
+        stash_data_dir: '/mnt/data/stash'
+        stash_config_dir: '/mnt/services/stash'
+        stash_service_dir: "{{ ansible_env.HOME }}/services/stash"
+
+    - name: Create Stash directories
+      ansible.builtin.file:
+        path: "{{ stash_dir }}"
+        state: directory
+        mode: "0755"
+      loop:
+        - "{{ stash_data_dir }}"
+        - "{{ stash_service_dir }}"
+      loop_control:
+        loop_var: stash_dir
+
+    - name: Deploy Stash docker-compose.yml
+      ansible.builtin.template:
+        src: docker-compose.yml.j2
+        dest: "{{ stash_service_dir }}/docker-compose.yml"
+        mode: "0644"
+      register: stash_compose
+
+    - name: Stop Stash service
+      ansible.builtin.command: docker compose -f "{{ stash_service_dir }}/docker-compose.yml" down --remove-orphans
+      when: stash_compose.changed
+
+    - name: Start Stash service
+      ansible.builtin.command: docker compose -f "{{ stash_service_dir }}/docker-compose.yml" up -d
+      when: stash_compose.changed
+  tags:
+    - services
+    - stash
@@ -0,0 +1,21 @@
+---
+services:
+  tautulli:
+    image: lscr.io/linuxserver/tautulli:latest
+    container_name: tautulli
+    environment:
+      - PUID=1000
+      - PGID=100
+      - TZ=Etc/Amsterdam
+    volumes:
+      - {{ tautulli_data_dir }}:/config
+    ports:
+      - 8181:8181
+    restart: unless-stopped
+    networks:
+      - caddy_network
+
+networks:
+  caddy_network:
+    external: true
+    name: caddy_default
config/ansible/tasks/servers/services/tautulli/tautulli.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
+---
+- name: Deploy Tautulli service
+  block:
+    - name: Set Tautulli directories
+      ansible.builtin.set_fact:
+        tautulli_data_dir: "{{ '/mnt/services/tautulli' }}"
+        tautulli_service_dir: "{{ ansible_env.HOME }}/services/tautulli"
+
+    - name: Create Tautulli directories
+      ansible.builtin.file:
+        path: "{{ tautulli_dir }}"
+        state: directory
+        mode: "0755"
+      loop:
+        - "{{ tautulli_data_dir }}"
+        - "{{ tautulli_service_dir }}"
+      loop_control:
+        loop_var: tautulli_dir
+
+    - name: Deploy Tautulli docker-compose.yml
+      ansible.builtin.template:
+        src: docker-compose.yml.j2
+        dest: "{{ tautulli_service_dir }}/docker-compose.yml"
+        mode: "0644"
+      register: tautulli_compose
+
+    - name: Stop Tautulli service
+      ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" down --remove-orphans
+      when: tautulli_compose.changed
+
+    - name: Start Tautulli service
+      ansible.builtin.command: docker compose -f "{{ tautulli_service_dir }}/docker-compose.yml" up -d
+      when: tautulli_compose.changed
+  tags:
+    - services
+    - tautulli
config/autostart/Nextcloud.desktop (new file, 11 lines)
@@ -0,0 +1,11 @@
+[Desktop Entry]
+Name=Nextcloud
+GenericName=File Synchronizer
+Exec="/usr/bin/nextcloud" --background
+Terminal=false
+Icon=Nextcloud
+Categories=Network
+Type=Application
+StartupNotify=false
+X-GNOME-Autostart-enabled=true
+X-GNOME-Autostart-Delay=10
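The autostart entry can be linted before it is rolled out (a sketch; assumes desktop-file-utils is installed and the file ends up under ~/.config/autostart):

    desktop-file-validate config/autostart/Nextcloud.desktop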
@@ -1,5 +1,5 @@
 Host *
-    IdentityAgent ~/.1password/agent.sock
+    IdentityAgent SSH_AUTH_SOCK
     AddKeysToAgent yes
     ForwardAgent yes
 
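With IdentityAgent now set to the literal SSH_AUTH_SOCK, ssh follows whichever agent exported that environment variable instead of the hard-coded 1Password socket; a quick check from a fresh shell (sketch):

    echo "$SSH_AUTH_SOCK"
    ssh-add -l   # should list the keys offered by the active agent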
@@ -77,5 +77,14 @@
     "workbench.colorTheme": "Default Dark+",
     "ansible.lightspeed.enabled": false,
     "ansible.lightspeed.suggestions.enabled": false,
-    "docker.extension.enableComposeLanguageServer": false
+    "docker.extension.enableComposeLanguageServer": false,
+    "roo-cline.allowedCommands": [
+        "npm test",
+        "npm install",
+        "tsc",
+        "git log",
+        "git diff",
+        "git show"
+    ],
+    "roo-cline.deniedCommands": []
 }