Compare commits: legacy...0026a6d8dc (456 commits)

(Commit table omitted: only the SHA1 column survived extraction; the Author, Date, and message columns were lost.)
.ansible/.lock (new file, empty)
.bashrc (107 changes)

@@ -6,10 +6,13 @@ HISTFILESIZE=2000 # Adjusted to match both histfile and size criteria
# Docker Compose Alias (Mostly for old shell scripts)
alias docker-compose='docker compose'

# Home Manager Configuration
alias hm='cd $HOME/dotfiles/config/home-manager/ && home-manager'
alias hmnews='hm news --flake .#$DOTF_HOSTNAME'
alias hmup='hm switch --flake .#$DOTF_HOSTNAME --impure'
# tatool aliases
alias tls='tatool ls -g'
alias tps='tls'
alias ti='tatool doctor'
alias td='tatool doctor'
alias tr='tatool restart'
alias tsrc='tatool source'

# Modern tools aliases
alias l="eza --header --long --git --group-directories-first --group --icons --color=always --sort=name --hyperlink -o --no-permissions"

@@ -19,6 +22,7 @@ alias cat='bat'
alias du='dust'
alias df='duf'
alias rm="trash-put"
alias augp='sudo apt update && sudo apt upgrade -y && sudo apt autopurge -y && sudo apt autoclean'

# Docker Aliases
alias d='docker'

@@ -29,8 +33,10 @@ alias dcd='docker compose down'
alias dcu='docker compose up'
alias dcp='docker compose ps'
alias dcps='docker compose ps'
alias dcr='docker compose run'
alias dcpr='dcp && dcd && dcu -d && dcl -f'
alias dcr='dcd && dcu -d && dcl -f'
alias ddpul='docker compose down && docker compose pull && docker compose up -d && docker compose logs -f'
alias docker-nuke='docker kill $(docker ps -q) && docker rm $(docker ps -a -q) && docker system prune --all --volumes --force && docker volume prune --force'

# Git aliases
alias g='git'
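One subtle change in the hunk above: `dcr` previously meant `docker compose run` and is repurposed into a recreate cycle. A sketch of what the new chained aliases expand to, assuming `dcl` is the repo's alias for `docker compose logs` (it is referenced here but defined outside this hunk):

```bash
# dcr (new meaning): tear down, start detached, tail logs
docker compose down && docker compose up -d && docker compose logs -f
# dcpr: same cycle, but print `docker compose ps` output first
docker compose ps && docker compose down && docker compose up -d && docker compose logs -f
```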
@@ -45,23 +51,40 @@ alias gcm='git commit -m'
alias gco='git checkout'
alias gcb='git checkout -b'

# NodeJS aliases
alias node='node-20'
alias npm='npm-20'

# Kubernetes aliases (Minikube)
alias kubectl="minikube kubectl --"

# netstat port in use check
alias port='netstat -atupn | grep LISTEN'

# random string (Syntax: random <length>)
alias random='openssl rand -base64'

# Alias for ls to l but only if it's an interactive shell because we don't want to override ls in scripts which could blow up in our face
if [ -t 1 ]; then
    alias ls='l'
fi

# Alias for ssh.exe and ssh-add.exe on Windows WSL (microsoft-standard-WSL2)
if [[ $(uname -a) == *"microsoft-standard-WSL2"* ]]; then
    alias op='op.exe'
fi

# PATH Manipulation
export DOTFILES_PATH=$HOME/.dotfiles
export PATH=$PATH:$HOME/.local/bin
export PATH=$PATH:$HOME/.cargo/bin
export PATH=$PATH:$HOME/dotfiles/bin
export PATH=$PATH:$DOTFILES_PATH/bin

# In case $HOME/.flutter/flutter/bin is found, we can add it to the PATH
if [ -d "$HOME/.flutter/flutter/bin" ]; then
    export PATH=$PATH:$HOME/.flutter/flutter/bin
    export PATH="$PATH":"$HOME/.pub-cache/bin"

    # Flutter linux fixes:
    export CPPFLAGS="-I/usr/include"
    export LDFLAGS="-L/usr/lib/x86_64-linux-gnu -lbz2"
    export PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig:$PKG_CONFIG_PATH
fi

# Add flatpak to XDG_DATA_DIRS
export XDG_DATA_DIRS=$XDG_DATA_DIRS:/usr/share:/var/lib/flatpak/exports/share:$HOME/.local/share/flatpak/exports/share

@@ -72,21 +95,14 @@ export NIXPKGS_ALLOW_UNFREE=1
# Allow insecure nixpkgs
export NIXPKGS_ALLOW_INSECURE=1

# Set DOTF_HOSTNAME to the hostname from .hostname file
# If this file doesn't exist, use mennos-unknown-hostname
export DOTF_HOSTNAME="mennos-unknown-hostname"
if [ -f $HOME/.hostname ]; then
    export DOTF_HOSTNAME=$(cat $HOME/.hostname)
fi
# 1Password SSH Agent
export SSH_AUTH_SOCK=$HOME/.1password/agent.sock

# Tradaware / DiscountOffice Configuration
if [ -d "/home/menno/Projects/Work" ]; then
    export TRADAWARE_DEVOPS=true
fi

# Flutter Web and other tools that require Chrome
export CHROME_EXECUTABLE=$(which brave)

# 1Password Source Plugin (Assuming bash compatibility)
if [ -f /home/menno/.config/op/plugins.sh ]; then
    source /home/menno/.config/op/plugins.sh

@@ -101,43 +117,25 @@ else
    eval "$(starship init bash)"
fi

# Read .op_sat
if [ -f ~/.op_sat ]; then
    export OP_SERVICE_ACCOUNT_TOKEN=$(cat ~/.op_sat)

    # Ensure .op_sat is 0600 and only readable by the owner
    if [ "$(stat -c %a ~/.op_sat)" != "600" ]; then
        echo "WARNING: ~/.op_sat is not 0600, please fix this!"
    fi

    if [ "$(stat -c %U ~/.op_sat)" != "$(whoami)" ]; then
        echo "WARNING: ~/.op_sat is not owned by the current user, please fix this!"
    fi
fi

# Source nix home-manager
if [ -f "$HOME/.nix-profile/etc/profile.d/hm-session-vars.sh" ]; then
    . "$HOME/.nix-profile/etc/profile.d/hm-session-vars.sh"
fi

# Source agent-bridge script for 1password
source $HOME/dotfiles/bin/1password-agent-bridge.sh

# zoxide if available
if command -v zoxide &> /dev/null; then
    eval "$(zoxide init bash)"
fi

# Check if we are running from zellij, if not then launch it
launch_zellij_conditionally() {
    if [ -z "$ZELLIJ" ]; then
        # Don't launch zellij in tmux, vscode, screen or zeditor.
        if [ ! -t 1 ] || [ -n "$TMUX" ] || [ -n "$VSCODE_STABLE" ] || [ -n "$STY" ] || [ -n "$ZED_TERM" ]; then
            return
        fi

        # Launch zellij
        zellij

        # Exit if zellij exits properly with a zero exit code
        if [ $? -eq 0 ]; then
            exit $?
        fi

        echo "Zellij exited with a non-zero exit code, falling back to regular shell."
        return
    fi
}

# Disabled for now, I don't like the way it behaves but I don't want to remove it either
# launch_zellij_conditionally

# Source ble.sh if it exists
if [[ -f "${HOME}/.nix-profile/share/blesh/ble.sh" ]]; then
    source "${HOME}/.nix-profile/share/blesh/ble.sh"

@@ -161,6 +159,11 @@ if [[ -f "${HOME}/.nix-profile/share/blesh/ble.sh" ]]; then
    bind -x '"\C-r": fzf_history_search'
fi

# In case a bashrc.local exists, source it
if [ -f $HOME/.bashrc.local ]; then
    source $HOME/.bashrc.local
fi

# Display a welcome message for interactive shells
if [ -t 1 ]; then
    dotf hello
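The `~/.bashrc.local` hook near the end gives each machine a local override point, sourced after everything else. A minimal sketch of what such a file might hold (contents are hypothetical):

```bash
# ~/.bashrc.local: per-machine overrides (hypothetical example)
export CHROME_EXECUTABLE=$(which chromium)  # override the brave default on this box
export DOTF_HOSTNAME="mennos-testbox"       # pin a hostname without touching ~/.hostname
```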
.github/workflows/ansible.yml (new file, 37 lines)

@@ -0,0 +1,37 @@
name: Ansible Lint Check

on:
  pull_request:
  push:
    branches: [ master ]

jobs:
  check-ansible:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install Ansible and ansible-lint
        run: |
          python -m pip install --upgrade pip
          python -m pip install ansible ansible-lint

      - name: Run ansible-lint
        run: |
          if [ ! -d "config/ansible" ]; then
            echo "No ansible directory found at config/ansible"
            exit 0
          fi

          found_files=$(find config/ansible -name "*.yml" -o -name "*.yaml")
          if [ -z "$found_files" ]; then
            echo "No Ansible files found in config/ansible to lint"
            exit 0
          fi

          ansible-lint $found_files
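The same check can be reproduced locally before pushing; a minimal sketch, assuming `ansible-lint` is installed (for example via pip):

```bash
# Lint only the repo's Ansible tree, mirroring the workflow step
found_files=$(find config/ansible -name "*.yml" -o -name "*.yaml")
[ -n "$found_files" ] && ansible-lint $found_files
```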
.github/workflows/python.yml (new file, 42 lines)

@@ -0,0 +1,42 @@
name: Python Lint Check

on:
  pull_request:
  push:
    branches: [ master ]

jobs:
  check-python:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install Python linting tools
        run: |
          python -m pip install --upgrade pip
          python -m pip install pylint black

      - name: Run pylint
        run: |
          python_files=$(find . -name "*.py" -type f)
          if [ -z "$python_files" ]; then
            echo "No Python files found to lint"
            exit 0
          fi

          pylint $python_files

      - name: Check Black formatting
        run: |
          python_files=$(find . -name "*.py" -type f)
          if [ -z "$python_files" ]; then
            echo "No Python files found to lint"
            exit 0
          fi

          black --check $python_files
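Likewise, the Python checks can be run locally in one pass; a sketch assuming `pylint` and `black` are installed:

```bash
# Mirror the two workflow steps
python_files=$(find . -name "*.py" -type f)
pylint $python_files && black --check $python_files
```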
.gitignore (5 changes)

@@ -7,4 +7,7 @@ logs/*
secrets/**/*.*

# SHA256 hashes of the encrypted secrets
*.sha256

# python cache
**/__pycache__/
.vscode/settings.json (6 changes)

@@ -6,5 +6,9 @@
    "**/CVS": true,
    "**/.DS_Store": true,
    "**/*.sha256": true,
  },
  "files.associations": {
    "*.yml": "ansible"
  },
  "ansible.python.interpreterPath": "/usr/bin/python3"
}
README.md (64 changes)

@@ -1,31 +1,30 @@
# Setup

This dotfiles is intended to be used with NixOS 24.05.
Please install a clean version of NixOS GNOME and then follow the steps below.
This dotfiles is intended to be used with either Fedora 40+, Ubuntu 20.04+ or Arch Linux.
Please install a clean version of either distro with GNOME and then follow the steps below.

## Installation

### 0. Install NixOS
### 0. Install distro

Download the latest NixOS ISO from the [NixOS website](https://nixos.org/download.html) and write it to a USB stick.
I'd recommend getting the GNOME version as it's easier to set up, and you can select minimal from the installer anyway if you want to just set up a headless server.
Download the latest ISO from your desired distro and write it to a USB stick.
I'd recommend getting the GNOME version as it's easier to set up, unless you're planning on setting up a server, in which case I recommend getting the server ISO for the specific distro.

#### Note: If you intend on using a desktop environment you should select the GNOME version, as this dotfiles repository expects the GNOME desktop environment for various configurations

### 1. Clone dotfiles to home directory

Open a nix-shell with git and begin the setup process. This setup will prompt you various questions such as your desired hostname and if the system you are installing is supposed to be a server or workstation.
Open a shell and begin the setup process. The setup requires you to provide a hostname as a parameter. You can use an existing hostname to restore an old system or choose a new name.

Feel free to use an existing hostname to restore an old system or choose a new name.

If you are running this in a VM, be sure to answer yes if it prompts you. This will ensure it generates the correct boot loader configuration.
If you are running this in a VM, be sure to answer yes if it prompts you.

```bash
nix-shell -p git
curl -L https://df.mvl.sh | bash
curl -L https://df.mvl.sh | bash -s your-hostname
```

### 2. Reboot
Replace `your-hostname` with your desired hostname for this machine.

### 2. Relog/Reboot

It's probably a good idea to either reboot or log out and log back in to make sure all the changes are applied.

@@ -33,7 +32,11 @@ It's probably a good idea to either reboot or log out and log back in to m
```bash
# sudo reboot
```

### 3. Run `dotf update`
### 3. Create ~/.op_sat (Optional)

For servers you can place a file `~/.op_sat` with your 1Password Service Account Token; this can then be used by Ansible to fetch secrets for services. This is mostly for server systems, so you're able to skip it for workstations.
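A minimal sketch of creating that token file with the owner-only permissions the `.bashrc` check expects (the token value is a placeholder):

```bash
# Store the 1Password service account token; .bashrc warns unless mode is 0600
echo 'ops_exampletoken123' > ~/.op_sat   # placeholder value
chmod 600 ~/.op_sat
```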
### 4. Run `dotf update`

Run the `dotf update` command; although the setup script did most of the work, some symlinks still need to be set, which at the moment is done using shell scripts.

@@ -41,10 +44,6 @@ Run the `dotf update` command, although the setup script did most of the work so
```bash
dotf update
```

### 4. Setup 1Password (Optional)

1Password is installed but you need to login and enable the SSH agent and CLI components under the settings before continuing.

### 5. Decrypt secrets

Either using 1Password or by manually providing the decryption key, you should decrypt the secrets.

@@ -65,11 +64,38 @@ You should now have a fully setup system with all the configurations applied.
Here are some paths that contain files named after the hostname of the system.
If you add a new system you should add the relevant files to these paths.

- `config/nixos/hardware/`: Contains the hardware configurations for the different systems.
- `config/ssh/authorized_keys`: Contains the public keys per hostname that will be symlinked to the `~/.ssh/authorized_keys` file.
- `config/nixos/flake.nix`: Contains an array `nixosConfigurations` where you should be adding the new system hostname and relevant configuration.
- `config/home-manager/flake.nix`: Contains an array `homeConfigurations` where you should be adding the new system hostname and relevant configuration.

### Server reboots

In case you reboot a server, it's likely that it runs JuiceFS.
To be sure that every service is properly accessing JuiceFS-mounted files, you should restart the services once the server comes back online.

```bash
dotf service stop --all
df # confirm JuiceFS is mounted
dotf service start --all
```

### Object Storage (Servers only)

In case you need to adjust anything regarding the /mnt/object_storage JuiceFS,
ensure to shut down all services:

```bash
dotf service stop --all
```

Unmount the volume:

```bash
sudo systemctl stop juicefs
```

And optionally, if you're going to do something with metadata, you might need to stop Redis too:

```bash
cd ~/services/juicefs-redis/
docker compose down --remove-orphans
```

### Adding a new system

To add a new system you should follow these steps:
Deleted file (21 lines; file name not captured in the page extract)

@@ -1,21 +0,0 @@
source $HOME/dotfiles/bin/helpers/functions.sh
export SSH_AUTH_SOCK=$HOME/.1password/agent.sock

# Check if is_wsl function returns true, don't continue if we are not on WSL
if ! is_wsl; then
    return
fi

printfe "%s" "cyan" "Running in WSL, ensuring 1Password SSH-Agent relay is running..."
ALREADY_RUNNING=$(ps -auxww | grep -q "[n]piperelay.exe -ei -s //./pipe/openssh-ssh-agent"; echo $?)
if [[ $ALREADY_RUNNING != "0" ]]; then
    if [[ -S $SSH_AUTH_SOCK ]]; then
        rm $SSH_AUTH_SOCK
    fi

    (setsid socat UNIX-LISTEN:$SSH_AUTH_SOCK,fork EXEC:"npiperelay.exe -ei -s //./pipe/openssh-ssh-agent",nofork &) >/dev/null 2>&1
    printfe "%s\n" "green" " [ Started ]"
    exit 0
fi

printfe "%s\n" "green" " [ Already running ]"
bin/actions/auto-start.py (new executable file, 88 lines)

@@ -0,0 +1,88 @@
#!/usr/bin/env python3

import os
import sys
import time
import subprocess

# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, run_command


def check_command_exists(command):
    """Check if a command is available in the system"""
    try:
        subprocess.run(
            ["which", command],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return True
    except subprocess.CalledProcessError:
        return False


def list_screen_sessions():
    """List all screen sessions"""
    success, output = run_command(["screen", "-ls"])
    return output


def wipe_dead_sessions():
    """Check and clean up dead screen sessions"""
    screen_list = list_screen_sessions()
    if "Dead" in screen_list:
        print("Found dead sessions, cleaning up...")
        run_command(["screen", "-wipe"])


def is_app_running(app_name):
    """Check if an app is already running in a screen session"""
    screen_list = list_screen_sessions()
    return app_name in screen_list


def start_app(app_name, command):
    """Start an application in a screen session"""
    printfe("green", f"Starting {app_name} with command: {command}...")
    run_command(["screen", "-dmS", app_name] + command.split())
    time.sleep(1)  # Give it a moment to start


def main():
    # Define dictionary with app_name => command mapping
    apps = {
        "vesktop": "vesktop",
        "ktailctl": "flatpak run org.fkoehler.KTailctl",
        "ulauncher": "ulauncher --no-window-shadow --hide-window",
        "nemo-desktop": "nemo-desktop",
    }

    # Clean up dead sessions if any
    wipe_dead_sessions()

    print("Starting auto-start applications...")
    for app_name, command in apps.items():
        # Get the binary name (first part of the command)
        command_binary = command.split()[0]

        # Check if the command exists
        if check_command_exists(command_binary):
            # Check if the app is already running
            if is_app_running(app_name):
                printfe("yellow", f"{app_name} is already running. Skipping...")
                continue

            # Start the application
            start_app(app_name, command)

    # Display screen sessions
    print(list_screen_sessions())

    return 0


if __name__ == "__main__":
    sys.exit(main())
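For reference, what the script effectively runs per application is plain GNU screen; a sketch of the equivalent manual commands:

```bash
screen -dmS vesktop vesktop   # detached session named after the app
screen -ls                    # list sessions (the script greps this output)
screen -wipe                  # clean up sessions marked Dead
```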
Deleted file (37 lines; file name not captured, apparently the Bash predecessor of auto-start.py)

@@ -1,37 +0,0 @@
#!/usr/bin/env bash

apps=(
    "spotify"
    "whatsapp-for-linux"
    "telegram-desktop"
    "vesktop"
    "trayscale"
    "1password"
    "ulauncher-wrapped --no-window-shadow --hide-window"
    "polkit-agent"
    "swaync"
    "nm-applet"
    "blueman-applet"
)

# check if screen has any dead sessions
if screen -list | grep -q "Dead"; then
    screen -wipe
fi

echo "Starting auto-start applications..."
for app in "${apps[@]}"; do
    app_name=$(echo $app | awk '{print $1}')
    app_params=$(echo $app | cut -d' ' -f2-)

    if [ -x "$(command -v $app_name)" ]; then
        if screen -list | grep -q $app_name; then
            echo "$app_name is already running. Skipping..."
            continue
        fi

        echo "Starting $app_name with parameters $app_params..."
        screen -dmS $app_name $app_name $app_params
        sleep 1
    fi
done
bin/actions/hello.py (new executable file, 191 lines)

@@ -0,0 +1,191 @@
#!/usr/bin/env python3

import os
import sys
import subprocess
from datetime import datetime

# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, logo, _rainbow_color, COLORS


def get_last_ssh_login():
    """Get information about the last SSH login"""
    try:
        result = subprocess.run(
            ["lastlog", "-u", os.environ.get("USER", "")],
            capture_output=True,
            text=True,
        )

        # If lastlog didn't work try lastlog2
        if result.returncode != 0:
            result = subprocess.run(
                ["lastlog2", os.environ.get("USER", "")], capture_output=True, text=True
            )

        if result.returncode == 0:
            lines = result.stdout.strip().split("\n")
            if len(lines) >= 2:  # Header line + data line
                # Parse the last login line - example format:
                # menno ssh 100.99.23.98 Mon Mar 10 19:09:43 +0100 2025
                parts = lines[1].split()
                if len(parts) >= 7 and "ssh" in parts[1]:  # Check if it's an SSH login
                    # Extract IP address from the third column
                    ip = parts[2]
                    # Time is the rest of the line starting from position 3
                    time_str = " ".join(parts[3:])
                    return f"{COLORS['cyan']}Last SSH login{COLORS['reset']}{COLORS['yellow']} {time_str}{COLORS['cyan']} from{COLORS['yellow']} {ip}"
        return None
    except Exception as e:
        # For debugging, you might want to print the exception
        # print(f"Error getting SSH login: {str(e)}")
        return None


def check_dotfiles_status():
    """Check if the dotfiles repository is dirty"""
    dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
    try:
        if not os.path.isdir(os.path.join(dotfiles_path, ".git")):
            return None

        # Check for git status details
        status = {
            "is_dirty": False,
            "untracked": 0,
            "modified": 0,
            "staged": 0,
            "commit_hash": "",
            "unpushed": 0,
        }

        # Get status of files
        result = subprocess.run(
            ["git", "status", "--porcelain"],
            cwd=dotfiles_path,
            capture_output=True,
            text=True,
        )

        if result.stdout.strip():
            status["is_dirty"] = True
            for line in result.stdout.splitlines():
                if line.startswith("??"):
                    status["untracked"] += 1
                if line.startswith(" M") or line.startswith("MM"):
                    status["modified"] += 1
                if line.startswith("M ") or line.startswith("A "):
                    status["staged"] += 1

        # Get current commit hash
        result = subprocess.run(
            ["git", "rev-parse", "--short", "HEAD"],
            cwd=dotfiles_path,
            capture_output=True,
            text=True,
        )
        if result.returncode == 0:
            status["commit_hash"] = result.stdout.strip()

        # Count unpushed commits
        # Fix: Remove capture_output and set stdout explicitly
        result = subprocess.run(
            ["git", "log", "--oneline", "@{u}.."],
            cwd=dotfiles_path,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            text=True,
        )
        if result.returncode == 0:
            status["unpushed"] = len(result.stdout.splitlines())

        return status
    except Exception as e:
        print(f"Error checking dotfiles status: {str(e)}")
        return None


def get_condensed_status():
    """Generate a condensed status line for trash and git status"""
    status_parts = []

    # Check trash status
    trash_path = os.path.expanduser("~/.local/share/Trash/files")
    try:
        if os.path.exists(trash_path):
            items = os.listdir(trash_path)
            count = len(items)
            if count > 0:
                status_parts.append(f"[!] {count} file(s) in trash")
    except Exception:
        pass

    # Check dotfiles status
    dotfiles_status = check_dotfiles_status()
    if dotfiles_status is not None:
        if dotfiles_status["is_dirty"]:
            status_parts.append(f"{COLORS['yellow']}dotfiles is dirty{COLORS['reset']}")
            status_parts.append(
                f"{COLORS['red']}[{dotfiles_status['untracked']}] untracked{COLORS['reset']}"
            )
            status_parts.append(
                f"{COLORS['yellow']}[{dotfiles_status['modified']}] modified{COLORS['reset']}"
            )
            status_parts.append(
                f"{COLORS['green']}[{dotfiles_status['staged']}] staged{COLORS['reset']}"
            )

        if dotfiles_status["commit_hash"]:
            status_parts.append(
                f"{COLORS['white']}[{COLORS['blue']}{dotfiles_status['commit_hash']}{COLORS['white']}]{COLORS['reset']}"
            )

        if dotfiles_status["unpushed"] > 0:
            status_parts.append(
                f"{COLORS['yellow']}[!] You have {dotfiles_status['unpushed']} commit(s) to push{COLORS['reset']}"
            )
    else:
        status_parts.append("Unable to check dotfiles status")

    if status_parts:
        return " - ".join(status_parts)
    return None


def welcome():
    """Display welcome message with hostname and username"""
    print()

    # Get hostname and username
    hostname = os.uname().nodename
    username = os.environ.get("USER", os.environ.get("USERNAME", "user"))

    # Get SSH login info first
    ssh_login = get_last_ssh_login()

    print(f"{COLORS['cyan']}You're logged in on [", end="")
    print(_rainbow_color(hostname), end="")
    print(f"{COLORS['cyan']}] as [", end="")
    print(_rainbow_color(username), end="")
    print(f"{COLORS['cyan']}]{COLORS['reset']}")

    # Display last SSH login info if available
    if ssh_login:
        print(f"{ssh_login}{COLORS['reset']}")

    # Display condensed status line
    condensed_status = get_condensed_status()
    if condensed_status:
        print(f"{COLORS['yellow']}{condensed_status}{COLORS['reset']}")


def main():
    logo(continue_after=True)
    welcome()
    return 0


if __name__ == "__main__":
    sys.exit(main())
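One detail worth noting in `check_dotfiles_status`: the unpushed-commit count only works when the branch has an upstream configured, since `@{u}` resolves against it. The manual equivalent:

```bash
git log --oneline @{u}..   # commits on the local branch that the upstream lacks
```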
Deleted file (20 lines; file name not captured, apparently the Bash predecessor of hello.py)

@@ -1,20 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

welcome() {
    echo
    tput setaf 6
    printf "You're logged in on ["
    printf $HOSTNAME | lolcat
    tput setaf 6
    printf "] as "
    printf "["
    printf $USER | lolcat
    tput setaf 6
    printf "]\n"
    tput sgr0
}

logo continue
welcome
bin/actions/help.py (new executable file, 30 lines)

@@ -0,0 +1,30 @@
#!/usr/bin/env python3

import os
import sys

# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, println, logo


def main():
    # Print logo
    logo(continue_after=True)

    # Print help
    dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
    try:
        with open(f"{dotfiles_path}/bin/resources/help.txt", "r") as f:
            help_text = f.read()
        print(help_text)
    except Exception as e:
        printfe("red", f"Error reading help file: {e}")
        return 1

    println(" ", "cyan")
    return 0


if __name__ == "__main__":
    sys.exit(main())
Deleted file (10 lines; file name not captured, apparently the Bash predecessor of help.py)

@@ -1,10 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

# Print logo
logo

# Print help
cat $HOME/dotfiles/bin/resources/help.txt
println " " "cyan"
bin/actions/lint.py (new executable file, 179 lines)

@@ -0,0 +1,179 @@
#!/usr/bin/env python3

import os
import sys
import subprocess
import argparse
from pathlib import Path

# Import helper functions
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__))))
from helpers.functions import printfe, command_exists

DOTFILES_ROOT = os.path.expanduser("~/.dotfiles")


def lint_ansible(fix=False):
    """Run ansible-lint on Ansible files"""
    ansible_dir = os.path.join(DOTFILES_ROOT, "config/ansible")

    if not os.path.isdir(ansible_dir):
        printfe("yellow", "No ansible directory found at config/ansible")
        return 0

    # Find all YAML files in the ansible directory
    yaml_files = []
    for ext in [".yml", ".yaml"]:
        yaml_files.extend(list(Path(ansible_dir).glob(f"**/*{ext}")))

    if not yaml_files:
        printfe("yellow", "No Ansible files found in config/ansible to lint")
        return 0

    if not command_exists("ansible-lint"):
        printfe(
            "red",
            "ansible-lint is not installed. Please install it with pip or your package manager.",
        )
        return 1

    printfe("blue", f"Running ansible-lint{' with auto-fix' if fix else ''}...")
    files_to_lint = [str(f) for f in yaml_files]

    command = ["ansible-lint"]
    if fix:
        command.append("--fix")
    command.extend(files_to_lint)

    result = subprocess.run(command, check=False)
    return result.returncode


def lint_nix():
    """Run nixfmt on Nix files"""
    nix_files = list(Path(DOTFILES_ROOT).glob("**/*.nix"))

    if not nix_files:
        printfe("yellow", "No Nix files found to lint")
        return 0

    if not command_exists("nixfmt"):
        printfe(
            "red",
            "nixfmt is not installed. Please install it with nix-env or your package manager.",
        )
        return 1

    printfe("blue", "Running nixfmt...")
    exit_code = 0
    for nix_file in nix_files:
        printfe("cyan", f"Formatting {nix_file}")
        result = subprocess.run(["nixfmt", str(nix_file)], check=False)
        if result.returncode != 0:
            exit_code = 1

    return exit_code


def lint_python(fix=False):
    """Run pylint and black on Python files"""
    python_files = list(Path(DOTFILES_ROOT).glob("**/*.py"))

    if not python_files:
        printfe("yellow", "No Python files found to lint")
        return 0

    exit_code = 0

    # Check for pylint
    if command_exists("pylint"):
        printfe("blue", "Running pylint...")
        files_to_lint = [str(f) for f in python_files]
        result = subprocess.run(["pylint"] + files_to_lint, check=False)
        if result.returncode != 0:
            exit_code = 1
    else:
        printfe("yellow", "pylint is not installed. Skipping Python linting.")

    # Check for black
    if command_exists("black"):
        printfe(
            "blue", f"Running black{' --check' if not fix else ''} on Python files..."
        )
        black_args = ["black"]
        if not fix:
            black_args.append("--check")
        black_args.extend([str(f) for f in python_files])

        result = subprocess.run(black_args, check=False)
        if result.returncode != 0:
            exit_code = 1
    else:
        printfe("yellow", "black is not installed. Skipping Python formatting.")

    if not command_exists("pylint") and not command_exists("black"):
        printfe(
            "red",
            "Neither pylint nor black is installed. Please run: `pip install pylint black`",
        )
        return 1

    return exit_code


def main():
    """
    Entry point for running linters on dotfiles.

    This function parses command-line arguments to determine which linters to run
    and whether to apply auto-fixes. It supports running linters for Ansible, Nix,
    and Python files. If no specific linter is specified, all linters are executed.

    Command-line arguments:
        --ansible: Run only ansible-lint.
        --nix: Run only nixfmt.
        --python: Run only Python linters (pylint, black).
        --fix: Auto-fix issues where possible.

    Returns:
        int: Exit code indicating the success or failure of the linting process.
             A non-zero value indicates that one or more linters reported issues.
    """
    parser = argparse.ArgumentParser(description="Run linters on dotfiles")
    parser.add_argument("--ansible", action="store_true", help="Run only ansible-lint")
    parser.add_argument("--nix", action="store_true", help="Run only nixfmt")
    parser.add_argument(
        "--python", action="store_true", help="Run only Python linters (pylint, black)"
    )
    parser.add_argument(
        "--fix", action="store_true", help="Auto-fix issues where possible"
    )
    args = parser.parse_args()

    # If no specific linter is specified, run all
    run_ansible = args.ansible or not (args.ansible or args.nix or args.python)
    run_nix = args.nix or not (args.ansible or args.nix or args.python)
    run_python = args.python or not (args.ansible or args.nix or args.python)

    exit_code = 0

    if run_ansible:
        ansible_result = lint_ansible(fix=args.fix)
        if ansible_result != 0:
            exit_code = ansible_result

    if run_nix:
        nix_result = lint_nix()
        if nix_result != 0:
            exit_code = nix_result

    if run_python:
        python_result = lint_python(fix=args.fix)
        if python_result != 0:
            exit_code = python_result

    return exit_code


if __name__ == "__main__":
    sys.exit(main())
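Invocation follows directly from the argument parser; a few example runs (the script is executable, so direct invocation works):

```bash
bin/actions/lint.py                 # run ansible-lint, nixfmt and pylint/black
bin/actions/lint.py --python --fix  # only Python, letting black rewrite files
bin/actions/lint.py --nix           # only nixfmt
```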
bin/actions/secrets.py (new executable file, 185 lines)

@@ -0,0 +1,185 @@
#!/usr/bin/env python3

import os
import sys
import subprocess
import hashlib
import glob

# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, run_command


def get_password():
    """Get password from 1Password"""
    op_cmd = "op"

    # Try to get the password
    success, output = run_command(
        [op_cmd, "read", "op://j7nmhqlsjmp2r6umly5t75hzb4/Dotfiles Secrets/password"]
    )

    if not success:
        printfe("red", "Failed to fetch password from 1Password.")
        return None

    # Check if we need to use a token
    if "use 'op item get" in output:
        # Extract the token
        token = output.split("use 'op item get ")[1].split(" --")[0]
        printfe("cyan", f"Got fetch token: {token}")

        # Use the token to get the actual password
        success, password = run_command(
            [op_cmd, "item", "get", token, "--reveal", "--fields", "password"]
        )
        if not success:
            return None
        return password
    else:
        # We already got the password
        return output


def prompt_for_password():
    """Ask for password manually"""
    import getpass

    printfe("cyan", "Enter the password manually: ")
    password = getpass.getpass("")

    if not password:
        printfe("red", "Password cannot be empty.")
        sys.exit(1)

    printfe("green", "Password entered successfully.")
    return password


def calculate_checksum(file_path):
    """Calculate SHA256 checksum of a file"""
    sha256_hash = hashlib.sha256()
    with open(file_path, "rb") as f:
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_hash.update(byte_block)
    return sha256_hash.hexdigest()


def encrypt_folder(folder_path, password):
    """Recursively encrypt files in a folder"""
    for item in glob.glob(os.path.join(folder_path, "*")):
        # Skip .gpg and .sha256 files
        if item.endswith(".gpg") or item.endswith(".sha256"):
            continue

        # Handle directories recursively
        if os.path.isdir(item):
            encrypt_folder(item, password)
            continue

        # Calculate current checksum
        current_checksum = calculate_checksum(item)
        checksum_file = f"{item}.sha256"

        # Check if file changed since last encryption
        if os.path.exists(checksum_file):
            with open(checksum_file, "r") as f:
                previous_checksum = f.read().strip()

            if current_checksum == previous_checksum:
                continue

        # Remove existing .gpg file if it exists
        gpg_file = f"{item}.gpg"
        if os.path.exists(gpg_file):
            os.remove(gpg_file)

        # Encrypt the file
        printfe("cyan", f"Encrypting {item}...")
        cmd = [
            "gpg",
            "--quiet",
            "--batch",
            "--yes",
            "--symmetric",
            "--cipher-algo",
            "AES256",
            "--armor",
            "--passphrase",
            password,
            "--output",
            gpg_file,
            item,
        ]

        success, _ = run_command(cmd)
        if success:
            printfe("cyan", f"Staging {item} for commit...")
            run_command(["git", "add", "-f", gpg_file])

            # Update checksum file
            with open(checksum_file, "w") as f:
                f.write(current_checksum)
        else:
            printfe("red", f"Failed to encrypt {item}")


def decrypt_folder(folder_path, password):
    """Recursively decrypt files in a folder"""
    for item in glob.glob(os.path.join(folder_path, "*")):
        # Handle .gpg files
        if item.endswith(".gpg"):
            output_file = item[:-4]  # Remove .gpg extension
            printfe("cyan", f"Decrypting {item}...")

            cmd = [
                "gpg",
                "--quiet",
                "--batch",
                "--yes",
                "--decrypt",
                "--passphrase",
                password,
                "--output",
                output_file,
                item,
            ]

            success, _ = run_command(cmd)
            if not success:
                printfe("red", f"Failed to decrypt {item}")

        # Process directories recursively
        elif os.path.isdir(item):
            printfe("cyan", f"Decrypting folder {item}...")
            decrypt_folder(item, password)


def main():
    if len(sys.argv) != 2 or sys.argv[1] not in ["encrypt", "decrypt"]:
        printfe("red", "Usage: secrets.py [encrypt|decrypt]")
        return 1

    # Get the dotfiles path
    dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))
    secrets_path = os.path.join(dotfiles_path, "secrets")

    # Get the password
    password = get_password()
    if not password:
        password = prompt_for_password()

    # Perform the requested action
    if sys.argv[1] == "encrypt":
        printfe("cyan", "Encrypting secrets...")
        encrypt_folder(secrets_path, password)
    else:  # decrypt
        printfe("cyan", "Decrypting secrets...")
        decrypt_folder(secrets_path, password)

    return 0


if __name__ == "__main__":
    sys.exit(main())
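The change-detection scheme here is simple: a `.sha256` sidecar per plaintext file, re-encrypting only on checksum mismatch. What the script runs per file boils down to:

```bash
# Per-file encryption as performed by encrypt_folder
gpg --quiet --batch --yes --symmetric --cipher-algo AES256 --armor \
    --passphrase "$password" --output secretfile.gpg secretfile
sha256sum secretfile   # hash stored next to the file as secretfile.sha256
```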
Deleted file (115 lines; file name not captured, apparently the Bash predecessor of secrets.py)

@@ -1,115 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

if is_wsl; then
    output=$(op.exe item get "Dotfiles Secrets" --fields password)
else
    output=$(op item get "Dotfiles Secrets" --fields password)
fi

# Check if command was a success
if [[ $? -ne 0 ]]; then
    printfe "%s\n" "red" "Failed to fetch password from 1Password."
fi

# In case the output does not contain use 'op item get, it means the password was fetched successfully
# Without having to reveal the password using an external command
if [[ ! $output == *"use 'op item get"* ]]; then
    password=$output
else
    token=$(echo "$output" | grep -oP "(?<=\[use 'op item get ).*(?= --)")
    printfe "%s\n" "cyan" "Got fetch token: $token"

    if is_wsl; then
        password=$(op.exe item get $token --reveal --field password)
    else
        password=$(op item get $token --reveal --fields password)
    fi
fi

# only continue if password isn't empty
if [[ -z "$password" ]]; then
    printfe "%s\n" "red" "Something went wrong while fetching the password from 1Password."

    # Ask for manual input
    printfe "%s" "cyan" "Enter the password manually: "
    read -s password
    echo

    if [[ -z "$password" ]]; then
        printfe "%s\n" "red" "Password cannot be empty."
        exit 1
    fi

    printfe "%s\n" "green" "Password entered successfully."
fi

encrypt_folder() {
    for file in $1/*; do
        # Skip if the current file is a .gpg file
        if [[ $file == *.gpg ]]; then
            continue
        fi

        # Skip if the current file is a .sha256 file
        if [[ $file == *.sha256 ]]; then
            continue
        fi

        # If the file is a directory, call this function recursively
        if [[ -d $file ]]; then
            encrypt_folder $file
            continue
        fi

        current_checksum=$(sha256sum "$file" | awk '{ print $1 }')
        checksum_file="$file.sha256"

        if [[ -f $checksum_file ]]; then
            previous_checksum=$(cat $checksum_file)

            if [[ $current_checksum == $previous_checksum ]]; then
                continue
            fi
        fi

        # If the file has an accompanying .gpg file, remove it
        if [[ -f $file.gpg ]]; then
            rm "$file.gpg"
        fi

        printfe "%s\n" "cyan" "Encrypting $file..."
        gpg --quiet --batch --yes --symmetric --cipher-algo AES256 --armor --passphrase="$password" --output "$file.gpg" "$file"

        # Update checksum file
        echo $current_checksum > "$checksum_file"
    done
}

# Recursively decrypt all .gpg files under the folder specified, recursively call this function for sub folders!
# Keep the original file name minus the .gpg extension
decrypt_folder() {
    for file in $1/*; do
        # If the current file is a .gpg file, decrypt it
        if [[ $file == *.gpg ]]; then
            filename=$(basename $file .gpg)
            printfe "%s\n" "cyan" "Decrypting $file..."
            gpg --quiet --batch --yes --decrypt --passphrase="$password" --output $1/$filename $file
        fi

        # If file is actually a folder, call this function recursively
        if [[ -d $file ]]; then
            printfe "%s\n" "cyan" "Decrypting folder $file..."
            decrypt_folder $file
        fi
    done
}

if [[ "$1" == "decrypt" ]]; then
    printfe "%s\n" "cyan" "Decrypting secrets..."
    decrypt_folder ~/dotfiles/secrets
elif [[ "$1" == "encrypt" ]]; then
    printfe "%s\n" "cyan" "Encrypting secrets..."
    encrypt_folder ~/dotfiles/secrets
fi
bin/actions/service.py (new executable file, 375 lines; listing truncated)

@@ -0,0 +1,375 @@
#!/usr/bin/env python3

import os
import sys
import subprocess
import argparse

# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, println, logo

# Base directory for Docker services $HOME/services
SERVICES_DIR = os.path.join(os.path.expanduser("~"), "services")
# Protected services that should never be stopped
PROTECTED_SERVICES = ["juicefs-redis"]


def get_service_path(service_name):
    """Return the path to a service's docker-compose file"""
    service_dir = os.path.join(SERVICES_DIR, service_name)
    compose_file = os.path.join(service_dir, "docker-compose.yml")

    if not os.path.exists(compose_file):
        printfe("red", f"Error: Service '{service_name}' not found at {compose_file}")
        return None

    return compose_file


def run_docker_compose(args, service_name=None, compose_file=None):
    """Run docker compose command with provided args"""
    if service_name and not compose_file:
        compose_file = get_service_path(service_name)
        if not compose_file:
            return 1

    cmd = ["docker", "compose"]

    if compose_file:
        cmd.extend(["-f", compose_file])

    cmd.extend(args)

    printfe("blue", f"Running: {' '.join(cmd)}")
    result = subprocess.run(cmd)
    return result.returncode


def get_all_services():
    """Return a list of all available services"""
    if not os.path.exists(SERVICES_DIR):
        return []

    services = [
        d
        for d in os.listdir(SERVICES_DIR)
        if os.path.isdir(os.path.join(SERVICES_DIR, d))
        and os.path.exists(os.path.join(SERVICES_DIR, d, "docker-compose.yml"))
    ]

    return sorted(services)


def cmd_start(args):
    """Start a Docker service"""
    if args.all:
        services = get_all_services()
        if not services:
            printfe("yellow", "No services found to start")
            return 0

        printfe("blue", f"Starting all services: {', '.join(services)}")

        failed_services = []
        for service in services:
            printfe("blue", f"\n=== Starting {service} ===")
            result = run_docker_compose(["up", "-d"], service_name=service)
            if result != 0:
                failed_services.append(service)

        if failed_services:
            printfe(
                "red",
                f"\nFailed to start the following services: {', '.join(failed_services)}",
            )
            return 1
        else:
            printfe("green", "\nAll services started successfully")
            return 0
    else:
        return run_docker_compose(["up", "-d"], service_name=args.service)


def cmd_stop(args):
    """Stop a Docker service"""
    if args.all:
        running_services = get_all_running_services()
        if not running_services:
            printfe("yellow", "No running services found to stop")
            return 0

        # Filter out the protected services
        safe_services = [s for s in running_services if s not in PROTECTED_SERVICES]

        # Check if protected services were filtered out
        protected_running = [s for s in running_services if s in PROTECTED_SERVICES]
        if protected_running:
            printfe(
                "yellow",
                f"Note: {', '.join(protected_running)} will not be stopped as they are protected services",
            )

        if not safe_services:
            printfe(
                "yellow", "No services to stop (all running services are protected)"
            )
            return 0

        printfe("blue", f"Stopping all running services: {', '.join(safe_services)}")

        failed_services = []
        for service in safe_services:
            printfe("blue", f"\n=== Stopping {service} ===")
            result = run_docker_compose(["down"], service_name=service)
            if result != 0:
                failed_services.append(service)

        if failed_services:
            printfe(
                "red",
                f"\nFailed to stop the following services: {', '.join(failed_services)}",
            )
            return 1
        else:
            printfe("green", "\nAll running services stopped successfully")
            return 0
    else:
        # Check if trying to stop a protected service
        if args.service in PROTECTED_SERVICES:
            printfe(
                "red",
                f"Error: {args.service} is a protected service and cannot be stopped",
            )
            printfe(
                "yellow",
                f"The {args.service} service is required for other services to work properly",
            )
            return 1
        return run_docker_compose(["down"], service_name=args.service)


def cmd_restart(args):
    """Restart a Docker service"""
    return run_docker_compose(["restart"], service_name=args.service)


def get_all_running_services():
    """Return a list of all running services"""
    if not os.path.exists(SERVICES_DIR):
        return []

    running_services = []
    services = [
        d
        for d in os.listdir(SERVICES_DIR)
        if os.path.isdir(os.path.join(SERVICES_DIR, d))
        and os.path.exists(os.path.join(SERVICES_DIR, d, "docker-compose.yml"))
    ]

    for service in services:
        if check_service_running(service) > 0:
            running_services.append(service)

    return running_services


def cmd_update(args):
    """Update a Docker service by pulling new images and recreating containers if needed"""
    if args.all:
        running_services = get_all_running_services()
        if not running_services:
            printfe("yellow", "No running services found to update")
            return 0

        printfe("blue", f"Updating all running services: {', '.join(running_services)}")

        failed_services = []
        for service in running_services:
            printfe("blue", f"\n=== Updating {service} ===")

            # Pull the latest images
            pull_result = run_docker_compose(["pull"], service_name=service)

            # Bring the service up with the latest images
            up_result = run_docker_compose(["up", "-d"], service_name=service)

            if pull_result != 0 or up_result != 0:
                failed_services.append(service)

        if failed_services:
            printfe(
                "red",
                f"\nFailed to update the following services: {', '.join(failed_services)}",
            )
            return 1
        else:
            printfe("green", "\nAll running services updated successfully")
            return 0
    else:
        # The original single-service update logic
        # First pull the latest images
        pull_result = run_docker_compose(["pull"], service_name=args.service)
        if pull_result != 0:
            return pull_result

        # Then bring the service up with the latest images
        return run_docker_compose(["up", "-d"], service_name=args.service)


def cmd_ps(args):
    """Show Docker service status"""
    if args.service:
        return run_docker_compose(["ps"], service_name=args.service)
    else:
        return run_docker_compose(["ps"])


def cmd_logs(args):
    """Show Docker service logs"""
    cmd = ["logs"]

    if args.follow:
        cmd.append("-f")

    if args.tail:
        cmd.extend(["--tail", args.tail])

    return run_docker_compose(cmd, service_name=args.service)


def check_service_running(service_name):
    """Check if service has running containers and return the count"""
    compose_file = get_service_path(service_name)
    if not compose_file:
        return 0

    result = subprocess.run(
        ["docker", "compose", "-f", compose_file, "ps", "--quiet"],
        capture_output=True,
        text=True,
    )

    # Count non-empty lines to get container count
    containers = [line for line in result.stdout.strip().split("\n") if line]
    return len(containers)


def cmd_list(args):
    """List available Docker services"""
    if not os.path.exists(SERVICES_DIR):
        printfe("red", f"Error: Services directory not found at {SERVICES_DIR}")
        return 1

    services = [
        d
        for d in os.listdir(SERVICES_DIR)
        if os.path.isdir(os.path.join(SERVICES_DIR, d))
        and os.path.exists(os.path.join(SERVICES_DIR, d, "docker-compose.yml"))
    ]

    if not services:
        printfe("yellow", "No Docker services found")
        return 0

    println("Available Docker services:", "blue")
    for service in sorted(services):
        container_count = check_service_running(service)
        is_running = container_count > 0

        if is_running:
            status = f"[RUNNING - {container_count} container{'s' if container_count > 1 else ''}]"
            color = "green"
        else:
            status = "[STOPPED]"
            color = "red"

        printfe(color, f" - {service:<20} {status}")

    return 0


def main():
    parser = argparse.ArgumentParser(description="Manage Docker services")
    subparsers = parser.add_subparsers(dest="command", help="Command to run")

    # Start command
    start_parser = subparsers.add_parser("start", help="Start a Docker service")
    start_group = start_parser.add_mutually_exclusive_group(required=True)
|
||||
start_group.add_argument("--all", action="store_true", help="Start all services")
|
||||
start_group.add_argument("service", nargs="?", help="Service to start")
|
||||
start_group.add_argument(
|
||||
"--service", dest="service", help="Service to start (deprecated)"
|
||||
)
|
||||
|
||||
# Stop command
|
||||
stop_parser = subparsers.add_parser("stop", help="Stop a Docker service")
|
||||
stop_group = stop_parser.add_mutually_exclusive_group(required=True)
|
||||
stop_group.add_argument(
|
||||
"--all", action="store_true", help="Stop all running services"
|
||||
)
|
||||
stop_group.add_argument("service", nargs="?", help="Service to stop")
|
||||
stop_group.add_argument(
|
||||
"--service", dest="service", help="Service to stop (deprecated)"
|
||||
)
|
||||
|
||||
# Restart command
|
||||
restart_parser = subparsers.add_parser("restart", help="Restart a Docker service")
|
||||
restart_parser.add_argument("service", help="Service to restart")
|
||||
|
||||
# Update command
|
||||
update_parser = subparsers.add_parser(
|
||||
"update",
|
||||
help="Update a Docker service (pull new images and recreate if needed)",
|
||||
)
|
||||
update_group = update_parser.add_mutually_exclusive_group(required=True)
|
||||
update_group.add_argument(
|
||||
"--all", action="store_true", help="Update all running services"
|
||||
)
|
||||
update_group.add_argument("service", nargs="?", help="Service to update")
|
||||
update_group.add_argument(
|
||||
"--service", dest="service", help="Service to update (deprecated)"
|
||||
)
|
||||
|
||||
# PS command
|
||||
ps_parser = subparsers.add_parser("ps", help="Show Docker service status")
|
||||
ps_parser.add_argument("service", nargs="?", help="Service to check")
|
||||
|
||||
# Logs command
|
||||
logs_parser = subparsers.add_parser("logs", help="Show Docker service logs")
|
||||
logs_parser.add_argument("service", help="Service to show logs for")
|
||||
logs_parser.add_argument(
|
||||
"-f", "--follow", action="store_true", help="Follow log output"
|
||||
)
|
||||
logs_parser.add_argument(
|
||||
"--tail", help="Number of lines to show from the end of logs"
|
||||
)
|
||||
|
||||
# List command and its alias
|
||||
subparsers.add_parser("list", help="List available Docker services")
|
||||
subparsers.add_parser("ls", help="List available Docker services (alias for list)")
|
||||
|
||||
# Parse arguments
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.command:
|
||||
parser.print_help()
|
||||
return 1
|
||||
|
||||
# Execute the appropriate command
|
||||
commands = {
|
||||
"start": cmd_start,
|
||||
"stop": cmd_stop,
|
||||
"restart": cmd_restart,
|
||||
"update": cmd_update,
|
||||
"ps": cmd_ps,
|
||||
"logs": cmd_logs,
|
||||
"list": cmd_list,
|
||||
"ls": cmd_list, # Alias 'ls' to the same function as 'list'
|
||||
}
|
||||
|
||||
return commands[args.command](args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
bin/actions/update.py (new executable file, 368 lines)
@@ -0,0 +1,368 @@
#!/usr/bin/env python3

import os
import sys
import subprocess
import argparse

# Import helper functions
sys.path.append(os.path.join(os.path.expanduser("~/.dotfiles"), "bin"))
from helpers.functions import printfe, run_command


def help_message():
    """Print help message and exit"""
printfe("green", "Usage: upgrade.py [options]")
    printfe("green", "Options:")
    printfe("green", "  --ha, -H           Upgrade Home Manager packages.")
    printfe("green", "  --ansible, -A      Upgrade Ansible packages.")
    printfe(
        "green",
        "  --ansible-verbose  Upgrade Ansible packages with verbose output. (-vvv)",
    )
    printfe(
        "green",
        "  --full-speed, -F   Upgrade packages and use all available cores for compilation. (Default: 8 cores)",
    )
    printfe("green", "  --help, -h         Display this help message.")
    return 0


def check_git_repository():
    """Check for changes in the dotfiles git repository and prompt user to pull if needed"""
    dotfiles_path = os.environ.get("DOTFILES_PATH", os.path.expanduser("~/.dotfiles"))

    printfe("cyan", "Checking for updates in dotfiles repository...")

    # Change to dotfiles directory
    current_dir = os.getcwd()
    os.chdir(dotfiles_path)

    # Check if this is a git repository
    status, _ = run_command(["git", "rev-parse", "--is-inside-work-tree"], shell=False)
    if not status:
        printfe("red", "The dotfiles directory is not a git repository.")
        os.chdir(current_dir)
        return False

    # Get the current branch name
    status, current_branch = run_command(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"], shell=False
    )
    if not status:
        printfe("red", "Failed to determine current branch.")
        os.chdir(current_dir)
        return False

    current_branch = current_branch.strip()

    # Fetch the latest changes
    status, output = run_command(["git", "fetch"], shell=False)
    if not status:
        printfe(
            "yellow", f"Warning: Failed to fetch changes from git repository: {output}"
        )
        printfe("yellow", "Continuing update process without repository check...")
        os.chdir(current_dir)
        return True

    # Check if remote branch exists
    status, output = run_command(
        ["git", "ls-remote", "--heads", "origin", current_branch], shell=False
    )
    if not status or not output.strip():
        printfe(
            "yellow",
            f"Remote branch 'origin/{current_branch}' not found. Using local branch only.",
        )
        os.chdir(current_dir)
        return True

    # Check if we're behind the remote
    status, output = run_command(
        ["git", "rev-list", f"HEAD..origin/{current_branch}", "--count"], shell=False
    )
    if not status:
        printfe("red", f"Failed to check for repository updates: {output}")
        os.chdir(current_dir)
        return False

    behind_count = output.strip()
    if behind_count == "0":
        printfe(
            "green", f"Dotfiles repository is up to date on branch '{current_branch}'."
        )
        os.chdir(current_dir)
        return True

    # Show what changes are available
    status, output = run_command(
        ["git", "log", f"HEAD..origin/{current_branch}", "--oneline"], shell=False
    )
    if status:
        printfe(
            "yellow",
            f"Your dotfiles repository is {behind_count} commit(s) behind on branch '{current_branch}'. Changes:",
        )
        for line in output.strip().splitlines():
            printfe("yellow", f"  • {line}")
    else:
        printfe(
            "yellow",
            f"Your dotfiles repository is {behind_count} commit(s) behind on branch '{current_branch}'.",
        )

    # Ask user if they want to pull changes
    response = input("Do you want to pull these changes? (yes/no): ").strip().lower()

    if response in ["yes", "y"]:
        status, output = run_command(
            ["git", "pull", "origin", current_branch], shell=False
        )
        if not status:
            printfe("red", f"Failed to pull changes: {output}")
            os.chdir(current_dir)
            return False
        printfe("green", "Successfully updated dotfiles repository.")
    else:
        printfe("yellow", "Skipping repository update.")

    os.chdir(current_dir)
    return True


def ensure_ansible_collections():
    """Ensure required Ansible collections are installed"""
    # List of required collections that can be expanded in the future
    required_collections = [
        "community.general",
    ]

    printfe("cyan", "Checking for required Ansible collections...")

    # Get list of installed collections using ansible-galaxy
    status, output = run_command(["ansible-galaxy", "collection", "list"], shell=False)
    if not status:
        printfe("yellow", f"Failed to list Ansible collections: {output}")
        printfe("yellow", "Will try to install all required collections.")
        installed_collections = []
    else:
        # Parse output to get installed collections
        installed_collections = []

        # Split output into lines and process
        lines = output.splitlines()
        collection_section = False

        for line in lines:
            line = line.strip()

            # Skip empty lines
            if not line:
                continue

            # Check if we've reached the collection listing section
            if line.startswith("Collection"):
                collection_section = True
                continue

            # Skip the separator line after the header
            if collection_section and line.startswith("--"):
                continue

            # Process collection entries
            if collection_section and " " in line:
                # Format is typically: "community.general 10.4.0"
                parts = line.split()
                if len(parts) >= 1:
                    collection_name = parts[0]
                    installed_collections.append(collection_name)

    # Check which required collections are missing
    missing_collections = []
    for collection in required_collections:
        if collection not in installed_collections:
            missing_collections.append(collection)

    # Install missing collections
    if missing_collections:
        for collection in missing_collections:
            printfe("yellow", f"Installing {collection} collection...")
            status, install_output = run_command(
                ["ansible-galaxy", "collection", "install", collection], shell=False
            )
            if not status:
                printfe(
                    "yellow",
                    f"Warning: Failed to install {collection} collection: {install_output}",
                )
                printfe(
                    "yellow",
                    f"Continuing anyway, but playbook might fail if it requires {collection}",
                )
            else:
                printfe("green", f"Successfully installed {collection} collection")
    else:
        printfe("green", "All required collections are already installed.")

    return True


def main():
    # Parse arguments
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        "--ha", "-H", action="store_true", help="Upgrade Home Manager packages"
    )
    parser.add_argument(
        "--ansible", "-A", action="store_true", help="Upgrade Ansible packages"
    )
    parser.add_argument(
        "--ansible-verbose",
        action="store_true",
        help="Upgrade Ansible packages with verbose output",
    )
    parser.add_argument(
        "--full-speed", "-F", action="store_true", help="Use all available cores"
    )
    parser.add_argument(
        "--help", "-h", action="store_true", help="Display help message"
    )

    args = parser.parse_args()

    if args.help:
        return help_message()

    # If no specific option provided, run all
    if not args.ha and not args.ansible and not args.ansible_verbose:
        args.ha = True
        args.ansible = True

    # If ansible_verbose is set, also set ansible
    if args.ansible_verbose:
        args.ansible = True

    # Always check git repository first
    if not check_git_repository():
        printfe("red", "Failed to check or update dotfiles repository.")
        return 1

    # Set cores and jobs based on full-speed flag
    if args.full_speed:
        import multiprocessing

        cores = jobs = multiprocessing.cpu_count()
    else:
        cores = 8
        jobs = 1

    printfe("cyan", f"Limiting to {cores} cores with {jobs} jobs.")

    # Home Manager update
    if args.ha:
        dotfiles_path = os.environ.get(
            "DOTFILES_PATH", os.path.expanduser("~/.dotfiles")
        )
        hostname = os.uname().nodename

        printfe("cyan", "Updating Home Manager flake...")
        os.chdir(f"{dotfiles_path}/config/home-manager")
        status, output = run_command(
            [
                "nix",
                "--extra-experimental-features",
                "nix-command",
                "--extra-experimental-features",
                "flakes",
                "flake",
                "update",
            ],
            shell=False,
        )
        if not status:
            printfe("red", f"Failed to update Home Manager flake: {output}")
            return 1

        # Check if home-manager is installed
        status, _ = run_command(["which", "home-manager"], shell=False)
        if status:
            printfe("cyan", "Cleaning old backup files...")
            backup_file = os.path.expanduser("~/.config/mimeapps.list.backup")
            if os.path.exists(backup_file):
                os.remove(backup_file)

            printfe("cyan", "Upgrading Home Manager packages...")
            env = os.environ.copy()
            env["NIXPKGS_ALLOW_UNFREE"] = "1"

            cmd = [
                "home-manager",
                "--extra-experimental-features",
                "nix-command",
                "--extra-experimental-features",
                "flakes",
                "switch",
                "-b",
                "backup",
                "--flake",
                f".#{hostname}",
                "--impure",
                "--cores",
                str(cores),
                "-j",
                str(jobs),
            ]

            result = subprocess.run(cmd, env=env)
            if result.returncode != 0:
                printfe("red", "Failed to upgrade Home Manager packages.")
                return 1
        else:
            printfe("red", "Home Manager is not installed.")
            return 1

    # Ansible update
    if args.ansible:
        dotfiles_path = os.environ.get(
            "DOTFILES_PATH", os.path.expanduser("~/.dotfiles")
        )
        hostname = os.uname().nodename
        username = os.environ.get("USER", os.environ.get("USERNAME", "user"))

        # Ensure required collections are installed
        if not ensure_ansible_collections():
            printfe(
                "red", "Failed to ensure required Ansible collections are installed"
            )
            return 1

        printfe("cyan", "Running Ansible playbook...")
        ansible_cmd = [
            "/usr/bin/env",
            "ansible-playbook",
            "-i",
            f"{dotfiles_path}/config/ansible/inventory.ini",
            f"{dotfiles_path}/config/ansible/playbook.yml",
            "--extra-vars",
            f"hostname={hostname}",
            "--extra-vars",
            f"ansible_user={username}",
            "--limit",
            hostname,
            "--ask-become-pass",
        ]

        if args.ansible_verbose:
            ansible_cmd.append("-vvv")

        result = subprocess.run(ansible_cmd)
        if result.returncode != 0:
            printfe("red", "Failed to upgrade Ansible packages.")
            return 1

    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -1,229 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

# check if --verbose was passed
if [ "$2" = "--verbose" ]; then
    export verbose=true
    printfe "%s\n" "yellow" "Verbose mode enabled"
else
    export verbose=false
fi

# Check if we have shyaml since that's required for the script to function
if [ ! -x "$(command -v shyaml)" ]; then
    printfe "%s\n" "red" "shyaml is not installed, installing it..."
    pipx install shyaml
fi

ensure_symlink() {
    local source
    local target

    # Fetch target from YAML
    target=$(shyaml get-value "config.symlinks.$1.target" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)

    # Fetch source from YAML based on OS
    if [[ "$OSTYPE" == "linux-gnu"* ]]; then
        # Check for WSL2
        if [[ $(uname -a) == *"microsoft-standard-WSL2"* ]]; then
            source=$(shyaml get-value "config.symlinks.$1.sources.wsl" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
        else
            source=$(shyaml get-value "config.symlinks.$1.sources.linux" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
        fi
    elif [[ "$OSTYPE" == "darwin"* ]]; then
        source=$(shyaml get-value "config.symlinks.$1.sources.macos" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
    fi

    # Fall back to generic source if OS-specific source is empty
    if [ -z "$source" ]; then
        source=$(shyaml get-value "config.symlinks.$1.source" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
    fi

    # Attempt to use the hostname of the machine if source is still empty
    if [ -z "$source" ]; then
        source=$(shyaml get-value "config.symlinks.$1.sources.$(hostname)" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)
    fi

    # Error out if source is still empty
    if [ -z "$source" ]; then
        printfe "%s\n" "red" "  - No valid source defined for $1"
        return
    fi

    # Expand ~ with $HOME
    source="${source/#\~/$HOME}"
    target="${target/#\~/$HOME}"

    # Call the function to check or make the symlink
    check_or_make_symlink "$source" "$target"

    # Check if there is a chmod defined for the target file
    desired_chmod=$(shyaml get-value "config.symlinks.$1.chmod" < "$HOME/dotfiles/config/config.yaml" 2>/dev/null)

    if [ -n "$desired_chmod" ]; then
        # Resolve the target if it is a symlink
        resolved_target=$(readlink -f "$target")

        # If readlink fails, fall back to the original target
        if [ -z "$resolved_target" ]; then
            resolved_target="$target"
        fi

        current_chmod=$(stat -c %a "$resolved_target" 2>/dev/null)
        if [ "$current_chmod" != "$desired_chmod" ]; then
            printfe "%s\n" "yellow" "  - Changing chmod of $resolved_target to $desired_chmod"
            chmod "$desired_chmod" "$resolved_target"
        fi
    fi
}

symlinks() {
    ####################################################################################################
    # Update symlinks
    ####################################################################################################

    # Load symlinks from config file
    symlinks=($(cat $HOME/dotfiles/config/config.yaml | shyaml keys config.symlinks))
    printfe "%s\n" "cyan" "Updating symlinks..."

    for symlink in "${symlinks[@]}"; do
        ensure_symlink $symlink
    done
}


####################################################################################################
# Update packages
####################################################################################################

sys_packages() {
    if [[ "$OSTYPE" == "darwin"* ]]; then
        printfe "%s\n" "cyan" "Updating brew packages..."
        brew update
        brew upgrade
        brew cleanup
    else
        if [ -x "$(command -v nixos-version)" ]; then
            printfe "%s\n" "cyan" "Updating nix channels..."
            printfe "%s" "cyan" "System channels: "
            sudo -i nix-channel --update

            printfe "%s" "cyan" "User channels: "
            nix-channel --update

            printfe "%s\n" "cyan" "Updating nixos flake..."
            cd $HOME/dotfiles/config/nixos && nix --extra-experimental-features nix-command --extra-experimental-features flakes flake update

            # Exit if this failed
            if [ $? -ne 0 ]; then
                exit $?
            fi
            return
        fi

        if [ -x "$(command -v apt)" ]; then
            printfe "%s\n" "cyan" "Updating apt packages..."
            sudo nala upgrade -y
            sudo nala autoremove -y --purge
        fi

        if [ -x "$(command -v yum)" ]; then
            printfe "%s\n" "cyan" "Updating yum packages..."
            sudo yum update -y
        fi
    fi
}

cargopkgs() {
    printfe "%s\n" "cyan" "Ensuring Cargo packages are installed..."
    source $HOME/dotfiles/bin/helpers/cargo_packages.sh
    ensure_cargo_packages_installed
}

pipxpkgs() {
    if [ ! -x "$(command -v pipx)" ]; then
        printfe "%s\n" "yellow" "pipx is not available, skipping pipx packages."
        return
    fi

    printfe "%s\n" "cyan" "Ensuring pipx packages are installed..."
    source $HOME/dotfiles/bin/helpers/pipx_packages.sh
    ensure_pipx_packages_installed
}

flatpakpkgs() {
    if [ ! -x "$(command -v flatpak)" ]; then
        printfe "%s\n" "yellow" "Flatpak is not available, skipping Flatpak."
        return
    fi

    if is_wsl; then
        printfe "%s\n" "yellow" "Running in WSL, skipping Flatpak."
        return
    fi

    printfe "%s\n" "cyan" "Ensuring Flatpak packages are installed..."
    source $HOME/dotfiles/bin/helpers/flatpak_packages.sh
    ensure_flatpak_packages_installed
}

homemanager() {
    printfe "%s\n" "cyan" "Updating Home Manager flake..."
    cd $HOME/dotfiles/config/home-manager && nix --extra-experimental-features nix-command --extra-experimental-features flakes flake update
}

####################################################################################################
# Parse arguments
####################################################################################################

if [ "$#" -eq 0 ]; then
    printfe "%s\n" "yellow" "No options passed, running full update..."

    symlinks
    sys_packages
    homemanager
    cargopkgs
    pipxpkgs
    flatpakpkgs
    dotf secrets encrypt
else
    for arg in "$@"; do
        case $arg in
            --nixos|nixos|nix|nixos-rebuild)
                sys_packages
                ;;
            --home-manager|--homemanager|ha|hm|home)
                homemanager
                ;;
            --nix)
                sys_packages
                homemanager
                ;;
            --symlinks)
                symlinks
                ;;
            --packages)
                sys_packages
                cargopkgs
                pipxpkgs
                flatpakpkgs
                ;;
            --pipx)
                pipxpkgs
                ;;
            --cargo)
                cargopkgs
                ;;
            --flatpak)
                flatpakpkgs
                ;;
            *)
                printfe "%s\n" "red" "Unknown option: $arg"
                ;;
        esac
    done
fi

echo ""
printfe "%s\n" "blue" "Done!"
@@ -1,72 +0,0 @@
#!/usr/bin/env bash
source $HOME/dotfiles/bin/helpers/functions.sh

help() {
    printfe "%s\n" "green" "Usage: upgrade.sh [options]"
    printfe "%s\n" "green" "Options:"
    printfe "%s\n" "green" "  --ha, -H          Upgrade Home Manager packages."
    printfe "%s\n" "green" "  --nix, -X         Upgrade NixOS packages."
    printfe "%s\n" "green" "  --full-speed, -F  Upgrade packages and use all available cores for compilation. (Default: 8 cores)"
    printfe "%s\n" "green" "  --help, -h        Display this help message."
    exit 0
}

while [[ "$#" -gt 0 ]]; do
    case $1 in
        --ha|-H) RUN_HA=true ;;
        --nix|-X) RUN_NIX=true ;;
        --full-speed|-F) FULL_SPEED=true ;;
        --help|-h) help ;;
        *) echo "Unknown parameter passed: $1";
           help ;;
    esac
    shift
done

if [[ -z "$RUN_HA" && -z "$RUN_NIX" ]]; then
    RUN_HA=true
    RUN_NIX=true
fi

# Check if --full-speed flag is passed, otherwise use --cores 8 -j 1
if [[ "$FULL_SPEED" == true ]]; then
    CORES=$(nproc)
    JOBS=$(nproc)
else
    CORES=8
    JOBS=1
fi

printfe "%s\n" "cyan" "Limiting to $CORES cores with $JOBS jobs."

if [[ "$RUN_NIX" == true ]]; then
    if command -v nixos-rebuild &> /dev/null; then
        printfe "%s\n" "cyan" "Upgrading NixOS packages..."
        cd $HOME/dotfiles/config/nixos && sudo nixos-rebuild switch --upgrade --flake .#$DOTF_HOSTNAME --impure --cores $CORES -j $JOBS

        if [[ $? -ne 0 ]]; then
            printfe "%s\n" "red" "Failed to upgrade NixOS packages."
            exit 1
        fi
    else
        printfe "%s\n" "red" "Skipping nixos-rebuild, NixOS is not installed."
    fi
fi

if [[ "$RUN_HA" == true ]]; then
    if command -v home-manager &> /dev/null; then
        printfe "%s\n" "cyan" "Cleaning old backup files..."
        rm -rf $HOME/.config/mimeapps.list.backup

        printfe "%s\n" "cyan" "Upgrading Home Manager packages..."
        cd $HOME/dotfiles/config/home-manager && NIXPKGS_ALLOW_UNFREE=1 home-manager --extra-experimental-features nix-command --extra-experimental-features flakes switch -b backup --flake .#$DOTF_HOSTNAME --impure --cores $CORES -j $JOBS

        if [[ $? -ne 0 ]]; then
            printfe "%s\n" "red" "Failed to upgrade Home Manager packages."
            exit 1
        fi
    else
        printfe "%s\n" "red" "Home Manager is not installed."
        exit 1
    fi
fi
bin/dotf (modified, 199 lines)
@@ -1,135 +1,126 @@
#!/usr/bin/env bash
#!/usr/bin/env python3

# strict mode
set -euo pipefail
IFS=$'\n\t'
import os
import signal
import subprocess
import sys

def signal_handler(sig, frame):
    print('Exiting.')
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)

# Script constants
readonly DOTFILES_ROOT="$HOME/dotfiles"
readonly DOTFILES_BIN="$DOTFILES_ROOT/bin"
readonly DOTFILES_CONFIG="$DOTFILES_ROOT/config/config.yaml"
DOTFILES_ROOT = os.path.expanduser("~/.dotfiles")
DOTFILES_BIN = os.path.join(DOTFILES_ROOT, "bin")
DOTFILES_PATH = DOTFILES_ROOT  # For compatibility with the original scripts

# Source helper functions
if [[ ! -f "$DOTFILES_BIN/helpers/functions.sh" ]]; then
    echo "Error: Required helper functions not found"
    exit 1
fi
source "$DOTFILES_BIN/helpers/functions.sh"
# Import helper functions
sys.path.append(DOTFILES_BIN)
from helpers.functions import printfe, ensure_dependencies

export DOTFILES_CONFIG
ensure_dependencies()

# Command functions
update() {
    local update_script="$DOTFILES_BIN/actions/update.sh"
    if [[ ! -x "$update_script" ]]; then
        printfe "%s\n" "red" "Error: Update script not found or not executable"
def run_script(script_path, args):
    """Run an action script with the given arguments"""
    if not os.path.isfile(script_path) or not os.access(script_path, os.X_OK):
        printfe("red", f"Error: Script not found or not executable: {script_path}")
        return 1
    fi
    "$update_script" $@
}

    result = subprocess.run([script_path] + args, env={**os.environ, "DOTFILES_PATH": DOTFILES_PATH})
    return result.returncode

upgrade() {
    local upgrade_script="$DOTFILES_BIN/actions/upgrade.sh"
    if [[ ! -x "$upgrade_script" ]]; then
        printfe "%s\n" "red" "Error: Upgrade script not found or not executable"
        return 1
    fi
    "$upgrade_script" $@
}
def update(args):
    """Run the update action"""
    return run_script(f"{DOTFILES_BIN}/actions/update.py", args)

hello() {
    local term_script="$DOTFILES_BIN/actions/hello.sh"
    if [[ ! -x "$term_script" ]]; then
        printfe "%s\n" "red" "Error: Terminal script not found or not executable"
        return 1
    fi
    "$term_script" "$@"
}
def hello(args):
    """Run the hello action"""
    return run_script(f"{DOTFILES_BIN}/actions/hello.py", args)

help() {
    local help_script="$DOTFILES_BIN/actions/help.sh"
    if [[ ! -x "$help_script" ]]; then
        printfe "%s\n" "red" "Error: Help script not found or not executable"
        return 1
    fi
    "$help_script" "$@"
}
def help(args):
    """Run the help action"""
    return run_script(f"{DOTFILES_BIN}/actions/help.py", args)

secrets() {
    local secrets_script="$DOTFILES_BIN/actions/secrets.sh"
    if [[ ! -x "$secrets_script" ]]; then
        printfe "%s\n" "red" "Error: Secrets script not found or not executable"
        return 1
    fi
    "$secrets_script" "$@"
}
def secrets(args):
    """Run the secrets action"""
    return run_script(f"{DOTFILES_BIN}/actions/secrets.py", args)

auto_start() {
    local auto_start_script="$DOTFILES_BIN/actions/auto-start.sh"
    if [[ ! -x "$auto_start_script" ]]; then
        printfe "%s\n" "red" "Error: Auto-start script not found or not executable"
        return 1
    fi
    "$auto_start_script" "$@"
}
def auto_start(args):
    """Run the auto-start action"""
    return run_script(f"{DOTFILES_BIN}/actions/auto-start.py", args)

ensure_git_hooks() {
    local hooks_dir="$DOTFILES_ROOT/.git/hooks"
    local target_link="$DOTFILES_BIN/actions/git"
def service(args):
    """Run the service/docker action"""
    return run_script(f"{DOTFILES_BIN}/actions/service.py", args)

def lint(args):
    """Run the lint action"""
    return run_script(f"{DOTFILES_BIN}/actions/lint.py", args)

def ensure_git_hooks():
    """Ensure git hooks are correctly set up"""
    hooks_dir = os.path.join(DOTFILES_ROOT, ".git/hooks")
    target_link = os.path.join(DOTFILES_BIN, "actions/git")

    # Validate target directory exists
    if [[ ! -d "$target_link" ]]; then
        printfe "%s\n" "red" "Error: Git hooks source directory does not exist: $target_link"
    if not os.path.isdir(target_link):
        printfe("red", f"Error: Git hooks source directory does not exist: {target_link}")
        return 1
    fi

    # Handle existing symlink
    if [[ -L "$hooks_dir" ]]; then
        local current_link
        current_link=$(readlink "$hooks_dir")
        if [[ "$current_link" != "$target_link" ]]; then
            printfe "%s\n" "yellow" "Incorrect git hooks symlink found. Removing and recreating..."
            rm "$hooks_dir"
        else
    if os.path.islink(hooks_dir):
        current_link = os.readlink(hooks_dir)
        if current_link != target_link:
            printfe("yellow", "Incorrect git hooks symlink found. Removing and recreating...")
            os.remove(hooks_dir)
        else:
            return 0
        fi
    fi

    # Handle existing directory
    if [[ -d "$hooks_dir" ]]; then
        printfe "%s\n" "yellow" "Removing existing hooks directory..."
        rm -rf "$hooks_dir"
    fi
    if os.path.isdir(hooks_dir) and not os.path.islink(hooks_dir):
        printfe("yellow", "Removing existing hooks directory...")
        import shutil
        shutil.rmtree(hooks_dir)

    # Create new symlink
    if ln -s "$target_link" "$hooks_dir"; then
        printfe "%s\n" "green" "Git hooks successfully configured!"
    else
        printfe "%s\n" "red" "Failed to create git hooks symlink"
    try:
        os.symlink(target_link, hooks_dir)
        printfe("green", "Git hooks successfully configured!")
        return 0
    except Exception as e:
        printfe("red", f"Failed to create git hooks symlink: {e}")
        return 1
    fi
}

main() {
def main():
    # Ensure we're in the correct directory
    if [[ ! -d "$DOTFILES_ROOT" ]]; then
        printfe "%s\n" "red" "Error: Dotfiles directory not found"
        exit 1
    fi
    if not os.path.isdir(DOTFILES_ROOT):
        printfe("red", "Error: Dotfiles directory not found")
        return 1

    # Setup git hooks
    ensure_git_hooks || exit 1
    if ensure_git_hooks() != 0:
        return 1

    # Parse commands
    case "${1:-help}" in
        update) shift; update "$@" ;;
        upgrade) shift; upgrade "$@" ;;
        help) shift; help "$@" ;;
        hello) shift; hello "$@" ;;
        secrets) shift; secrets "$@" ;;
        auto-start) shift; auto_start "$@" ;;
        *) help ;;
    esac
}
    command = sys.argv[1] if len(sys.argv) > 1 else "help"
    args = sys.argv[2:]

main "$@"
    commands = {
        "update": update,
        "help": help,
        "hello": hello,
        "secrets": secrets,
        "auto-start": auto_start,
        "service": service,
        "lint": lint
    }

    if command in commands:
        return commands[command](args)
    else:
        return help([])

if __name__ == "__main__":
    sys.exit(main())
@@ -1,72 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

ensure_cargo_packages_installed() {
    cargo_packages=($(cat $DOTFILES_CONFIG | shyaml keys config.packages.cargo))
    for package in "${cargo_packages[@]}"; do
        printfe "%s" "cyan" "  - Checking $package..."
        echo -en '\r'

        # Some entries have a git_url and binary, we need to load these in if they exist
        pkg_status=$(cargo install --list | grep -E "^${package}\sv[0-9.]+:$")
        package_url=$(cat $DOTFILES_CONFIG | shyaml get-value config.packages.cargo.$package.git_url 2>/dev/null)
        binary=$(cat $DOTFILES_CONFIG | shyaml get-value config.packages.cargo.$package.binary 2>/dev/null)

        # If pkg_status is `installed` then we don't need to install the package, otherwise if it's empty then the package is not installed
        if [ -z "$pkg_status" ]; then
            ensure_sudo_privileges "In order to install $package, please provide your password:"
            printfe "%s" "yellow" "  - Compiling/Installing $package... (This may take a while)"
            clear_line

            # If package_url is defined we should install via git
            if [ -n "$package_url" ]; then
                command="cargo install --git $package_url $binary"
            else
                command="cargo install $package"
            fi

            # Execute the command
            result=$(eval $command 2>&1)

            if [ $? -ne 0 ]; then
                printfe "%s\n" "red" "  - Failed to install $package"
                printfe "%s\n" "red" "    Command: $command"
                printfe "%s\n" "red" "    Output: $result"
                exit 1
            fi
            printfe "%s\n" "green" "  - Installed $package"
        else
            printfe "%s\n" "green" "  - $package is already installed"
        fi
    done
}

print_cargo_status() {
    printfe "%s" "cyan" "Checking Cargo packages..."
    clear_line

    cargo_packages=($(cat $DOTFILES_CONFIG | shyaml keys config.packages.cargo))
    count=$(echo $cargo_packages | wc -w)
    installed=0

    for package in "${cargo_packages[@]}"; do
        pkg_status=$(cargo install --list | grep -E "^${package}\sv[0-9.]+:$")

        if [ -z $pkg_status ]; then
            if [ "$verbose" = true ]; then
                printfe "%s\n" "red" "$package is not installed"
            fi
        else
            installed=$((installed + 1))
        fi
    done

    printfe "%s" "cyan" "Cargo"
    if [ $installed -eq $count ]; then
        printfe "%s" "green" " $installed/$count "
    else
        printfe "%s" "red" " $installed/$count "
    fi
    printfe "%s\n" "cyan" "packages installed"
}
@@ -1,55 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

ensure_flatpak_packages_installed() {
    flatpak_packages=($(ls $HOME/dotfiles/config/flatpaks/ | sed 's/.flatpakref//g'))

    for package in "${flatpak_packages[@]}"; do
        if ! flatpak list | grep -q $package; then
            printfe "%s\n" "cyan" "  - Installing $package..."
            flatpak install -y flathub $package

            if [ $? -eq 0 ]; then
                printfe "%s\n" "green" "  - $package installed successfully"
            else
                printfe "%s\n" "red" "  - $package failed to install"
            fi
        else
            printfe "%s\n" "green" "  - $package is already installed"
        fi
    done
}

print_flatpak_status() {
    if is_wsl; then
        printfe "%s\n" "yellow" "Running in WSL, skipping Flatpak packages check."
        return
    fi

    printfe "%s" "cyan" "Checking Flatpak packages..."
    clear_line

    flatpak_packages=($(ls $HOME/dotfiles/config/flatpaks/ | sed 's/.flatpakref//g'))

    count=$(echo $flatpak_packages | wc -w)
    installed=0

    for package in "${flatpak_packages[@]}"; do
        if flatpak list | grep -q $package; then
            installed=$((installed + 1))
        else
            if [ "$verbose" = true ]; then
                printfe "%s\n" "red" "$package is not installed"
            fi
        fi
    done

    printfe "%s" "cyan" "Flatpak"
    if [ $installed -eq $count ]; then
        printfe "%s" "green" " $installed/$count "
    else
        printfe "%s" "red" " $installed/$count "
    fi
    printfe "%s\n" "cyan" "packages installed"
}
bin/helpers/functions.py (new file, 177 lines)
@@ -0,0 +1,177 @@
#!/usr/bin/env python3

import sys
import subprocess
import math
import random
import shutil
import datetime

try:
    import pyfiglet
except ImportError:
    pyfiglet = None

# Color codes for terminal output
COLORS = {
    "black": "\033[0;30m",
    "red": "\033[0;31m",
    "green": "\033[0;32m",
    "yellow": "\033[0;33m",
    "blue": "\033[0;34m",
    "purple": "\033[0;35m",
    "cyan": "\033[0;36m",
    "white": "\033[0;37m",
    "grey": "\033[0;90m",  # Added grey color for timestamp
    "reset": "\033[0m",
}


def printfe(color, message, show_time=True):
    """
    Print a formatted message with the specified color
    With timestamp and message type prefix similar to setup.sh
    """
    color_code = COLORS.get(color.lower(), COLORS["reset"])

    if show_time:
        # Add timestamp
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        print(f"{COLORS['grey']}{timestamp}{COLORS['reset']}", end="")

        # Add message type based on color
        if color.lower() in ["green", "cyan", "blue", "purple"]:
            print(f"{COLORS['green']} INF {COLORS['reset']}", end="")
        elif color.lower() == "yellow":
            print(f"{COLORS['yellow']} WRN {COLORS['reset']}", end="")
        elif color.lower() == "red":
            print(f"{COLORS['red']} ERR {COLORS['reset']}", end="")

    # Print the actual message with color
    print(f"{color_code}{message}{COLORS['reset']}")


def println(message, color=None):
    """Print a line with optional color"""
    if color:
        printfe(color, message)
    else:
        printfe("reset", message)


def _rainbow_color(text, freq=0.1, offset=0):
    """Apply rainbow colors to text similar to lolcat"""
    colored_text = ""
    for i, char in enumerate(text):
        if char.strip():  # Only color non-whitespace characters
            # Calculate RGB values using sine waves with phase shifts
            r = int(127 * math.sin(freq * i + offset + 0) + 128)
            g = int(127 * math.sin(freq * i + offset + 2 * math.pi / 3) + 128)
            b = int(127 * math.sin(freq * i + offset + 4 * math.pi / 3) + 128)

            # Apply the RGB color to the character
            colored_text += f"\033[38;2;{r};{g};{b}m{char}\033[0m"
        else:
            colored_text += char

    return colored_text


def logo(continue_after=False):
    """Display the dotfiles logo"""
    try:
        # Try to read logo file first for backward compatibility
        if pyfiglet:
            # Generate ASCII art with pyfiglet and rainbow colors
            ascii_art = pyfiglet.figlet_format("Menno's Dotfiles", font="slant")
            print("\n")  # Add some space before the logo

            # Use a random offset to vary the rainbow start position
            random_offset = random.random() * 2 * math.pi
            line_offset = 0

            for line in ascii_art.splitlines():
                # Add a little variation to each line
                print(_rainbow_color(line, offset=random_offset + line_offset))
                line_offset += 0.1
        else:
            # Fallback if pyfiglet is not available
            printfe("yellow", "\n  *** Menno's Dotfiles ***\n")
            printfe("cyan", "  Note: Install pyfiglet for better logo display")
            printfe("cyan", "  (pip install pyfiglet)\n")

        if not continue_after:
            sys.exit(0)
    except Exception as e:
        printfe("red", f"Error displaying logo: {e}")


def run_command(command, shell=False):
    """Run a shell command and return the result"""
    try:
        if not shell and not shutil.which(command[0]):
            return False, f"Command '{command[0]}' not found"

        result = subprocess.run(
            command,
            shell=shell,
            check=True,
            text=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return True, result.stdout.strip()
    except subprocess.CalledProcessError as e:
        return False, e.stderr.strip()
    except FileNotFoundError:
        return False, f"Command '{command[0]}' not found"


def command_exists(command):
    """Check if a command exists in the PATH"""
    return shutil.which(command) is not None


def ensure_dependencies():
    """Check and install required dependencies for the dotfiles system"""
    required_packages = [
        "pyfiglet",  # For ASCII art generation
    ]

    # Check if pip is available
    success, _ = run_command(["pip", "--version"])
    if not success:
        printfe(
            "red",
            "Pip is required to install missing dependencies, retry after running `dotf update`",
        )
        return False

    missing_packages = []
    for package in required_packages:
        try:
            __import__(package)
        except ImportError:
            missing_packages.append(package)

    if missing_packages:
        printfe("yellow", f"Missing dependencies: {', '.join(missing_packages)}")
        install = input("Would you like to install them now? (y/n): ").lower()
        if install == "y" or install == "yes":
            printfe("cyan", "Installing missing dependencies...")
            for package in missing_packages:
                printfe("blue", f"Installing {package}...")
                success, output = run_command(
                    ["pip", "install", "--user", package, "--break-system-packages"]
                )
                if success:
                    printfe("green", f"Successfully installed {package}")
                else:
                    printfe("red", f"Failed to install {package}: {output}")

            printfe("green", "All dependencies have been processed")
            return True
        else:
            printfe("yellow", "Skipping dependency installation")
            return False

    return True
@@ -1,262 +0,0 @@
#!/usr/bin/env bash

# Color print function, usage: println "message" "color"
println() {
    color=$2
    printfe "%s\n" $color "$1"
}

is_wsl() {
    if [ -f "/proc/sys/fs/binfmt_misc/WSLInterop" ]; then
        return 0
    else
        return 1
    fi
}

logo() {
    echo "Menno's Dotfiles" | figlet | lolcat

    if [[ $(trash-list | wc -l) -gt 0 ]]; then
        printfe "%s" "yellow" "[!] $(trash-list | wc -l | tr -d ' ') file(s) in trash - "
    fi

    # Print if repo is dirty and the count of untracked files, modified files and staged files
    if [[ $(git -C ~/dotfiles status --porcelain) ]]; then
        printfe "%s" "yellow" "dotfiles is dirty "
        printfe "%s" "red" "[$(git -C ~/dotfiles status --porcelain | grep -c '^??')] untracked "
        printfe "%s" "yellow" "[$(git -C ~/dotfiles status --porcelain | grep -c '^ M')] modified "
        printfe "%s" "green" "[$(git -C ~/dotfiles status --porcelain | grep -c '^M ')] staged "
    fi

    printfe "%s" "blue" "[$(git -C ~/dotfiles rev-parse --short HEAD)] "
    if [[ $(git -C ~/dotfiles log origin/master..HEAD) ]]; then
        printfe "%s" "yellow" "[!] You have $(git -C ~/dotfiles log origin/master..HEAD --oneline | wc -l | tr -d ' ') commit(s) to push"
    fi

    println "" "normal"
}

# print colored with printf (args: format, color, message ...)
printfe() {
    format=$1
    color=$2
    shift 2

    red=$(tput setaf 1)
    green=$(tput setaf 2)
    yellow=$(tput setaf 3)
    blue=$(tput setaf 4)
    magenta=$(tput setaf 5)
    cyan=$(tput setaf 6)
    normal=$(tput sgr0)

    case $color in
        "red")
            color=$red
            ;;
        "green")
            color=$green
            ;;
        "yellow")
            color=$yellow
            ;;
        "blue")
            color=$blue
            ;;
        "magenta")
            color=$magenta
            ;;
        "cyan")
            color=$cyan
            ;;
        *)
            color=$normal
            ;;
    esac

    printf "$color$format$normal" "$@"
}

ensure_package_installed() {
    if ! command -v $1 &>/dev/null; then
        println "$1 is not installed. Please install it." "red"
        exit 1
    fi
    println "  - $1 is available." "green"
}

ensure_sudo_privileges() {
    if sudo -n true 2>/dev/null; then
        return
    else
        println "$1" "yellow"
        sudo true
    fi
}

function exesudo ()
{
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
    #
    # LOCAL VARIABLES:
    #
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##

    #
    # I use underscores to remember it's been passed
    local _funcname_="$1"

    local params=( "$@" )             ## array containing all params passed here
    local tmpfile="/dev/shm/$RANDOM"  ## temporary file
    local content                     ## content of the temporary file
    local regex                       ## regular expression

    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
    #
    # MAIN CODE:
    #
    ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##

    #
    # WORKING ON PARAMS:
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    #
    # Shift the first param (which is the name of the function)
    unset params[0]              ## remove first element
    # params=( "${params[@]}" )  ## repack array

    #
    # WORKING ON THE TEMPORARY FILE:
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    content="#!/bin/bash\n\n"

    #
    # Write the params array
    content="${content}params=(\n"

    regex="\s+"
    for param in "${params[@]}"
    do
        if [[ "$param" =~ $regex ]]
        then
            content="${content}\t\"${param}\"\n"
        else
            content="${content}\t${param}\n"
        fi
    done

    content="$content)\n"
    echo -e "$content" > "$tmpfile"

    #
    # Append the function source
    echo "#$( type "$_funcname_" )" >> "$tmpfile"

    #
    # Append the call to the function
    echo -e "\n$_funcname_ \"\${params[@]}\"\n" >> "$tmpfile"

    #
    # DONE: EXECUTE THE TEMPORARY FILE WITH SUDO
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    sudo bash "$tmpfile"
    rm "$tmpfile"
}

resolve_path() {
    echo "$(cd "$(dirname "$1")"; pwd)/$(basename "$1")"
}

check_or_make_symlink() {
    source /home/menno/dotfiles/bin/helpers/functions.sh

    SOURCE="$1"
    TARGET="$2"

    # Take any ~ and replace it with $HOME
    SOURCE="${SOURCE/#\~/$HOME}"
    TARGET="${TARGET/#\~/$HOME}"

    # Ensure the parent directory of the target exists
    mkdir -p "$(dirname "$TARGET")"

    # if source doesn't exist it's likely a secret that hasn't been decrypted yet
    if [ ! -e "$SOURCE" ]; then
        printfe "%s\n" "yellow" "  - Source $SOURCE doesn't exist"
        return
    fi

    SOURCE=$(resolve_path "$SOURCE")
    TARGET=$(resolve_path "$TARGET")

    # Check if we have permissions to create the symlink
    if [ ! -w "$(dirname "$TARGET")" ]; then
        # Check if link exists
        if [ -L "$TARGET" ]; then
            # Check if it points to the correct location
            if [ "$(readlink "$TARGET")" != "$SOURCE" ]; then
                exesudo check_or_make_symlink "$SOURCE" "$TARGET"
                return
            fi
        else
            # Link doesn't exist but we don't have permissions to create it, so we should try to create it with sudo
            exesudo check_or_make_symlink "$SOURCE" "$TARGET"
        fi
        return
    fi

    # If target is already a symlink, we should check if it points to the correct location
    if [ -L "$TARGET" ]; then
        if [ "$(readlink "$TARGET")" != "$SOURCE" ]; then
            printfe "%s\n" "yellow" "  - Symlink $TARGET exists but points to the wrong location"
            printfe "%s\n" "yellow" "    Expected: $SOURCE"
            printfe "%s\n" "yellow" "    Actual:   $(readlink "$TARGET")"
            printfe "%s\n" "yellow" "    Fixing symlink"
            rm "$TARGET"
            mkdir -p "$(dirname "$TARGET")"
            ln -s "$SOURCE" "$TARGET"
            printfe "%s\n" "green" "    Created symlink $TARGET -> $SOURCE"
            return
        fi
    fi

    # If target is a file and it's not a symlink, we should back it up
    if [ -f "$TARGET" ] && [ ! -L "$TARGET" ]; then
        printfe "%s\n" "yellow" "  - File $TARGET exists, backing up and creating symlink"
        mv "$TARGET" "$TARGET.bak"
    fi

    # If the target is already a symlink, and it points to the correct location, we should return and be happy
    if [ -L "$TARGET" ]; then
        printfe "%s" "green" "  - OK: "
        printfe "%-30s" "blue" "$SOURCE"
        printfe "%s" "cyan" " -> "
        printfe "%-30s\n" "blue" "$TARGET"
        return
    fi

    # Create the symlink
    mkdir -p "$(dirname "$TARGET")"
    ln -s "$SOURCE" "$TARGET"

    # Check if the symlink was created successfully
    if [ ! -L "$TARGET" ]; then
        printfe "%s\n" "red" "  - Failed to create symlink $TARGET -> $SOURCE"
        return
    fi

    printfe "%s" "green" "  - Added new symlink: "
    printfe "%-30s" "blue" "$SOURCE"
    printfe "%s" "cyan" " -> "
    printfe "%-30s\n" "blue" "$TARGET"
}

clear_line() {
    echo -en "\r"
}
@@ -1,55 +0,0 @@
#!/usr/bin/env bash

source $HOME/dotfiles/bin/helpers/functions.sh

ensure_pipx_packages_installed() {
    pipx_packages=($(cat $DOTFILES_CONFIG | shyaml get-values config.packages.pipx))
    for i in "${pipx_packages[@]}";
    do
        printfe "%s" "cyan" "  - Fetching package details for $i"
        echo -en '\r'

        if pipx list | grep --quiet ${i}; then
            printfe "%s\n" "green" "  - $i is already installed."
            continue
        fi

        printfe "%s" "cyan" "  - Installing $i..."
        echo -en '\r'

        pipx install $i
        if [ $? -ne 0 ]; then
            printfe "%s\n" "red" "  - Failed to install $i"
            continue
        fi

        printfe "%s\n" "green" "  - $i installed."
    done
}

print_pipx_status() {
    printfe "%s" "cyan" "Checking pipx packages..."
    clear_line

    pipx_packages=($(cat $DOTFILES_CONFIG | shyaml get-values config.packages.pipx))
    count=$(echo $pipx_packages | wc -w)
    installed=0

    for package in "${pipx_packages[@]}"; do
        if pipx list | grep -q $package; then
            installed=$((installed + 1))
        else
            if [ "$verbose" = true ]; then
                printfe "%s\n" "red" "$package is not installed"
            fi
        fi
    done

    printfe "%s" "cyan" "pipx"
    if [ $installed -eq $count ]; then
        printfe "%s" "green" " $installed/$count "
    else
        printfe "%s" "red" " $installed/$count "
    fi
    printfe "%s\n" "cyan" "packages installed"
}
@@ -1,11 +1,35 @@
 Usage: dotf [OPTIONS] [ARGS]

-update: Pull latest changes, and update symlinks and configurations
-        Also pulls latest nix channels and updates flakes to latest versions.
-upgrade: Runs switch and flake variants of nix switch with upgrade, plus home-manager.
+update: Update everything in the dotfiles repository.
+  Options:
+    --ha, -H            Upgrade Home Manager packages
+    --ansible, -A       Upgrade Ansible packages
+    --ansible-verbose   Upgrade Ansible packages with verbose output (-vvv)
+    --full-speed, -F    Use all available cores for compilation (Default: 8 cores)
+
+secrets: Encrypt and decrypt secrets.
+  Commands:
+    encrypt           Encrypt all files in the secrets folder
+    decrypt           Decrypt all .gpg files in the secrets folder
+
+service: Manage Docker services for development.
+  Commands:
+    start SERVICE     Start a Docker service
+    stop SERVICE      Stop a Docker service
+    restart SERVICE   Restart a Docker service
+    update SERVICE    Update a Docker service (pull new images and recreate)
+    update --all      Update all running services
+    logs SERVICE      Show Docker service logs
+    ps [SERVICE]      Show Docker service status
+    list, ls          List available Docker services
+
+lint: Run linters on dotfiles.
+  Options:
+    --ansible         Run only ansible-lint
+    --nix             Run only nixfmt
+    --python          Run only Python linters (pylint, black)
+    --fix             Auto-fix issues where possible
+
+auto-start: Start a set of pre-defined applications.
+hello: Shows the welcome message for the terminal.
+help: Shows this help message
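A few example invocations implied by the help text above (a sketch; the service name is illustrative):

```bash
# Example invocations of the dotf CLI based on the help text
dotf update --full-speed      # update everything, using all available cores
dotf secrets decrypt          # decrypt all .gpg files in the secrets folder
dotf service restart caddy    # restart one of the Docker services
dotf lint --nix --fix         # run nixfmt only and auto-fix where possible
```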
cloud-config (new file, 22 lines)
@@ -0,0 +1,22 @@
#cloud-config

# Create a user named menno with sudo privileges
users:
  - name: menno
    sudo: ALL=(ALL) NOPASSWD:ALL
    groups: sudo, adm
    shell: /bin/bash
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM+sKpcREOUjwMMSzEWAso6830wbOi8kUxqpuXWw5gHr menno_1password
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE22Hfx8wgkc57TXX1TCMHcNrCdjbfog5QeHFJfl7IeD menno_fallback

# Update package lists and install latest updates
package_update: true
package_upgrade: true

# Set our own hostname instead of preserving the default
preserve_hostname: false
hostname: mennos-vm

# Final message when cloud-init completes
final_message: "Cloud-init has finished setting up the system with user 'menno'. System boot completed after $UPTIME seconds."
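A minimal sketch of feeding this file to a fresh VM; multipass is just one option, and any cloud-init-aware image or provider works the same way:

```bash
# Sketch: launch a VM that applies the cloud-config above (multipass shown as one option)
multipass launch --name mennos-vm --cloud-init cloud-config
```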
config/ansible/README.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# Ansible Configuration

## 1Password Integration

This Ansible configuration includes a custom lookup plugin for fetching secrets from 1Password.
The 1Password CLI must be installed and authenticated on the machine running Ansible.

See the [1Password Integration Readme](plugins/lookup/README.md).

### Prerequisites

1. Install the 1Password CLI
2. Sign in to 1Password using `op signin`
3. Ensure the service account is properly configured

### Finding Vault IDs

To find your vault ID:

```bash
op vault list
```

For more information, see the [1Password CLI documentation](https://developer.1password.com/docs/cli).
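The prerequisite steps map onto the 1Password CLI roughly as follows (a sketch; ITEM_ID and VAULT_ID are placeholders for the real IDs used in the lookups later in this changeset):

```bash
# Sketch of the prerequisites using the 1Password CLI
op signin                                            # authenticate the CLI
op vault list                                        # note the vault ID to use in lookups
op item get ITEM_ID --vault VAULT_ID --format json   # fetch an item (placeholder IDs)
```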
config/ansible/ansible.cfg (new file, 5 lines)
@@ -0,0 +1,5 @@
[defaults]
inventory = inventory
roles_path = roles
collections_paths = collections
retry_files_enabled = False
config/ansible/group_vars/servers.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
flatpaks: false
install_ui_apps: false
config/ansible/group_vars/workstations.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
flatpaks: true
install_ui_apps: true
config/ansible/handlers/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
- name: Systemctl daemon-reload
  become: true
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart SSH service
  become: true
  ansible.builtin.service:
    name: ssh
    state: restarted
    enabled: true
config/ansible/inventory.ini (new file, 10 lines)
@@ -0,0 +1,10 @@
[workstations]
mennos-laptop ansible_connection=local
mennos-desktop ansible_connection=local

[servers]
mennos-server ansible_connection=local
mennos-cloud-server ansible_connection=local
mennos-hobbypc ansible_connection=local
mennos-vm ansible_connection=local
dotfiles-test ansible_connection=local
config/ansible/playbook.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
---
- name: Configure all hosts
  hosts: all
  handlers:
    - name: Import handler tasks
      ansible.builtin.import_tasks: handlers/main.yml
  gather_facts: true

  tasks:
    - name: Include global tasks
      ansible.builtin.import_tasks: tasks/global/global.yml

    - name: Include workstation tasks
      ansible.builtin.import_tasks: tasks/workstations/workstation.yml
      when: hostname in ['mennos-laptop', 'mennos-desktop']

    - name: Include server tasks
      ansible.builtin.import_tasks: tasks/servers/server.yml
      when: hostname in ['mennos-server', 'mennos-cloud-server', 'mennos-hobbypc', 'mennos-vm', 'dotfiles-test']
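The playbook keys its `when:` conditions on a `hostname` variable, so it presumably gets that passed in by the dotfiles CLI. A direct invocation might look like this (a sketch; how `hostname` is actually supplied is an assumption based on the conditions above):

```bash
# Sketch: run the playbook locally, supplying the hostname the conditions key on
ansible-playbook config/ansible/playbook.yml \
  -e "hostname=$(hostname)" --ask-become-pass
```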
config/ansible/requirements.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
# Collections section
collections:
  - community.general
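Installing the listed collections is standard ansible-galaxy usage:

```bash
# Install the collections required by this configuration
ansible-galaxy collection install -r config/ansible/requirements.yml
```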
config/ansible/tasks/global/docker.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
---
- name: Check if Docker CE is installed
  ansible.builtin.command: docker --version
  register: docker_check
  changed_when: false
  failed_when: false

- name: Download Docker installation script
  ansible.builtin.get_url:
    url: https://get.docker.com
    dest: /tmp/get-docker.sh
    mode: "0755"
  when: docker_check.rc != 0

- name: Install Docker CE
  ansible.builtin.shell: bash -c 'set -o pipefail && sh /tmp/get-docker.sh'
  args:
    executable: /bin/bash
    creates: /usr/bin/docker
  when: docker_check.rc != 0

- name: Add user to docker group
  ansible.builtin.user:
    name: "{{ ansible_user }}"
    groups: docker
    append: true
  become: true
  when: docker_check.rc != 0

- name: Ensure docker is running
  ansible.builtin.systemd:
    name: docker
    state: started
    enabled: true
  become: true
  register: docker_service

- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true
  become: true
  when: docker_service.changed

- name: Enable and start docker service
  ansible.builtin.systemd:
    name: docker
    state: started
    enabled: true
  become: true
  when: docker_service.changed
  notify: Reload systemd
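The tasks above amount to the following shell steps (a manual-equivalent sketch):

```bash
# Manual equivalent of the Docker install flow above (sketch)
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
sh /tmp/get-docker.sh
sudo usermod -aG docker "$USER"       # same effect as the user-module task
sudo systemctl enable --now docker    # ensure the service is enabled and running
```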
config/ansible/tasks/global/global.yml (new file, 64 lines)
@@ -0,0 +1,64 @@
---
- name: Include global symlinks tasks
  ansible.builtin.import_tasks: tasks/global/symlinks.yml

- name: Gather package facts
  ansible.builtin.package_facts:
    manager: auto
  become: true

- name: Include Tailscale tasks
  ansible.builtin.import_tasks: tasks/global/tailscale.yml
  become: true

- name: Include Rust tasks
  ansible.builtin.import_tasks: tasks/global/rust.yml
  become: true

- name: Include Docker tasks
  ansible.builtin.import_tasks: tasks/global/docker.yml
  become: true

- name: Include Ollama tasks
  ansible.builtin.import_tasks: tasks/global/ollama.yml
  become: true

- name: Include OpenSSH Server tasks
  ansible.builtin.import_tasks: tasks/global/openssh-server.yml
  become: true

- name: Include Utils tasks
  ansible.builtin.import_tasks: tasks/global/utils.yml
  become: true

- name: Ensure common packages are installed
  ansible.builtin.package:
    name:
      - git
      - vim
      - trash-cli
      - curl
      - wget
      - httpie
      # Python is used for the dotfiles CLI tools
      - python3
      - python3-pip
      - python3-venv
      - pylint
      - black
      # Package manager wrapper
      - nala
    state: present
  become: true

- name: Check whether ~/.hushlogin exists
  ansible.builtin.stat:
    path: ~/.hushlogin
  register: hushlogin_stat

- name: Create ~/.hushlogin if it does not exist
  ansible.builtin.file:
    path: ~/.hushlogin
    state: touch
    mode: "0644"
  when: not hushlogin_stat.stat.exists
config/ansible/tasks/global/ollama.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
---
- name: Check if Ollama is installed
  ansible.builtin.command: ollama --version
  register: ollama_check
  changed_when: false
  failed_when: false

- name: Download Ollama install script
  ansible.builtin.get_url:
    url: https://ollama.com/install.sh
    dest: /tmp/install_ollama.sh
    mode: "0755"
  when: ollama_check.rc != 0

- name: Install Ollama
  ansible.builtin.command: bash -c 'set -o pipefail && sh /tmp/install_ollama.sh'
  when: ollama_check.rc != 0
  args:
    creates: /usr/local/bin/ollama

- name: Ensure Ollama is running
  ansible.builtin.systemd:
    name: ollama
    state: started
    enabled: true
  become: true
  register: ollama_service
config/ansible/tasks/global/openssh-server.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
- name: Ensure openssh-server is installed
  ansible.builtin.package:
    name: openssh-server
    state: present

- name: Ensure SSH service is enabled and running
  ansible.builtin.service:
    name: ssh
    state: started
    enabled: true

- name: Ensure SSH server configuration is proper
  ansible.builtin.template:
    src: templates/sshd_config.j2
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: "0644"
    validate: "/usr/sbin/sshd -t -f %s"
  notify: Restart SSH service
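The `validate:` option is what makes this template deploy safe: sshd checks the candidate file before it replaces the live config. The manual equivalent of that check-then-restart cycle is:

```bash
# Validate the sshd config before restarting the service (manual equivalent)
sudo /usr/sbin/sshd -t -f /etc/ssh/sshd_config && sudo systemctl restart ssh
```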
config/ansible/tasks/global/rust.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
---
- name: Check if Rust is installed
  ansible.builtin.shell: source $HOME/.cargo/env && rustc --version
  register: rust_check
  changed_when: false
  failed_when: false
  args:
    executable: /bin/bash

- name: Download Rust installation script
  ansible.builtin.get_url:
    url: https://sh.rustup.rs
    dest: /tmp/rustup.sh
    mode: "0755"
  when: rust_check.rc != 0

- name: Install Rust and Cargo
  ansible.builtin.shell: |
    set -o pipefail
    /tmp/rustup.sh -y
  args:
    executable: /bin/bash
    creates: ~/.cargo/bin/rustc
  when: rust_check.rc != 0
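Manually, the same non-interactive rustup flow looks like this (sketch):

```bash
# Manual equivalent of the rustup install above (sketch)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs -o /tmp/rustup.sh
sh /tmp/rustup.sh -y
source "$HOME/.cargo/env" && rustc --version
```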
config/ansible/tasks/global/symlinks.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
---
- name: Set user home directory
  ansible.builtin.set_fact:
    user_home: "{{ ansible_env.HOME if ansible_user_id == 'root' else lookup('env', 'HOME') }}"

- name: Create basic symlinks
  ansible.builtin.file:
    src: "{{ item.src | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
    dest: "{{ item.dest | replace('~', user_home) }}"
    state: link
    force: true
    follow: false
  loop:
    - { src: "$DOTFILES_PATH/config/home-manager", dest: "~/.config/home-manager" }
    - { src: "$DOTFILES_PATH/config/ssh/config", dest: "~/.ssh/config" }
    - { src: "$DOTFILES_PATH/config/ssh/config.d", dest: "~/.ssh/config.d" }
    - { src: "$DOTFILES_PATH/config/starship.toml", dest: "~/.config/starship.toml" }
    - { src: "$DOTFILES_PATH/.bashrc", dest: "~/.bashrc.extra" }

- name: Create gitconfig symlink
  ansible.builtin.file:
    src: "{{ gitconfig_mapping[hostname] | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
    dest: "{{ user_home }}/.gitconfig"
    state: link
    force: true
    follow: false
  vars:
    gitconfig_mapping:
      mennos-desktop: "$DOTFILES_PATH/config/git/gitconfig.linux"
      mennos-laptop: "$DOTFILES_PATH/config/git/gitconfig.linux"
      mennos-server: "$DOTFILES_PATH/config/git/gitconfig.mennos-server"
      mennos-cloud-server: "$DOTFILES_PATH/config/git/gitconfig.mennos-server"
      mennos-vm: "$DOTFILES_PATH/config/git/gitconfig.mennos-server"
      mennos-hobbypc: "$DOTFILES_PATH/config/git/gitconfig.linux"
      dotfiles-test: "$DOTFILES_PATH/config/git/gitconfig.mennos-server"

- name: Create SSH authorized_keys symlink
  ansible.builtin.file:
    src: "{{ authorized_keys_mapping[hostname] | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
    dest: "{{ user_home }}/.ssh/authorized_keys"
    state: link
    force: true
    follow: false
  vars:
    authorized_keys_mapping:
      mennos-desktop: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-desktop"
      mennos-laptop: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-laptop"
      mennos-server: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-server"
      mennos-cloud-server: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-server"
      mennos-vm: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-server"
      mennos-hobbypc: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-hobbypc"
      dotfiles-test: "$DOTFILES_PATH/config/ssh/authorized_keys/mennos-server"
config/ansible/tasks/global/tailscale.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
---
- name: Check if Tailscale is installed
  ansible.builtin.command: which tailscale
  register: tailscale_check
  changed_when: false
  failed_when: false

- name: Install Tailscale using curl script
  ansible.builtin.shell: curl -fsSL https://tailscale.com/install.sh | sh
  args:
    creates: /usr/bin/tailscale
  when: tailscale_check.rc != 0
  become: true

- name: Check if Tailscale is running
  ansible.builtin.command: tailscale status
  register: tailscale_status
  changed_when: false
  failed_when: false

- name: Enable and start Tailscale service
  ansible.builtin.systemd:
    name: tailscaled
    state: started
    enabled: true
    daemon_reload: true
  become: true

- name: Notify user to authenticate Tailscale
  ansible.builtin.debug:
    msg: "Please authenticate Tailscale by running: sudo tailscale up --operator=$USER"
  when: tailscale_status.rc != 0
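Condensed to shell, the install-then-authenticate flow these tasks implement is:

```bash
# Manual equivalent of the Tailscale tasks above (sketch)
curl -fsSL https://tailscale.com/install.sh | sh
sudo systemctl enable --now tailscaled
sudo tailscale up --operator=$USER   # authenticate, as the debug task suggests
tailscale status
```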
config/ansible/tasks/global/utils.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
---
- name: Load DOTFILES_PATH environment variable
  ansible.builtin.set_fact:
    dotfiles_path: "{{ lookup('env', 'DOTFILES_PATH') }}"

- name: Ensure ~/.local/bin exists
  ansible.builtin.file:
    path: "{{ ansible_env.HOME }}/.local/bin"
    state: directory
    mode: "0755"

- name: Scan utils folder for scripts to place in ~/.local/bin
  ansible.builtin.find:
    paths: "{{ dotfiles_path }}/config/ansible/tasks/global/utils"
    file_type: file
  register: utils_files

- name: Create symlinks for utils scripts
  ansible.builtin.file:
    src: "{{ item.path }}"
    dest: "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename }}"
    state: link
  loop: "{{ utils_files.files }}"
  when: not item.path | regex_search('\.go$')
  become: false

- name: Compile Go files and place binaries in ~/.local/bin
  ansible.builtin.command:
    cmd: go build -o "{{ ansible_env.HOME }}/.local/bin/{{ item.path | basename | regex_replace('\.go$', '') }}" "{{ item.path }}"
  loop: "{{ utils_files.files }}"
  when: item.path | regex_search('\.go$')
  become: false
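For a single Go utility, the compile task reduces to one command; `helloworld.go` (shown next) is the example shipped in the utils folder. A sketch, assuming `DOTFILES_PATH` is set as the tasks above expect:

```bash
# Manual equivalent of the Go compile task for one utility (sketch)
go build -o "$HOME/.local/bin/helloworld" \
  "$DOTFILES_PATH/config/ansible/tasks/global/utils/helloworld.go"
"$HOME/.local/bin/helloworld"   # prints: Hello, World!
```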
config/ansible/tasks/global/utils/helloworld.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package main

import "fmt"

func main() {
	fmt.Println("Hello, World!")
}
config/ansible/tasks/global/utils/ipaddr (new executable file, 249 lines)
@@ -0,0 +1,249 @@
#!/usr/bin/env python3

import os
import subprocess
import argparse
import requests

def get_physical_interfaces():
    """
    Retrieve a list of physical network interfaces on the system.

    This function checks the `/sys/class/net/` directory to identify physical
    network interfaces. It determines if an interface is physical by verifying
    the presence of a symbolic link to a `device` directory.

    Returns:
        list: A list of strings, where each string is the name of a physical
              network interface.
    """
    interfaces_path = '/sys/class/net/'
    physical_interfaces = []

    for interface in os.listdir(interfaces_path):
        if not os.path.islink(os.path.join(interfaces_path, interface, 'device')):
            continue
        physical_interfaces.append(interface)

    return physical_interfaces

def get_virtual_interfaces():
    """
    Retrieve a list of virtual network interfaces on the system.

    This function scans the network interfaces available in the '/sys/class/net/'
    directory and filters out physical interfaces and the loopback interface ('lo').
    It identifies virtual interfaces by checking if the 'device' path is not a
    symbolic link.

    Returns:
        list: A list of virtual network interface names as strings.
    """
    interfaces_path = '/sys/class/net/'
    virtual_interfaces = []

    for interface in os.listdir(interfaces_path):
        if os.path.islink(os.path.join(interfaces_path, interface, 'device')):
            continue
        if interface == 'lo':
            continue
        virtual_interfaces.append(interface)

    return virtual_interfaces

def get_up_interfaces(interfaces):
    """
    Filter the given list of interfaces to include only those that are up or unknown.

    Args:
        interfaces (list): A list of interface names.

    Returns:
        list: A list of interface names that are up or treated as up (e.g., UNKNOWN).
    """
    up_interfaces = []
    for interface in interfaces:
        try:
            result = subprocess.run(['ip', 'link', 'show', interface],
                                    capture_output=True, text=True, check=True)
            if "state UP" in result.stdout or "state UNKNOWN" in result.stdout:
                up_interfaces.append(interface)
        except Exception:
            continue
    return up_interfaces

def get_interface_state(interface):
    """
    Retrieve the state and MAC address of a network interface.

    Args:
        interface (str): The name of the network interface.

    Returns:
        tuple: A tuple containing the state (str) and MAC address (str) of the interface.
    """
    try:
        result = subprocess.run(['ip', 'link', 'show', interface],
                                capture_output=True, text=True, check=True)
        lines = result.stdout.splitlines()
        state = "UNKNOWN"
        mac = "N/A"

        if len(lines) > 0:
            if "state UP" in lines[0]:
                state = "UP"
            elif "state DOWN" in lines[0]:
                state = "DOWN"
            elif "state UNKNOWN" in lines[0]:
                state = "UP"  # Treat UNKNOWN as UP

        if len(lines) > 1:
            mac = lines[1].strip().split()[1] if len(lines[1].strip().split()) > 1 else "N/A"

        return state, mac
    except Exception:
        return "UNKNOWN", "N/A"

def get_external_ips():
    """
    Fetch both IPv4 and IPv6 external IP addresses of the machine.

    This function tries each of the configured services in turn. If the
    first IP fetched is IPv6, it explicitly tries to fetch an IPv4 address using
    curl's `-4` option.

    Returns:
        tuple: A tuple containing the IPv4 and IPv6 addresses as strings. If either
               address cannot be fetched, it will be set to "Unavailable".
    """
    services = ["https://ip.mvl.sh", "https://ifconfig.co", "https://api.ipify.org", "https://myexternalip.com/raw", "https://ifconfig.io", "https://ifconfig.me"]
    headers = {"User-Agent": "curl"}
    ipv4, ipv6 = "Unavailable", "Unavailable"

    for service in services:
        try:
            response = requests.get(service, headers=headers, timeout=0.2)
            if response.status_code == 200:
                ip = response.text.strip()
                if ":" in ip:  # IPv6 address
                    ipv6 = ip
                    # Try to fetch IPv4 explicitly
                    ipv4_response = subprocess.run(
                        ["curl", "-4", "--silent", service],
                        capture_output=True,
                        text=True,
                        timeout=0.2,
                        check=True
                    )
                    if ipv4_response.returncode == 0:
                        ipv4 = ipv4_response.stdout.strip()
                else:  # IPv4 address
                    ipv4 = ip
            if ipv4 != "Unavailable" and ipv6 != "Unavailable":
                break
        except (requests.RequestException, subprocess.TimeoutExpired, subprocess.CalledProcessError):
            continue

    return ipv4, ipv6

def display_interface_details(show_physical=False, show_virtual=False, show_all=False, show_external_ip=False, show_ipv6=False):
    """
    Display details of network interfaces based on the specified flags.

    Args:
        show_physical (bool): Show physical interfaces (UP by default unless combined with show_all).
        show_virtual (bool): Show virtual interfaces (UP by default unless combined with show_all).
        show_all (bool): Include all interfaces (UP, DOWN, UNKNOWN).
        show_external_ip (bool): Fetch and display the external IP address.
        show_ipv6 (bool): Include IPv6 addresses in the output.

    Notes:
        - By default, only IPv4 addresses are shown unless `-6` is specified.
        - IPv6 addresses are displayed in a separate column if `-6` is specified.
    """
    if show_external_ip:
        ipv4, ipv6 = get_external_ips()
        print(f"External IPv4: {ipv4}")
        print(f"External IPv6: {ipv6}")
        print("-" * 70)

    interfaces = []

    if show_all:
        if show_physical or not show_virtual:  # Default to physical if no `-v`
            interfaces.extend(get_physical_interfaces())
        if show_virtual:
            interfaces.extend(get_virtual_interfaces())
    else:
        if show_physical or not show_virtual:  # Default to physical if no `-v`
            interfaces.extend(get_up_interfaces(get_physical_interfaces()))
        if show_virtual or not show_physical:  # Default to virtual if no `-p`
            interfaces.extend(get_up_interfaces(get_virtual_interfaces()))

    interfaces.sort()

    # Define column widths based on expected maximum content length
    col_widths = {
        'interface': 15,
        'ipv4': 18,
        'ipv6': 40 if show_ipv6 else 0,  # Hide IPv6 column if not showing IPv6
        'subnet': 10,
        'state': 10,
        'mac': 18
    }

    # Print header with proper formatting
    header = f"{'Interface':<{col_widths['interface']}} {'IPv4 Address':<{col_widths['ipv4']}}"
    if show_ipv6:
        header += f" {'IPv6 Address':<{col_widths['ipv6']}}"
    header += f" {'Subnet':<{col_widths['subnet']}} {'State':<{col_widths['state']}} {'MAC Address':<{col_widths['mac']}}"
    print(header)
    print("-" * (col_widths['interface'] + col_widths['ipv4'] + (col_widths['ipv6'] if show_ipv6 else 0) + col_widths['subnet'] + col_widths['state'] + col_widths['mac']))

    for interface in interfaces:
        try:
            result = subprocess.run(['ip', '-br', 'addr', 'show', interface],
                                    capture_output=True, text=True, check=True)
            state, mac = get_interface_state(interface)
            if result.returncode == 0:
                lines = result.stdout.strip().splitlines()
                ipv4 = "N/A"
                ipv6 = "N/A"
                subnet = ""

                for line in lines:
                    parts = line.split()
                    if len(parts) >= 3:
                        ip_with_mask = parts[2]

                        # Check if the address is IPv4 or IPv6
                        if ":" in ip_with_mask:  # IPv6
                            ipv6 = ip_with_mask.split('/')[0]
                        else:  # IPv4
                            ipv4 = ip_with_mask.split('/')[0]
                            subnet = ip_with_mask.split('/')[1] if '/' in ip_with_mask else ""

                row = f"{interface:<{col_widths['interface']}} {ipv4:<{col_widths['ipv4']}}"
                if show_ipv6:
                    row += f" {ipv6:<{col_widths['ipv6']}}"
                row += f" {subnet:<{col_widths['subnet']}} {state:<{col_widths['state']}} {mac:<{col_widths['mac']}}"
                print(row)
        except Exception as e:
            print(f"Error fetching details for {interface}: {e}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Display network interface information')
    parser.add_argument('-p', action='store_true', help='Show physical interfaces (UP by default)')
    parser.add_argument('-v', action='store_true', help='Show virtual interfaces (UP by default)')
    parser.add_argument('-a', action='store_true', help='Include all interfaces (UP, DOWN, UNKNOWN)')
    parser.add_argument('-e', action='store_true', help='Fetch and display the external IP address')
    parser.add_argument('--ipv6', '-6', action='store_true', help='Include IPv6 addresses in the output')
    args = parser.parse_args()

    # Default to showing both UP physical and virtual interfaces if no flags are specified
    display_interface_details(show_physical=args.p or not (args.p or args.v or args.a or args.e),
                              show_virtual=args.v or not (args.p or args.v or args.a or args.e),
                              show_all=args.a,
                              show_external_ip=args.e,
                              show_ipv6=args.ipv6)
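Based on the argparse flags above, typical invocations look like this (assuming the script is on PATH via the symlink task earlier):

```bash
# Example invocations of the ipaddr utility above
ipaddr          # UP physical and virtual interfaces, IPv4 only
ipaddr -p -a    # all physical interfaces, including DOWN ones
ipaddr -e -6    # also fetch external IPs and show IPv6 addresses
```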
config/ansible/tasks/servers/juicefs.yml (new file, 75 lines)
@@ -0,0 +1,75 @@
---
- name: Check if JuiceFS is already installed
  ansible.builtin.command: which juicefs
  register: juicefs_check
  ignore_errors: true
  changed_when: false

- name: Install JuiceFS using the automatic installer
  ansible.builtin.shell: curl -sSL https://d.juicefs.com/install | sh -
  register: juicefs_installation
  when: juicefs_check.rc != 0
  become: true

- name: Verify JuiceFS installation
  ansible.builtin.command: juicefs version
  register: juicefs_version
  changed_when: false
  when: juicefs_check.rc != 0 or juicefs_installation.changed

- name: Create mount directory
  ansible.builtin.file:
    path: /mnt/object_storage
    state: directory
    mode: "0755"
  become: true

- name: Create cache directory
  ansible.builtin.file:
    path: /var/jfsCache
    state: directory
    mode: "0755"
  become: true

- name: Set JuiceFS facts
  ansible.builtin.set_fact:
    hetzner_access_key: "{{ lookup('community.general.onepassword', 'mfk2qgnaplgtk6xmfc3r6w6neq', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='AWS_ACCESS_KEY_ID') }}"
    hetzner_secret_key: "{{ lookup('community.general.onepassword', 'mfk2qgnaplgtk6xmfc3r6w6neq', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='AWS_SECRET_ACCESS_KEY') }}"
    redis_password: "{{ lookup('community.general.onepassword', '4cioblm633bdkl6put35lk6ql4', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='password') }}"

- name: Create JuiceFS systemd service file
  ansible.builtin.template:
    src: templates/juicefs.service.j2
    dest: /etc/systemd/system/juicefs.service
    owner: root
    group: root
    mode: "0644"
  become: true

- name: Reload systemd daemon
  ansible.builtin.systemd:
    daemon_reload: true
  become: true

- name: Include JuiceFS Redis tasks
  ansible.builtin.include_tasks: services/redis/redis.yml

- name: Enable and start JuiceFS service
  ansible.builtin.systemd:
    name: juicefs.service
    enabled: true
    state: started
  become: true

- name: Check if JuiceFS is mounted
  ansible.builtin.shell: df -h | grep /mnt/object_storage
  become: true
  register: mount_check
  ignore_errors: true
  changed_when: false

- name: Display mount status
  ansible.builtin.debug:
    msg: "JuiceFS is successfully mounted at /mnt/object_storage"
  when: mount_check.rc == 0
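Once the tasks have run, the mount can be verified by hand with the same checks the playbook performs:

```bash
# Manual verification of the JuiceFS setup (same checks as the tasks above)
juicefs version
df -h | grep /mnt/object_storage
systemctl status juicefs.service
```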
config/ansible/tasks/servers/server.yml (new file, 47 lines)
@@ -0,0 +1,47 @@
---
- name: Server setup
  block:
    - name: Ensure server common packages are installed
      ansible.builtin.package:
        name:
          - openssh-server
        state: present
      become: true

    - name: Include JuiceFS tasks
      ansible.builtin.include_tasks: juicefs.yml

    - name: Include services tasks
      ansible.builtin.include_tasks: services/services.yml
      vars:
        services:
          - name: caddy
            enabled: true
          - name: hoarder
            enabled: true
          - name: golink
            enabled: true
          - name: immich
            enabled: true
          - name: gitea
            enabled: true
          - name: jellyfin
            enabled: true
          - name: seafile
            enabled: true
          - name: uptime-kuma
            enabled: true
          - name: factorio
            enabled: true
          - name: dozzle
            enabled: true
          - name: beszel
            enabled: true
          - name: arr-stack
            enabled: true
          - name: downloaders
            enabled: true
          - name: wireguard
            enabled: true
          - name: echoip
            enabled: true
@@ -0,0 +1,34 @@
---
- name: Deploy ArrStack service
  block:
    - name: Set ArrStack directories
      ansible.builtin.set_fact:
        arr_stack_service_dir: "{{ ansible_env.HOME }}/services/arr-stack"
        arr_stack_data_dir: "/mnt/object_storage/services/arr-stack"

    - name: Create ArrStack directory
      ansible.builtin.file:
        path: "{{ arr_stack_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create ArrStack data directory
      ansible.builtin.file:
        path: "{{ arr_stack_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy ArrStack docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ arr_stack_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: arr_stack_template_result

    - name: Stop ArrStack service
      ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" down --remove-orphans
      when: arr_stack_template_result.changed

    - name: Start ArrStack service
      ansible.builtin.command: docker compose -f "{{ arr_stack_service_dir }}/docker-compose.yml" up -d
      when: arr_stack_template_result.changed
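All of these service task files share the same recreate pattern: re-template the compose file and, if it changed, tear the stack down and bring it back up. Manually, for this service, that is:

```bash
# The recreate pattern these service tasks implement (sketch)
docker compose -f "$HOME/services/arr-stack/docker-compose.yml" down --remove-orphans
docker compose -f "$HOME/services/arr-stack/docker-compose.yml" up -d
```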
@@ -0,0 +1,111 @@
name: arr-stack
services:
  radarr:
    container_name: radarr
    image: lscr.io/linuxserver/radarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      - 7878:7878
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - {{ arr_stack_data_dir }}/radarr-config:/config
      - /mnt/object_storage:/storage
    restart: "unless-stopped"
    networks:
      - arr-stack-net

  sonarr:
    image: linuxserver/sonarr:latest
    container_name: sonarr
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ arr_stack_data_dir }}/sonarr-config:/config
      - /mnt/object_storage:/storage
    ports:
      - 8989:8989
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr-stack-net

  lidarr:
    image: linuxserver/lidarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      - 8686:8686
    extra_hosts:
      - host.docker.internal:host-gateway
    volumes:
      - {{ arr_stack_data_dir }}/lidarr-config:/config
      - /mnt/object_storage:/storage
    restart: unless-stopped
    networks:
      - arr-stack-net

  prowlarr:
    container_name: prowlarr
    image: linuxserver/prowlarr:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ arr_stack_data_dir }}/prowlarr-config:/config
    extra_hosts:
      - host.docker.internal:host-gateway
    ports:
      - 9696:9696
    restart: unless-stopped
    networks:
      - arr-stack-net

  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
    container_name: flaresolverr
    environment:
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - LOG_HTML=${LOG_HTML:-false}
      - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
      - TZ=Europe/Amsterdam
    ports:
      - "8191:8191"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr-stack-net

  jellyseerr:
    image: fallenbagel/jellyseerr
    container_name: jellyseerr
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ arr_stack_data_dir }}/jellyseerr-config:/app/config
    ports:
      - 5055:5055
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - arr-stack-net
      - caddy_network

networks:
  arr-stack-net:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/beszel/beszel.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: Deploy Beszel service
  block:
    - name: Set Beszel directories
      ansible.builtin.set_fact:
        beszel_service_dir: "{{ ansible_env.HOME }}/services/beszel"
        beszel_data_dir: "/mnt/object_storage/services/beszel"

    - name: Create Beszel directory
      ansible.builtin.file:
        path: "{{ beszel_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Beszel data directory
      ansible.builtin.file:
        path: "{{ beszel_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Beszel docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ beszel_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: beszel_compose

    - name: Stop Beszel service
      ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" down --remove-orphans
      when: beszel_compose.changed

    - name: Start Beszel service
      ansible.builtin.command: docker compose -f "{{ beszel_service_dir }}/docker-compose.yml" up -d
      when: beszel_compose.changed
@@ -0,0 +1,29 @@
services:
  beszel:
    image: 'henrygd/beszel'
    restart: unless-stopped
    ports:
      - '8090:8090'
    volumes:
      - {{beszel_data_dir}}/data:/beszel_data
      - {{beszel_data_dir}}/socket:/beszel_socket
    networks:
      - beszel-net
      - caddy_network

  beszel-agent:
    image: henrygd/beszel-agent:latest
    restart: unless-stopped
    network_mode: host
    volumes:
      - {{beszel_data_dir}}/socket:/beszel_socket
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      LISTEN: /beszel_socket/beszel.sock
      KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA2e4Eg8BrcYOVZ5MaEdrxErM/HA4Tc0ANxPQNcCwFwY'

networks:
  beszel-net:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/caddy/Caddyfile.j2 (new file, 84 lines)
@@ -0,0 +1,84 @@
photos.vleeuwen.me photos.mvl.sh {
    reverse_proxy immich:2283
    tls {{ caddy_email }}
}

hoarder.mvl.sh {
    reverse_proxy hoarder:3000
    tls {{ caddy_email }}
}

git.vleeuwen.me git.mvl.sh {
    reverse_proxy gitea:3000
    tls {{ caddy_email }}
}

status.vleeuwen.me status.mvl.sh {
    reverse_proxy uptime-kuma:3001
    tls {{ caddy_email }}
}

sf.mvl.sh {
    reverse_proxy seafile:80

    handle /seafdav* {
        reverse_proxy seafile:8080
    }

    tls {{ caddy_email }}
}

of.mvl.sh {
    reverse_proxy onlyoffice:80 {
        header_up Host {host}
        header_up X-Real-IP {remote}
        header_up X-Forwarded-For {remote}
        header_up X-Forwarded-Proto {scheme}
    }
    tls {{ caddy_email }}
}

fsm.mvl.sh {
    reverse_proxy factorio-server-manager:80
    tls {{ caddy_email }}
}

df.mvl.sh {
    redir / https://git.mvl.sh/vleeuwenmenno/dotfiles/raw/branch/master/setup.sh
    tls {{ caddy_email }}
}

overseerr.mvl.sh jellyseerr.mvl.sh overseerr.vleeuwen.me jellyseerr.vleeuwen.me {
    reverse_proxy jellyseerr:5055
    tls {{ caddy_email }}
}

jellyfin.mvl.sh jellyfin.vleeuwen.me {
    reverse_proxy jellyfin:8096
    tls {{ caddy_email }}
}

fladder.mvl.sh {
    reverse_proxy fladder:80
    tls {{ caddy_email }}
}

ip.mvl.sh {
    reverse_proxy echoip:8080 {
        header_up X-Real-IP {http.request.remote.host}
        header_up X-Forwarded-For {http.request.remote.host}
        header_up X-Forwarded-Proto {scheme}
        header_up X-Forwarded-Host {host}
    }

    tls {{ caddy_email }}
}

http://ip.mvl.sh {
    reverse_proxy echoip:8080 {
        header_up X-Real-IP {http.request.remote.host}
        header_up X-Forwarded-For {http.request.remote.host}
        header_up X-Forwarded-Proto {scheme}
        header_up X-Forwarded-Host {host}
    }
}
config/ansible/tasks/servers/services/caddy/caddy.yml (new file, 44 lines)
@@ -0,0 +1,44 @@
---
- name: Deploy Caddy service
  block:
    - name: Set Caddy directories
      ansible.builtin.set_fact:
        caddy_service_dir: "{{ ansible_env.HOME }}/services/caddy"
        caddy_data_dir: "/mnt/object_storage/services/caddy"
        caddy_email: "{{ lookup('community.general.onepassword', 'qwvcr4cuumhqh3mschv57xdqka', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='email') }}"

    - name: Create Caddy directory
      ansible.builtin.file:
        path: "{{ caddy_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Caddy network
      ansible.builtin.command: docker network create caddy_default
      register: create_caddy_network
      failed_when:
        - create_caddy_network.rc != 0
        - "'already exists' not in create_caddy_network.stderr"
      changed_when: create_caddy_network.rc == 0

    - name: Deploy Caddy docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ caddy_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: caddy_compose

    - name: Deploy Caddy Caddyfile
      ansible.builtin.template:
        src: Caddyfile.j2
        dest: "{{ caddy_service_dir }}/Caddyfile"
        mode: "0644"
      register: caddy_file

    - name: Stop Caddy service
      ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" down --remove-orphans
      when: caddy_compose.changed or caddy_file.changed

    - name: Start Caddy service
      ansible.builtin.command: docker compose -f "{{ caddy_service_dir }}/docker-compose.yml" up -d
      when: caddy_compose.changed or caddy_file.changed
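The network-creation task tolerates the "already exists" error so it stays idempotent; a shell equivalent of the whole deploy is:

```bash
# Manual equivalent of the Caddy deploy (sketch)
docker network create caddy_default 2>/dev/null || true   # idempotent, like the task
docker compose -f "$HOME/services/caddy/docker-compose.yml" up -d
```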
@@ -0,0 +1,24 @@
services:
  caddy:
    image: caddy:2.9.1-alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - {{ caddy_data_dir }}/data:/data
      - {{ caddy_data_dir }}/config:/config
      - {{ caddy_service_dir }}/Caddyfile:/etc/caddy/Caddyfile
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=100
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - caddy_network

networks:
  caddy_network:
    name: caddy_default
    enable_ipv6: true
@@ -0,0 +1,62 @@
name: downloaders
services:
  gluetun:
    image: qmcgaw/gluetun:latest
    privileged: true
    cap_add:
      - NET_ADMIN
    networks:
      - arr-stack-net
    ports:
      - 6881:6881
      - 6881:6881/udp
      - 8085:8085 # Qbittorrent
      - 7788:8080 # Sabnzbd
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - {{ downloaders_data_dir }}/gluetun-config:/gluetun
    environment:
      - PUID=1000
      - PGID=100
      - VPN_SERVICE_PROVIDER={{ lookup('community.general.onepassword', 'qm7lxjrv2ctgzsjuwtolxpd5i4', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='VPN_SERVICE_PROVIDER') }}
      - OPENVPN_USER={{ lookup('community.general.onepassword', 'qm7lxjrv2ctgzsjuwtolxpd5i4', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='OPENVPN_USER') }}
      - OPENVPN_PASSWORD={{ lookup('community.general.onepassword', 'qm7lxjrv2ctgzsjuwtolxpd5i4', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='OPENVPN_PASSWORD') }}
      - SERVER_COUNTRIES={{ lookup('community.general.onepassword', 'qm7lxjrv2ctgzsjuwtolxpd5i4', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='SERVER_COUNTRIES') }}
    restart: always

  sabnzbd:
    image: lscr.io/linuxserver/sabnzbd:latest
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    volumes:
      - {{ downloaders_data_dir }}/sabnzbd-config:/config
      - {{ object_storage_dir }}:/storage
    restart: unless-stopped
    network_mode: "service:gluetun"
    depends_on:
      gluetun:
        condition: service_healthy

  qbittorrent:
    image: lscr.io/linuxserver/qbittorrent
    environment:
      - PUID=1000
      - PGID=100
      - WEBUI_PORT=8085
      - TZ=Europe/Amsterdam
    volumes:
      - {{ downloaders_data_dir }}/qbit-config:/config
      - {{ object_storage_dir }}:/storage
    restart: always
    network_mode: "service:gluetun"
    depends_on:
      gluetun:
        condition: service_healthy

networks:
  arr-stack-net:
    external: true
    name: arr-stack_arr-stack-net
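Since sabnzbd and qbittorrent run with `network_mode: "service:gluetun"`, their traffic exits through the VPN container. A quick way to confirm (a sketch; the container name follows compose conventions and is assumed to be downloaders-gluetun-1, and busybox wget is assumed to be available in the image):

```bash
# Sketch: confirm egress from the shared network namespace goes via the VPN
docker exec downloaders-gluetun-1 wget -qO- https://ifconfig.me
```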
@@ -0,0 +1,29 @@
---
- name: Deploy Downloaders service
  block:
    - name: Set Downloaders directories
      ansible.builtin.set_fact:
        object_storage_dir: "/mnt/object_storage"
        downloaders_service_dir: "{{ ansible_env.HOME }}/services/downloaders"
        downloaders_data_dir: "/mnt/object_storage/services/downloaders"

    - name: Create Downloaders directory
      ansible.builtin.file:
        path: "{{ downloaders_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Downloaders docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ downloaders_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: downloaders_compose

    - name: Stop Downloaders service
      ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" down --remove-orphans
      when: downloaders_compose.changed

    - name: Start Downloaders service
      ansible.builtin.command: docker compose -f "{{ downloaders_service_dir }}/docker-compose.yml" up -d
      when: downloaders_compose.changed
@@ -0,0 +1,19 @@
services:
  dozzle:
    image: amir20/dozzle:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - 8585:8080
    environment:
      - DOZZLE_NO_ANALYTICS=true
    restart: unless-stopped
    networks:
      - dozzle-net
      - caddy_network

networks:
  dozzle-net:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/dozzle/dozzle.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: Deploy Dozzle service
  block:
    - name: Set Dozzle directories
      ansible.builtin.set_fact:
        dozzle_service_dir: "{{ ansible_env.HOME }}/services/dozzle"
        dozzle_data_dir: "/mnt/object_storage/services/dozzle"

    - name: Create Dozzle directory
      ansible.builtin.file:
        path: "{{ dozzle_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create Dozzle data directory
      ansible.builtin.file:
        path: "{{ dozzle_data_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Dozzle docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ dozzle_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: dozzle_compose

    - name: Stop Dozzle service
      ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" down --remove-orphans
      when: dozzle_compose.changed

    - name: Start Dozzle service
      ansible.builtin.command: docker compose -f "{{ dozzle_service_dir }}/docker-compose.yml" up -d
      when: dozzle_compose.changed
@@ -0,0 +1,25 @@
services:
  echoip:
    container_name: 'echoip'
    image: 'mpolden/echoip:latest'
    restart: unless-stopped
    ports:
      - "8080:8080"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - caddy_network
    volumes:
      - {{echoip_data_dir}}/GeoLite2-ASN.mmdb:/opt/echoip/GeoLite2-ASN.mmdb:ro
      - {{echoip_data_dir}}/GeoLite2-City.mmdb:/opt/echoip/GeoLite2-City.mmdb:ro
      - {{echoip_data_dir}}/GeoLite2-Country.mmdb:/opt/echoip/GeoLite2-Country.mmdb:ro
    command: >
      -p -r -H "X-Forwarded-For" -l ":8080"
      -a /opt/echoip/GeoLite2-ASN.mmdb
      -c /opt/echoip/GeoLite2-City.mmdb
      -f /opt/echoip/GeoLite2-Country.mmdb

networks:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/echoip/echoip.yml (new file, 143 lines)
@@ -0,0 +1,143 @@
---
- name: Deploy EchoIP service
  block:
    - name: Set EchoIP directories
      ansible.builtin.set_fact:
        echoip_service_dir: "{{ ansible_env.HOME }}/services/echoip"
        echoip_data_dir: "/mnt/object_storage/services/echoip"
        maxmind_account_id: "{{ lookup('community.general.onepassword', 'finpwvqp6evflzjcsnwge74n34', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='account_id') | regex_replace('\\s+', '') }}"
        maxmind_license_key: "{{ lookup('community.general.onepassword', 'finpwvqp6evflzjcsnwge74n34', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='license_key') | regex_replace('\\s+', '') }}"

    - name: Create EchoIP directory
      ansible.builtin.file:
        path: "{{ echoip_service_dir }}"
        state: directory
        mode: "0755"

    - name: Create EchoIP data directory
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}"
        state: directory
        mode: "0755"

    # Create directories for extracted databases
    - name: Create directory for ASN database extraction
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}/GeoLite2-ASN"
        state: directory
        mode: "0755"

    - name: Create directory for City database extraction
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}/GeoLite2-City"
        state: directory
        mode: "0755"

    - name: Create directory for Country database extraction
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}/GeoLite2-Country"
        state: directory
        mode: "0755"

    # Download all databases
    - name: Download GeoLite2 ASN database
      ansible.builtin.get_url:
        url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key={{ maxmind_license_key }}&suffix=tar.gz"
        dest: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
        mode: "0644"

    - name: Download GeoLite2 City database
      ansible.builtin.get_url:
        url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={{ maxmind_license_key }}&suffix=tar.gz"
        dest: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
        mode: "0644"

    - name: Download GeoLite2 Country database
      ansible.builtin.get_url:
        url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key={{ maxmind_license_key }}&suffix=tar.gz"
        dest: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
        mode: "0644"

    # Extract all databases
    - name: Extract GeoLite2 ASN database
      ansible.builtin.unarchive:
        src: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
        dest: "{{ echoip_data_dir }}/GeoLite2-ASN"
        remote_src: true
      register: asn_extracted

    - name: Extract GeoLite2 City database
      ansible.builtin.unarchive:
        src: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
        dest: "{{ echoip_data_dir }}/GeoLite2-City"
        remote_src: true
      register: city_extracted

    - name: Extract GeoLite2 Country database
      ansible.builtin.unarchive:
        src: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
        dest: "{{ echoip_data_dir }}/GeoLite2-Country"
        remote_src: true
      register: country_extracted

    # Move all databases to the correct locations
    - name: Move ASN database to correct location
      ansible.builtin.command:
        cmd: "find {{ echoip_data_dir }}/GeoLite2-ASN -name GeoLite2-ASN.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-ASN.mmdb \\;"
      when: asn_extracted.changed

    - name: Move City database to correct location
      ansible.builtin.command:
        cmd: "find {{ echoip_data_dir }}/GeoLite2-City -name GeoLite2-City.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-City.mmdb \\;"
      when: city_extracted.changed

    - name: Move Country database to correct location
      ansible.builtin.command:
        cmd: "find {{ echoip_data_dir }}/GeoLite2-Country -name GeoLite2-Country.mmdb -exec mv {} {{ echoip_data_dir }}/GeoLite2-Country.mmdb \\;"
      when: country_extracted.changed

    # Clean up unnecessary files
    - name: Remove downloaded ASN tar.gz file
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}/GeoLite2-ASN.tar.gz"
        state: absent

    - name: Remove extracted ASN folder
      ansible.builtin.command:
        cmd: "rm -rf {{ echoip_data_dir }}/GeoLite2-ASN"

    - name: Remove downloaded City tar.gz file
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}/GeoLite2-City.tar.gz"
        state: absent

    - name: Remove extracted City folder
      ansible.builtin.command:
        cmd: "rm -rf {{ echoip_data_dir }}/GeoLite2-City"

    - name: Remove downloaded Country tar.gz file
      ansible.builtin.file:
        path: "{{ echoip_data_dir }}/GeoLite2-Country.tar.gz"
        state: absent

    - name: Remove extracted Country folder
      ansible.builtin.command:
        cmd: "rm -rf {{ echoip_data_dir }}/GeoLite2-Country"

    # Deploy and restart the EchoIP service
    - name: Deploy EchoIP docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ echoip_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: echoip_compose

    - name: Stop EchoIP service
      ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" down --remove-orphans
      when: echoip_compose.changed

    - name: Start EchoIP service
      ansible.builtin.command: docker compose -f "{{ echoip_service_dir }}/docker-compose.yml" up -d
      when: echoip_compose.changed
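The download/extract/move dance above boils down to this per-database sequence (a sketch; YOUR_LICENSE_KEY is a placeholder for the key fetched from 1Password):

```bash
# Manual refresh of one GeoLite2 database (sketch)
curl -fsSL "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=YOUR_LICENSE_KEY&suffix=tar.gz" \
  -o GeoLite2-ASN.tar.gz
mkdir -p GeoLite2-ASN && tar -xzf GeoLite2-ASN.tar.gz -C GeoLite2-ASN
find GeoLite2-ASN -name GeoLite2-ASN.mmdb -exec mv {} ./GeoLite2-ASN.mmdb \;
rm -rf GeoLite2-ASN GeoLite2-ASN.tar.gz
```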
@@ -0,0 +1,27 @@
services:
  factorio-server-manager:
    image: "ofsm/ofsm:latest"
    restart: "unless-stopped"
    environment:
      - PUID=1000
      - PGID=100
      - "FACTORIO_VERSION=stable"
      - "RCON_PASS=458fc84534"
    ports:
      - "5080:80"
      - "34197:34197/udp"
    volumes:
      - {{ factorio_data_dir }}/fsm-data:/opt/fsm-data
      - {{ factorio_data_dir }}/factorio-data/saves:/opt/factorio/saves
      - {{ factorio_data_dir }}/factorio-data/mods:/opt/factorio/mods
      - {{ factorio_data_dir }}/factorio-data/config:/opt/factorio/config
      - {{ factorio_data_dir }}/factorio-data/mod_packs:/opt/fsm/mod_packs
    networks:
      - factorio
      - caddy_network

networks:
  factorio:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/factorio/factorio.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
---
- name: Deploy Factorio service
  block:
    - name: Set Factorio directories
      ansible.builtin.set_fact:
        factorio_service_dir: "{{ ansible_env.HOME }}/services/factorio"
        factorio_data_dir: "/mnt/object_storage/services/factorio"

    - name: Create Factorio directory
      ansible.builtin.file:
        path: "{{ factorio_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Factorio docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ factorio_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: factorio_compose

    - name: Stop Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" down --remove-orphans
      when: factorio_compose.changed

    - name: Start Factorio service
      ansible.builtin.command: docker compose -f "{{ factorio_service_dir }}/docker-compose.yml" up -d
      when: factorio_compose.changed
@@ -0,0 +1,98 @@
# Example configuration file; it's safe to copy this as the default config file without any modification.

# You don't have to copy this file to your instance;
# just run `./act_runner generate-config > config.yaml` to generate a config file.

log:
  # The level of logging: can be trace, debug, info, warn, error, or fatal.
  level: info

runner:
  # Where to store the registration result.
  file: .runner
  # How many tasks to execute concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if its timeout is shorter than this.
  timeout: 3h
  # Whether to skip verifying the TLS certificate of the Gitea instance.
  insecure: false
  # The timeout for fetching the job from the Gitea instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Gitea instance.
  fetch_interval: 2s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
  # Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
  labels:
    - "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
    - "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
    - "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"

cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not the address to listen on, but the address to connect to from job containers.
  # So 0.0.0.0 is a bad choice; leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when enable is true.
  # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
  # The URL should generally end with "/".
  external_server: ""

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge, or the name of a custom network.
  # If it's empty, act_runner will create a network automatically.
  network: ""
  # Whether to use privileged mode when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # Other options to be used when the container is started (e.g., --add-host=my.gitea.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
  # If the path starts with '/', the '/' will be trimmed.
  # For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir.
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported; see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""
  # Pull docker image(s) even if already present.
  force_pull: true
  # Rebuild docker image(s) even if already present.
  force_rebuild: false

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent: /tmp/act_runner
@@ -0,0 +1,54 @@
services:
  gitea:
    image: gitea/gitea:latest
    restart: always
    environment:
      - PUID=1000
      - PGID=100
    volumes:
      - {{ gitea_data_dir }}/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3030:3000"
      - "22:22"
    networks:
      - gitea
      - caddy_network

  postgres:
    image: postgres:15-alpine
    restart: always
    environment:
      - PUID=1000
      - PGID=100
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD={{ lookup('community.general.onepassword', '4gnclyzztfgqq7yxa3ctxs6tey', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='POSTGRES_PASSWORD') }}
      - POSTGRES_DB=gitea
    volumes:
      - {{ gitea_data_dir }}/postgres:/var/lib/postgresql/data
    networks:
      - gitea

  act_runner:
    image: gitea/act_runner:latest
    volumes:
      - {{ gitea_service_dir }}/act-runner-config.yaml:/config.yaml
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp/act_runner:/tmp/act_runner
    environment:
      - PUID=1000
      - PGID=100
      - GITEA_INSTANCE_URL=https://git.mvl.sh
      - GITEA_RUNNER_REGISTRATION_TOKEN={{ lookup('community.general.onepassword', '4gnclyzztfgqq7yxa3ctxs6tey', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='GITEA_RUNNER_REGISTRATION_TOKEN') }}
      - GITEA_RUNNER_NAME=act-worker
      - CONFIG_FILE=/config.yaml
    restart: always
    networks:
      - gitea

networks:
  gitea:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/gitea/gitea.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
---
- name: Deploy Gitea service
  block:
    - name: Set Gitea directories
      ansible.builtin.set_fact:
        gitea_data_dir: "/mnt/object_storage/services/gitea"
        gitea_service_dir: "{{ ansible_env.HOME }}/services/gitea"

    - name: Create Gitea directories
      ansible.builtin.file:
        path: "{{ gitea_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ gitea_data_dir }}"
        - "{{ gitea_service_dir }}"
      loop_control:
        loop_var: gitea_dir

    - name: Deploy Gitea docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ gitea_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: gitea_compose

    - name: Deploy Gitea act-runner-config.yaml
      ansible.builtin.template:
        src: act-runner-config.yaml.j2
        dest: "{{ gitea_service_dir }}/act-runner-config.yaml"
        mode: "0644"
      register: gitea_act_runner_config

    - name: Stop Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" down --remove-orphans
      when: gitea_compose.changed or gitea_act_runner_config.changed

    - name: Start Gitea service
      ansible.builtin.command: docker compose -f "{{ gitea_service_dir }}/docker-compose.yml" up -d
      when: gitea_compose.changed or gitea_act_runner_config.changed
@@ -0,0 +1,10 @@
name: golink
services:
  server:
    image: ghcr.io/tailscale/golink:main
    user: root
    environment:
      - TS_AUTHKEY={{ lookup('community.general.onepassword', '4gsgavajnxfpcrjvbkqhoc4drm', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='TS_AUTHKEY') }}
    volumes:
      - {{ golink_data_dir }}:/home/nonroot
    restart: "unless-stopped"
config/ansible/tasks/servers/services/golink/golink.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
- name: Deploy GoLink service
  block:
    - name: Set GoLink directories
      ansible.builtin.set_fact:
        golink_data_dir: "/mnt/object_storage/services/golink"
        golink_service_dir: "{{ ansible_env.HOME }}/services/golink"

    - name: Create GoLink directories
      ansible.builtin.file:
        path: "{{ golink_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ golink_data_dir }}"
        - "{{ golink_service_dir }}"
      loop_control:
        loop_var: golink_dir

    - name: Deploy GoLink docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ golink_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: golink_compose

    - name: Stop GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" down --remove-orphans
      when: golink_compose.changed

    - name: Start GoLink service
      ansible.builtin.command: docker compose -f "{{ golink_service_dir }}/docker-compose.yml" up -d
      when: golink_compose.changed
@@ -0,0 +1,42 @@
services:
  hoarder:
    image: ghcr.io/hoarder-app/hoarder:${HOARDER_VERSION:-release}
    restart: unless-stopped
    volumes:
      - {{ hoarder_data_dir }}/hoarder:/data
    ports:
      - 3500:3000
    env_file:
      - .env
    networks:
      - hoarder
      - caddy_network

  chrome:
    image: zenika/alpine-chrome:124
    restart: unless-stopped
    command:
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars
    networks:
      - hoarder

  meilisearch:
    image: getmeili/meilisearch:v1.11.1
    restart: unless-stopped
    env_file:
      - .env
    volumes:
      - {{ hoarder_data_dir }}/meilisearch:/meili_data
    networks:
      - hoarder

networks:
  hoarder:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/hoarder/dotenv.j2 (new file, 15 lines)
@@ -0,0 +1,15 @@
HOARDER_VERSION=release
MEILI_NO_ANALYTICS=true

MEILI_ADDR=http://meilisearch:7700
BROWSER_WEB_URL=http://chrome:9222

DATA_DIR=/data

TZ=Europe/Amsterdam
PUID=1000
PGID=100

NEXTAUTH_SECRET="{{ lookup('community.general.onepassword', 'osnzlfidxonvetmomdgn7vxu5a', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='NEXTAUTH_SECRET') }}"
MEILI_MASTER_KEY="{{ lookup('community.general.onepassword', 'osnzlfidxonvetmomdgn7vxu5a', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='MEILI_MASTER_KEY') }}"
OPENAI_API_KEY="{{ lookup('community.general.onepassword', 'osnzlfidxonvetmomdgn7vxu5a', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='OPENAI_API_KEY') }}"
config/ansible/tasks/servers/services/hoarder/hoarder.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
---
- name: Deploy Hoarder service
  block:
    - name: Set Hoarder directories
      ansible.builtin.set_fact:
        hoarder_data_dir: "/mnt/object_storage/services/hoarder"
        hoarder_service_dir: "{{ ansible_env.HOME }}/services/hoarder"

    - name: Create Hoarder directories
      ansible.builtin.file:
        path: "{{ hoarder_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ hoarder_data_dir }}"
        - "{{ hoarder_service_dir }}"
      loop_control:
        loop_var: hoarder_dir

    - name: Deploy Hoarder docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ hoarder_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: hoarder_compose

    - name: Deploy Hoarder .env
      ansible.builtin.template:
        src: dotenv.j2
        dest: "{{ hoarder_service_dir }}/.env"
        mode: "0644"
      register: hoarder_env

    - name: Stop Hoarder service
      ansible.builtin.command: docker compose -f "{{ hoarder_service_dir }}/docker-compose.yml" down --remove-orphans
      when: hoarder_compose.changed or hoarder_env.changed

    - name: Start Hoarder service
      ansible.builtin.command: docker compose -f "{{ hoarder_service_dir }}/docker-compose.yml" up -d
      when: hoarder_compose.changed or hoarder_env.changed
@@ -0,0 +1,91 @@
services:
  immich:
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    volumes:
      - {{ immich_data_dir }}:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=100
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich
      - caddy_network

  machine-learning:
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich

  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: unless-stopped
    networks:
      - immich

  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
    environment:
      PUID: 1000
      PGID: 1000
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - {{ immich_database_dir }}:/var/lib/postgresql/data
    healthcheck:
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    restart: unless-stopped
    networks:
      - immich

volumes:
  model-cache:

networks:
  immich:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/immich/dotenv.j2 (new file, 10 lines)
@@ -0,0 +1,10 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Europe/Amsterdam

# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release

DB_USERNAME=postgres
DB_PASSWORD=postgres
DB_DATABASE_NAME=immich
config/ansible/tasks/servers/services/immich/immich.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
---
- name: Deploy Immich service
  block:
    - name: Set Immich directories
      ansible.builtin.set_fact:
        immich_data_dir: "/mnt/object_storage/photos/immich-library"
        immich_database_dir: "/mnt/object_storage/services/immich/postgres"
        immich_service_dir: "{{ ansible_env.HOME }}/services/immich"

    - name: Create Immich directories
      ansible.builtin.file:
        path: "{{ immich_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ immich_data_dir }}"
        - "{{ immich_service_dir }}"
      loop_control:
        loop_var: immich_dir

    - name: Deploy Immich docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ immich_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: immich_compose

    - name: Deploy Immich .env
      ansible.builtin.template:
        src: dotenv.j2
        dest: "{{ immich_service_dir }}/.env"
        mode: "0644"
      register: immich_env

    - name: Stop Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" down --remove-orphans
      when: immich_compose.changed or immich_env.changed

    - name: Start Immich service
      ansible.builtin.command: docker compose -f "{{ immich_service_dir }}/docker-compose.yml" up -d
      when: immich_compose.changed or immich_env.changed
@@ -0,0 +1,42 @@
services:
  jellyfin:
    image: lscr.io/linuxserver/jellyfin:latest
    container_name: jellyfin
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - JELLYFIN_PublishedServerUrl=https://jellyfin.mvl.sh
    volumes:
      - {{ jellyfin_data_dir }}/jellyfin-config:/config
      - /mnt/object_storage/movies:/movies
      - /mnt/object_storage/tvshows:/tvshows
      - /mnt/object_storage/music:/music
    ports:
      - 8096:8096
      - 8920:8920
      - 7359:7359/udp
      - 1901:1900/udp
    restart: unless-stopped
    group_add:
      - "992"
      - "44"
    networks:
      - caddy_network

  fladder:
    image: ghcr.io/donutware/fladder:latest
    ports:
      - 5423:80
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - BASE_URL=https://jellyfin.mvl.sh
    networks:
      - caddy_network

networks:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/jellyfin/jellyfin.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
- name: Deploy Jellyfin service
  block:
    - name: Set Jellyfin directories
      ansible.builtin.set_fact:
        jellyfin_data_dir: "/mnt/object_storage/services/jellyfin"
        jellyfin_service_dir: "{{ ansible_env.HOME }}/services/jellyfin"

    - name: Create Jellyfin directories
      ansible.builtin.file:
        path: "{{ jellyfin_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ jellyfin_data_dir }}"
        - "{{ jellyfin_service_dir }}"
      loop_control:
        loop_var: jellyfin_dir

    - name: Deploy Jellyfin docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ jellyfin_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: jellyfin_compose

    - name: Stop Jellyfin service
      ansible.builtin.command: docker compose -f "{{ jellyfin_service_dir }}/docker-compose.yml" down --remove-orphans
      when: jellyfin_compose.changed

    - name: Start Jellyfin service
      ansible.builtin.command: docker compose -f "{{ jellyfin_service_dir }}/docker-compose.yml" up -d
      when: jellyfin_compose.changed
@@ -0,0 +1,22 @@
services:
  juicefs-redis:
    image: redis:latest
    restart: always
    ports:
      - "6379:6379"
    volumes:
      - /mnt/services/redis-data:/data
    command: ["redis-server", "--appendonly", "yes", "--requirepass", "{{ REDIS_PASSWORD }}"]
    environment:
      - TZ=Europe/Amsterdam
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "{{ REDIS_PASSWORD }}", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 5s
    networks:
      - juicefs-network

networks:
  juicefs-network:
config/ansible/tasks/servers/services/redis/redis.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
---
- name: Deploy Redis for JuiceFS
  block:
    - name: Set Redis facts
      ansible.builtin.set_fact:
        redis_service_dir: "{{ ansible_env.HOME }}/services/juicefs-redis"
        redis_password: "{{ lookup('community.general.onepassword', '4cioblm633bdkl6put35lk6ql4', vault='j7nmhqlsjmp2r6umly5t75hzb4', field='password') }}"

    - name: Create Redis service directory
      ansible.builtin.file:
        path: "{{ redis_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Redis docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ redis_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: redis_compose
      vars:
        REDIS_PASSWORD: "{{ redis_password }}"

    - name: Check if juicefs.service exists
      ansible.builtin.stat:
        path: /etc/systemd/system/juicefs.service
      register: juicefs_service_stat

    - name: Stop juicefs.service to unmount JuiceFS
      ansible.builtin.systemd:
        name: juicefs.service
        state: stopped
        enabled: false
      register: juicefs_stop
      changed_when: juicefs_stop.changed
      when: redis_compose.changed and juicefs_service_stat.stat.exists

    - name: List running containers
      ansible.builtin.command: docker ps -q
      register: docker_ps
      changed_when: docker_ps.rc == 0
      when: redis_compose.changed

    - name: Stop all docker containers
      ansible.builtin.command: docker stop {{ item }}
      loop: "{{ docker_ps.stdout_lines }}"
      register: docker_stop
      changed_when: docker_stop.rc == 0
      when: redis_compose.changed
      ignore_errors: true

    - name: Start Redis service
      ansible.builtin.command: docker compose -f "{{ redis_service_dir }}/docker-compose.yml" up -d
      register: redis_start
      changed_when: redis_start.rc == 0

    - name: Wait for Redis to be ready
      ansible.builtin.wait_for:
        host: localhost
        port: 6379
        timeout: 30

    - name: Start juicefs.service to mount JuiceFS
      ansible.builtin.systemd:
        name: juicefs.service
        state: started
        enabled: true
      register: juicefs_start
      changed_when: juicefs_start.changed
      when: juicefs_service_stat.stat.exists

    - name: Restart containers that were stopped
      ansible.builtin.command: docker start {{ item }}
      loop: "{{ docker_stop.results | map(attribute='item') | list }}"
      register: docker_restart
      changed_when: docker_restart.rc == 0
      when: redis_compose.changed
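The stop/start steps above assume a systemd unit already exists at /etc/systemd/system/juicefs.service that mounts a JuiceFS filesystem backed by this Redis instance. That unit is not part of this diff; a minimal sketch of a task that could create it follows, where the juicefs binary path, Redis metadata URL, and mount point are assumptions, not the repository's actual values.

- name: Install juicefs.service (illustrative sketch; unit contents are assumed, not from this repo)
  ansible.builtin.copy:
    dest: /etc/systemd/system/juicefs.service
    mode: "0644"
    content: |
      [Unit]
      Description=JuiceFS mount (hypothetical example)
      After=network-online.target docker.service

      [Service]
      Type=simple
      # Foreground mount; the Redis URL and mount point below are placeholders.
      ExecStart=/usr/local/bin/juicefs mount redis://:CHANGE_ME@localhost:6379/1 /mnt/object_storage
      ExecStop=/bin/umount /mnt/object_storage
      Restart=on-failure

      [Install]
      WantedBy=multi-user.target
  become: true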
@@ -0,0 +1,90 @@
services:
  db:
    image: {{ seafile_db_image | default('mariadb:10.11') }}
    container_name: seafile-mysql
    environment:
      MYSQL_ROOT_PASSWORD: {{ seafile_mysql_root_password | default('ROOT_PASSWORD') }}
      MYSQL_USER: {{ seafile_mysql_db_user | default('seafile') }}
      MYSQL_PASSWORD: {{ seafile_mysql_db_password | default('PASSWORD') }}
      MYSQL_DATABASE: {{ seafile_mysql_db_name | default('seafile') }}
    volumes:
      - {{ seafile_mysql_volume | default('/opt/seafile-mysql/db') }}:/var/lib/mysql
    networks:
      - seafile-net
    restart: unless-stopped

  memcached:
    image: {{ seafile_memcached_image | default('memcached:1.6.29') }}
    container_name: seafile-memcached
    entrypoint: memcached -m 256
    networks:
      - seafile-net
    restart: unless-stopped

  seafile:
    image: {{ seafile_image | default('seafileltd/seafile-mc:12.0-latest') }}
    container_name: seafile
    environment:
      - DB_HOST={{ seafile_mysql_db_host | default('db') }}
      - DB_ROOT_PASSWD={{ seafile_mysql_root_password | default('ROOT_PASSWORD') }}
      - TIME_ZONE={{ time_zone | default('Europe/Amsterdam') }}
      - SEAFILE_ADMIN_EMAIL={{ seafile_admin_email | default('menno@vleeuwen.me') }}
      - SEAFILE_ADMIN_PASSWORD={{ seafile_admin_password | default('WIP123') }}
      - SEAFILE_SERVER_HOSTNAME={{ seafile_server_hostname | default('sf.mvl.sh') }}
      - SEAFILE_SERVER_LETSENCRYPT=false
      - SEADRIVE_SERVER_LETSENCRYPT=false
      - SEAFILE_SERVER_PROTOCOL={{ seafile_server_protocol | default('http') }}
      - JWT_PRIVATE_KEY={{ jwt_private_key | default('') }}
      - ENABLE_SEADOC=false
    volumes:
      - {{ seafile_volume | default('/opt/seafile-data') }}:/shared
    networks:
      - seafile-net
      - caddy_network
    ports:
      - 8001:80
      - 8082:8082
      - 8000:8000
    restart: unless-stopped
    depends_on:
      - db
      - memcached

  notification-server:
    image: {{ notification_server_image | default('seafileltd/notification-server:12.0-latest') }}
    container_name: notification-server
    environment:
      - DB_HOST={{ seafile_mysql_db_host | default('db') }}
      - DB_ROOT_PASSWD={{ seafile_mysql_root_password | default('ROOT_PASSWORD') }}
      - TIME_ZONE={{ time_zone | default('Europe/Amsterdam') }}
    volumes:
      - {{ notification_server_volume | default('/opt/notification-data') }}:/shared
      - {{ seafile_volume | default('/opt/seafile-data') }}:/shared/seafile
    networks:
      - seafile-net
      - caddy_network
    depends_on:
      - db
      - seafile
    restart: unless-stopped

  onlyoffice:
    image: onlyoffice/documentserver:8.3.1.1
    restart: unless-stopped
    environment:
      - JWT_ENABLED=true
      - JWT_SECRET={{ jwt_private_key | default('') }}
      - WOPI_ENABLED=false
    volumes:
      - {{ seafile_data_dir }}/onlyoffice/logs:/var/log/onlyoffice
      - {{ seafile_data_dir }}/onlyoffice/data:/var/www/onlyoffice/Data
      - {{ seafile_data_dir }}/onlyoffice/lib:/var/lib/onlyoffice
    networks:
      - seafile-net
      - caddy_network

networks:
  seafile-net:
  caddy_network:
    external: true
    name: caddy_default
config/ansible/tasks/servers/services/seafile/seafile.yml (new file, 72 lines)
@@ -0,0 +1,72 @@
---
- name: Deploy Seafile service
  block:
    - name: Set Seafile directories
      ansible.builtin.set_fact:
        seafile_data_dir: "/mnt/object_storage/services/seafile"
        seafile_service_dir: "{{ ansible_env.HOME }}/services/seafile"

    - name: Set Seafile configuration variables
      ansible.builtin.set_fact:
        # Docker images
        seafile_image: "seafileltd/seafile-mc:12.0-latest"
        seafile_db_image: "mariadb:10.11"
        seafile_memcached_image: "memcached:1.6.29"
        notification_server_image: "seafileltd/notification-server:12.0-latest"

        # Volume paths
        seafile_volume: "{{ seafile_data_dir }}/seafile-data"
        seafile_mysql_volume: "{{ seafile_data_dir }}/seafile-mysql/db"
        notification_server_volume: "{{ seafile_data_dir }}/notification-data"

        # Database settings
        seafile_mysql_db_host: "db"
        seafile_mysql_root_password: >
          {{ lookup('community.general.onepassword', 'bbzudwdo3byqs4pscd2wy7qsn4',
          vault='j7nmhqlsjmp2r6umly5t75hzb4', field='MYSQL_ROOT_PASSWORD') }}
        seafile_mysql_db_user: "seafile"
        seafile_mysql_db_password: >
          {{ lookup('community.general.onepassword', 'bbzudwdo3byqs4pscd2wy7qsn4',
          vault='j7nmhqlsjmp2r6umly5t75hzb4', field='MYSQL_PASSWORD') }}

        # Server settings
        time_zone: "Europe/Amsterdam"
        jwt_private_key: >
          {{ lookup('community.general.onepassword', 'bbzudwdo3byqs4pscd2wy7qsn4',
          vault='j7nmhqlsjmp2r6umly5t75hzb4', field='jwt_private_key') }}
        seafile_server_hostname: "sf.mvl.sh"
        seafile_server_protocol: "https"

        # Admin credentials
        seafile_admin_email: "menno@vleeuwen.me"
        seafile_admin_password: >
          {{ lookup('community.general.onepassword', 'bbzudwdo3byqs4pscd2wy7qsn4',
          vault='j7nmhqlsjmp2r6umly5t75hzb4', field='password') }}

    - name: Create Seafile directories
      ansible.builtin.file:
        path: "{{ seafile_dir }}"
        state: directory
        mode: "0755"
      loop:
        - "{{ seafile_data_dir }}"
        - "{{ seafile_service_dir }}"
        - "{{ notification_server_volume }}/logs"
        - "{{ seafile_volume }}/logs"
      loop_control:
        loop_var: seafile_dir

    - name: Deploy Seafile configuration files
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ seafile_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: seafile_configs

    - name: Stop Seafile service
      ansible.builtin.command: docker compose -f "{{ seafile_service_dir }}/docker-compose.yml" down --remove-orphans
      when: seafile_configs.changed

    - name: Start Seafile service
      ansible.builtin.command: docker compose -f "{{ seafile_service_dir }}/docker-compose.yml" up -d
      when: seafile_configs.changed
config/ansible/tasks/servers/services/service_cleanup.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
- name: Cleanup disabled services
  block:
    - name: Prepare cleanup list
      ansible.builtin.set_fact:
        services_to_cleanup: "{{ services | selectattr('enabled', 'equalto', false) | list }}"

    - name: Check service directories existence for disabled services
      ansible.builtin.stat:
        path: "{{ ansible_env.HOME }}/services/{{ item.name }}"
      register: service_dir_results
      loop: "{{ services_to_cleanup }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Filter services with existing directories
      ansible.builtin.set_fact:
        services_with_dirs: "{{ service_dir_results.results | selectattr('stat.exists', 'equalto', true) | map(attribute='item') | list }}"

    - name: Check if docker-compose file exists for services to cleanup
      ansible.builtin.stat:
        path: "{{ ansible_env.HOME }}/services/{{ item.name }}/docker-compose.yml"
      register: compose_file_results
      loop: "{{ services_with_dirs }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Stop disabled services with docker-compose files
      ansible.builtin.command: docker compose -f "{{ ansible_env.HOME }}/services/{{ item.item.name }}/docker-compose.yml" down --remove-orphans
      loop: "{{ compose_file_results.results | selectattr('stat.exists', 'equalto', true) }}"
      loop_control:
        label: "{{ item.item.name }}"
      register: service_stop_results
      become: false
      failed_when: false # Continue even if the command fails

    - name: Remove service directories for disabled services
      ansible.builtin.file:
        path: "{{ ansible_env.HOME }}/services/{{ item.name }}"
        state: absent
      loop: "{{ services_with_dirs }}"
      loop_control:
        label: "{{ item.name }}"
config/ansible/tasks/servers/services/services.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
- name: Include service cleanup tasks
  ansible.builtin.include_tasks: service_cleanup.yml

- name: Include service tasks
  ansible.builtin.include_tasks: "{{ item.name }}/{{ item.name }}.yml"
  loop: "{{ services }}"
  when: item.enabled | bool
  loop_control:
    label: "{{ item.name }}"
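Both the cleanup tasks and the include loop above iterate over a `services` variable that is defined outside this diff. A minimal sketch of the shape they expect, with illustrative entries only (the real list lives elsewhere in the repository):

services:
  - name: gitea      # must match the services/<name>/<name>.yml layout used above
    enabled: true
  - name: factorio
    enabled: true
  - name: hoarder
    enabled: false   # disabled: brought down via docker compose, then its directory is removed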
@@ -0,0 +1,22 @@
services:
  uptime-kuma:
    image: louislam/uptime-kuma:latest
    restart: unless-stopped
    volumes:
      - {{ uptime_kuma_data_dir }}:/app/data
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
    ports:
      - "3001:3001"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - caddy_network

networks:
  caddy_network:
    external: true
    name: caddy_default
@@ -0,0 +1,28 @@
---
- name: Deploy Uptime Kuma service
  block:
    - name: Set Uptime Kuma directories
      ansible.builtin.set_fact:
        uptime_kuma_service_dir: "{{ ansible_env.HOME }}/services/uptime-kuma"
        uptime_kuma_data_dir: "/mnt/object_storage/services/uptime-kuma"

    - name: Create Uptime Kuma directory
      ansible.builtin.file:
        path: "{{ uptime_kuma_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy Uptime Kuma docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ uptime_kuma_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: uptime_kuma_compose

    - name: Stop Uptime Kuma service if config changed
      ansible.builtin.command: docker compose -f "{{ uptime_kuma_service_dir }}/docker-compose.yml" down --remove-orphans
      when: uptime_kuma_compose.changed

    - name: Start Uptime Kuma service
      ansible.builtin.command: docker compose -f "{{ uptime_kuma_service_dir }}/docker-compose.yml" up -d
      when: uptime_kuma_compose.changed or uptime_kuma_start | default(false) | bool
@@ -0,0 +1,19 @@
services:
  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=1000
      - PGID=100
      - TZ=Europe/Amsterdam
      - SERVERURL=mvl.sh
      - PEERS=worklaptop,phone,desktop,personallaptop
      - ALLOWEDIPS=0.0.0.0/0, ::/0
    volumes:
      - "{{ wireguard_data_dir }}/wg-data:/config"
    ports:
      - 51820:51820/udp
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    restart: unless-stopped
@@ -0,0 +1,28 @@
---
- name: Deploy WireGuard service
  block:
    - name: Set WireGuard directories
      ansible.builtin.set_fact:
        wireguard_service_dir: "{{ ansible_env.HOME }}/services/wireguard"
        wireguard_data_dir: "/mnt/object_storage/services/wireguard"

    - name: Create WireGuard directory
      ansible.builtin.file:
        path: "{{ wireguard_service_dir }}"
        state: directory
        mode: "0755"

    - name: Deploy WireGuard docker-compose.yml
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ wireguard_service_dir }}/docker-compose.yml"
        mode: "0644"
      register: wireguard_compose

    - name: Stop WireGuard service
      ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" down --remove-orphans
      when: wireguard_compose.changed

    - name: Start WireGuard service
      ansible.builtin.command: docker compose -f "{{ wireguard_service_dir }}/docker-compose.yml" up -d
      when: wireguard_compose.changed
config/ansible/tasks/workstations/1password-browsers.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
---
- name: Check if 1Password is installed
  ansible.builtin.command: 1password --version
  register: onepassword_check
  changed_when: false
  failed_when: false

- name: Check if 1Password is running anywhere
  ansible.builtin.command: pgrep 1password
  register: onepassword_running
  changed_when: false
  failed_when: false

- name: Ensure 1Password custom allowed browsers directory exists
  ansible.builtin.file:
    path: /etc/1password
    state: directory
    mode: "0755"
  become: true

- name: Add browsers to 1Password custom allowed browsers
  ansible.builtin.copy:
    content: |
      ZenBrowser
      zen-browser
      app.zen_browser.zen
      zen
      Firefox
      firefox
    dest: /etc/1password/custom_allowed_browsers
    owner: root
    group: root
    mode: "0755"
  become: true
  register: custom_browsers_file

- name: Kill any running 1Password instances if configuration changed
  ansible.builtin.command: pkill 1password
  when: custom_browsers_file.changed and onepassword_running.stdout != ""
  changed_when: custom_browsers_file.changed and onepassword_running.stdout != ""

- name: If 1Password was killed, restart it
  ansible.builtin.command: screen -dmS 1password 1password
  when: custom_browsers_file.changed and onepassword_running.stdout != ""
  changed_when: custom_browsers_file.changed and onepassword_running.stdout != ""
config/ansible/tasks/workstations/firefox-apt.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
---
- name: Install Firefox via APT (Not Snap)
  block:
    - name: Remove Firefox Snap if installed
      community.general.snap:
        name: firefox
        state: absent
      become: true

    - name: Create APT keyring directory if it doesn't exist
      ansible.builtin.file:
        path: /etc/apt/keyrings
        state: directory
        mode: "0755"
      become: true

    - name: Import Mozilla APT repo signing key
      ansible.builtin.get_url:
        url: https://packages.mozilla.org/apt/repo-signing-key.gpg
        dest: /etc/apt/keyrings/packages.mozilla.org.asc
        mode: "0644"
      become: true

    - name: Add Mozilla APT repository
      ansible.builtin.lineinfile:
        path: /etc/apt/sources.list.d/mozilla.list
        line: "deb [signed-by=/etc/apt/keyrings/packages.mozilla.org.asc] https://packages.mozilla.org/apt mozilla main"
        create: true
        mode: "0644"
      become: true

    - name: Set Firefox package priority
      ansible.builtin.copy:
        dest: /etc/apt/preferences.d/mozilla
        content: |
          Package: *
          Pin: origin packages.mozilla.org
          Pin-Priority: 1000

          Package: firefox*
          Pin: release o=Ubuntu
          Pin-Priority: -1
        mode: "0644"
      become: true

    - name: Update apt cache
      ansible.builtin.apt:
        update_cache: true
      become: true

    - name: Remove Ubuntu's Firefox transition package
      ansible.builtin.apt:
        name: firefox
        state: absent
      become: true

    - name: Install Firefox from Mozilla's repository
      ansible.builtin.apt:
        name: firefox
        state: present
      become: true
config/ansible/tasks/workstations/firefoxpwa.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
---
- name: Install required packages for FirefoxPWA
  ansible.builtin.apt:
    name:
      - curl
      - gpg
      - apt-transport-https
      - debian-archive-keyring
    state: present
    update_cache: true
  become: true

- name: Download FirefoxPWA GPG key
  ansible.builtin.get_url:
    url: https://packagecloud.io/filips/FirefoxPWA/gpgkey
    dest: /usr/share/keyrings/firefoxpwa-keyring.asc
    mode: "0644"
  become: true

- name: Import FirefoxPWA GPG key
  ansible.builtin.shell:
    cmd: "gpg --dearmor < /usr/share/keyrings/firefoxpwa-keyring.asc > /usr/share/keyrings/firefoxpwa-keyring.gpg"
    creates: /usr/share/keyrings/firefoxpwa-keyring.gpg
  become: true

- name: Add FirefoxPWA repository
  ansible.builtin.copy:
    content: "deb [signed-by=/usr/share/keyrings/firefoxpwa-keyring.gpg] https://packagecloud.io/filips/FirefoxPWA/any any main"
    dest: /etc/apt/sources.list.d/firefoxpwa.list
    mode: "0644"
  become: true

- name: Update apt cache
  ansible.builtin.apt:
    update_cache: true
  become: true

- name: Install FirefoxPWA package
  ansible.builtin.apt:
    name: firefoxpwa
    state: present
  become: true

- name: Ensure FirefoxPWA integration is enabled for compatible browsers
  ansible.builtin.command:
    cmd: "firefoxpwa install --global"
  register: pwa_integration
  changed_when: "'Integration installed' in pwa_integration.stdout"
  failed_when: false
  become: true
config/ansible/tasks/workstations/flatpaks.yml (new file, 105 lines)
@@ -0,0 +1,105 @@
---
- name: Check if Flatpak is installed
  ansible.builtin.command: which flatpak
  register: flatpak_check
  changed_when: false
  failed_when: false

- name: Install Flatpak
  ansible.builtin.package:
    name: flatpak
    state: present
  become: true
  when: flatpak_check.rc != 0

- name: Add Flathub remote repository
  community.general.flatpak_remote:
    name: flathub
    flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
    state: present

- name: Get list of system installed Flatpaks
  ansible.builtin.command: flatpak list --system --app --columns=application
  register: installed_system_flatpaks
  changed_when: false

- name: Get list of system remotes
  ansible.builtin.command: flatpak remote-list --system --columns=name
  register: system_remotes
  changed_when: false

- name: Define desired system Flatpaks
  ansible.builtin.set_fact:
    desired_system_flatpaks:
      # GNOME Software
      - org.gnome.Extensions
      - org.gnome.Weather
      - org.gnome.Sudoku

      # Games
      - io.github.openhv.OpenHV
      - net.lutris.Lutris
      - info.beyondallreason.bar
      - org.godotengine.Godot
      - dev.bragefuglseth.Keypunch
      - org.prismlauncher.PrismLauncher

      # Multimedia
      - com.spotify.Client
      - com.plexamp.Plexamp
      - tv.plex.PlexDesktop
      - io.bassi.Amberol

      # Utilities
      - org.fkoehler.KTailctl
      - de.haeckerfelix.AudioSharing
      - com.usebottles.bottles
      - com.github.tchx84.Flatseal
      - com.github.wwmm.easyeffects
      - org.onlyoffice.desktopeditors
      - io.gitlab.adhami3310.Impression
      - io.ente.auth
      - io.github.fastrizwaan.WineZGUI
      - net.davidotek.pupgui2
      - com.mastermindzh.tidal-hifi
      - io.github.flattool.Warehouse
      - io.github.nokse22.Exhibit
      - net.nokyan.Resources
      - dev.zed.Zed
      - page.tesk.Refine
      - io.github.flattool.Ignition
      - io.github.bytezz.IPLookup
      - org.gaphor.Gaphor

- name: Define desired system Flatpak remotes
  ansible.builtin.set_fact:
    desired_system_flatpak_remotes:
      - flathub

- name: Add desired system Flatpak remotes
  community.general.flatpak_remote:
    name: "{{ item }}"
    state: present
    method: system
  loop: "{{ desired_system_flatpak_remotes }}"

- name: Remove undesired system Flatpak remotes
  community.general.flatpak_remote:
    name: "{{ item }}"
    state: absent
    method: system
  loop: "{{ system_remotes.stdout_lines | difference(desired_system_flatpak_remotes) }}"

- name: Install/Upgrade Flatpak packages
  community.general.flatpak:
    name: "{{ item }}"
    state: present
    method: system
  loop: "{{ desired_system_flatpaks }}"

- name: Remove undesired system Flatpaks
  community.general.flatpak:
    name: "{{ item }}"
    state: absent
    method: system
  loop: "{{ installed_system_flatpaks.stdout_lines | difference(desired_system_flatpaks) }}"
config/ansible/tasks/workstations/gnome-extensions.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
---
- name: Install Pano - Clipboard Manager dependencies
  ansible.builtin.apt:
    name:
      - gir1.2-gda-5.0
      - gir1.2-gsound-1.0
    state: present
    update_cache: true
  become: true

- name: Install Pano - Clipboard Manager
  ansible.builtin.import_tasks: tasks/workstations/gnome-extensions/pano.yml

- name: Install Tiling Shell - Window Manager
  ansible.builtin.import_tasks: tasks/workstations/gnome-extensions/tilingshell.yml
@@ -0,0 +1,73 @@
---
- name: Manage GNOME extension
  vars:
    requested_git_tag: "{{ git_tag }}"
    extension_name: "{{ ext_name }}"
    extension_url: "{{ ext_url }}"
    extension_path: "{{ ansible_user_dir }}/.local/share/gnome-shell/extensions/{{ ext_id }}"
    version_file: "{{ extension_path }}/version.txt"
  block:
    - name: Check if extension is installed
      ansible.builtin.stat:
        path: "{{ extension_path }}"
      register: ext_check

    - name: Read last installed version
      ansible.builtin.slurp:
        src: "{{ version_file }}"
      register: installed_version
      ignore_errors: true
      when: ext_check.stat.exists

    - name: Determine if update is needed
      ansible.builtin.set_fact:
        update_needed: >-
          {{ installed_version.content is not defined or
             (installed_version.content | b64decode | trim != requested_git_tag) }}

    - name: Delete old extension if updating
      ansible.builtin.file:
        path: "{{ extension_path }}"
        state: absent
      when: update_needed

    - name: Create directory for extension
      ansible.builtin.file:
        path: "{{ extension_path }}"
        state: directory
        mode: "0755"
      when: not ext_check.stat.exists or update_needed

    - name: Download extension
      ansible.builtin.get_url:
        url: "{{ extension_url | replace('%TAG%', requested_git_tag) }}"
        dest: "{{ extension_path }}/release.zip"
        mode: "0644"
      when: update_needed or not ext_check.stat.exists

    - name: Extract extension
      ansible.builtin.unarchive:
        src: "{{ extension_path }}/release.zip"
        dest: "{{ extension_path }}"
      when: update_needed or not ext_check.stat.exists

    - name: Store installed version of the extension
      ansible.builtin.copy:
        content: "{{ requested_git_tag }}"
        dest: "{{ version_file }}"
        mode: "0644"
      when: update_needed or not ext_check.stat.exists

    - name: Cleanup post installation
      ansible.builtin.file:
        path: "{{ extension_path }}/release.zip"
        state: absent
      when: not ext_check.stat.exists or update_needed

    - name: Notify user of required GNOME Shell reload
      ansible.builtin.debug:
        msg: >
          Please reload GNOME Shell by pressing Alt + F2, typing 'r' and pressing Enter.
          Then enable the {{ extension_name }} in GNOME Tweaks.
          Or on Wayland, log out and back in.
      when: not ext_check.stat.exists or update_needed
@@ -0,0 +1,8 @@
---
- name: Manage Pano Clipboard Manager
  ansible.builtin.include_tasks: tasks/workstations/gnome-extensions/manage_gnome_extension.yml
  vars:
    git_tag: "v23-alpha3"
    ext_name: "Pano - Clipboard Manager"
    ext_url: "https://github.com/oae/gnome-shell-pano/releases/download/%TAG%/pano@elhan.io.zip"
    ext_id: "pano@elhan.io"
@@ -0,0 +1,8 @@
---
- name: Manage Tiling Shell - Window Manager
  ansible.builtin.include_tasks: tasks/workstations/gnome-extensions/manage_gnome_extension.yml
  vars:
    git_tag: "16.1"
    ext_name: "Tiling Shell - Window Manager"
    ext_url: "https://github.com/domferr/tilingshell/releases/download/%TAG%/tilingshell@ferrarodomenico.com.zip"
    ext_id: "tilingshell@ferrarodomenico.com"
config/ansible/tasks/workstations/snaps.yml (new file, 74 lines)
@@ -0,0 +1,74 @@
---
- name: Ensure snapd is installed
  ansible.builtin.package:
    name: snapd
    state: present
  become: true

- name: Ensure snapd service is enabled and started
  ansible.builtin.systemd:
    name: snapd
    state: started
    enabled: true
  become: true

- name: Get list of installed Snaps
  ansible.builtin.command: snap list
  register: installed_snaps
  changed_when: false

- name: Define protected system snaps
  ansible.builtin.set_fact:
    system_snaps:
      - snapd
      - core
      - core18
      - core20
      - core22
      - bare
      - gtk-common-themes
      - gnome-3-28-1804
      - gnome-3-34-1804
      - gnome-3-38-2004
      - gnome-42-2204
      - desktop-security-center
      - firmware-updater
      - prompting-client
      - snap-store
      - snapd-desktop-integration

- name: Define desired Snaps
  ansible.builtin.set_fact:
    desired_snaps:
      - name: telegram-desktop
        classic: false
      - name: whatsapp-desktop-client
        classic: false

- name: Install desired Snap packages
  ansible.builtin.command: "snap install {{ item.name }} {{ '--classic' if item.classic else '' }}"
  loop: "{{ desired_snaps }}"
  become: true
  register: snap_install
  changed_when: "'already installed' not in snap_install.stderr"
  failed_when:
    - snap_install.rc != 0
    - "'already installed' not in snap_install.stderr"

- name: Remove undesired Snap packages
  ansible.builtin.command: "snap remove {{ item }}"
  become: true
  loop: >-
    {{
      installed_snaps.stdout_lines[1:]
      | map('split', ' ')
      | map('first')
      | difference(desired_snaps | map(attribute='name'))
      | difference(system_snaps)
    }}
  register: snap_remove
  changed_when: snap_remove.rc == 0
  failed_when:
    - snap_remove.rc != 0
    - "'not installed' not in snap_remove.stderr"
    - "'cannot remove' not in snap_remove.stderr"
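The removal loop above derives its list from the raw `snap list` output: it drops the header row, takes the first whitespace-separated token of each remaining line (the snap name), then filters out everything in `desired_snaps` and `system_snaps`. A throwaway debug task (illustrative only, not part of the repository) shows the computed list without removing anything:

- name: Show snaps that would be removed (illustrative only)
  ansible.builtin.debug:
    msg: >-
      {{
        installed_snaps.stdout_lines[1:]
        | map('split', ' ')
        | map('first')
        | difference(desired_snaps | map(attribute='name'))
        | difference(system_snaps)
      }}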
config/ansible/tasks/workstations/symlinks.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
- name: Set user home directory
  ansible.builtin.set_fact:
    user_home: "{{ ansible_env.HOME if ansible_user_id == 'root' else lookup('env', 'HOME') }}"

- name: Create workstation symlinks
  ansible.builtin.file:
    src: "{{ item.src | replace('~', user_home) | replace('$DOTFILES_PATH', lookup('env', 'DOTFILES_PATH')) }}"
    dest: "{{ item.dest | replace('~', user_home) }}"
    state: link
    force: true
    follow: false
  loop:
    - { src: "$DOTFILES_PATH/vscode/settings.json", dest: "~/.config/Code/User/settings.json" }
config/ansible/tasks/workstations/ulauncher.yml (new file, 104 lines)
@@ -0,0 +1,104 @@
---
- name: Ensure Ulauncher and dependencies are installed
  ansible.builtin.package:
    name:
      - ulauncher
      # - python3-pytz
      - python3-tornado
      - python3-docker
      - python3-requests
      - python3-pint
      - python3-simpleeval
      - python3-parsedatetime
      - python3-fuzzywuzzy
      # - python3-thefuzz
    state: present
  become: true

- name: Ensure Ulauncher config directory exists
  ansible.builtin.file:
    path: "~/.config/ulauncher"
    state: directory
    mode: "0755"

- name: Configure Ulauncher settings
  ansible.builtin.copy:
    content: "{{ ulauncher_settings | to_json }}"
    dest: "~/.config/ulauncher/settings.json"
    mode: "0644"
  vars:
    ulauncher_settings:
      blacklisted-desktop-dirs: "/usr/share/locale:/usr/share/app-install:/usr/share/kservices5:/usr/share/fk5:/usr/share/kservicetypes5:/usr/share/applications/screensavers:/usr/share/kde4:/usr/share/mimelnk"
      clear-previous-query: true
      disable-desktop-filters: false
      grab-mouse-pointer: false
      hotkey-show-app: "<Control>Space"
      render-on-screen: "mouse-pointer-monitor"
      show-indicator-icon: true
      show-recent-apps: "4"
      terminal-command: "ptyxis"
      theme-name: "dark"

- name: Configure Ulauncher shortcuts
  ansible.builtin.copy:
    content: "{{ ulauncher_shortcuts | to_json }}"
    dest: "~/.config/ulauncher/shortcuts.json"
    mode: "0644"
  vars:
    ulauncher_shortcuts:
      "0bab9d26-5464-4501-bc95-9995d8fa1405":
        id: "0bab9d26-5464-4501-bc95-9995d8fa1405"
        name: "Google Search"
        keyword: "g"
        cmd: "https://google.com/search?q=%s"
        icon: "/usr/share/ulauncher/media/google-search-icon.png"
        is_default_search: true
        run_without_argument: false
        added: 0
      "d72834d1-5d81-4f5d-a9f6-386b12110f56":
        id: "d72834d1-5d81-4f5d-a9f6-386b12110f56"
        name: "Stack Overflow"
        keyword: "so"
        cmd: "https://stackoverflow.com/search?q=%s"
        icon: "/usr/share/ulauncher/media/stackoverflow-icon.svg"
        is_default_search: true
        run_without_argument: false
        added: 0
      "4dfcffeb-879c-49b2-83bb-c16254a7ce75":
        id: "4dfcffeb-879c-49b2-83bb-c16254a7ce75"
        name: "GoLink"
        keyword: "go"
        cmd: "http://go/%s"
        icon:
        is_default_search: false
        run_without_argument: false
        added: 0
      "40d1ed32-8fd3-4bf8-92f5-cbaa7cd607a1":
        id: "40d1ed32-8fd3-4bf8-92f5-cbaa7cd607a1"
        name: "NixOS"
        keyword: "nix"
        cmd: "https://search.nixos.org/packages?query=%s"
        icon:
        is_default_search: false
        run_without_argument: false
        added: 0
      "40d1ed32-8fd3-4ff4-92f6-fbaa7cd607a1":
        id: "40d1ed32-8fd3-4ff4-92f6-fbaa7cd607a1"
        name: "Flathub"
        keyword: "flat"
        cmd: "https://flathub.org/apps/search?q=%s"
        icon:
        is_default_search: false
        run_without_argument: false
        added: 0
      "43d1ed32-8fd3-fbf8-94f5-cffa7cd607a1":
        id: "43d1ed32-8fd3-fbf8-94f5-cffa7cd607a1"
        name: "GitHub"
        keyword: "gh"
        cmd: "https://github.com/search?q=%s"
        icon:
        is_default_search: false
        run_without_argument: false
        added: 0
config/ansible/tasks/workstations/vscode.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
---
- name: Gather OS facts
  ansible.builtin.setup:
    filter: ansible_distribution
  register: os_facts

- name: Import Microsoft GPG key (Fedora)
  ansible.builtin.rpm_key:
    key: https://packages.microsoft.com/keys/microsoft.asc
  when: os_facts.ansible_facts.ansible_distribution == 'Fedora'

- name: Add VSCode repository (Fedora)
  ansible.builtin.copy:
    content: |
      [code]
      name=Visual Studio Code
      baseurl=https://packages.microsoft.com/yumrepos/vscode
      enabled=1
      gpgcheck=1
      gpgkey=https://packages.microsoft.com/keys/microsoft.asc
    dest: /etc/yum.repos.d/vscode.repo
    mode: "0644"
  when: os_facts.ansible_facts.ansible_distribution == 'Fedora'

- name: Add VSCode repository (Ubuntu/Debian)
  ansible.builtin.apt_repository:
    repo: "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main"
    state: present
  when: os_facts.ansible_facts.ansible_distribution in ['Ubuntu', 'Debian']

- name: Import Microsoft GPG key (Ubuntu/Debian)
  ansible.builtin.apt_key:
    url: https://packages.microsoft.com/keys/microsoft.asc
    state: present
  when: os_facts.ansible_facts.ansible_distribution in ['Ubuntu', 'Debian']

- name: Check if VSCode is installed
  ansible.builtin.command: code --version
  register: vscode_check
  changed_when: false
  failed_when: false

- name: Install VSCode (Fedora)
  ansible.builtin.package:
    name: code
    state: present
  when: vscode_check.rc != 0 and os_facts.ansible_facts.ansible_distribution == 'Fedora'

- name: Install VSCode (Ubuntu/Debian)
  ansible.builtin.apt:
    name: code
    state: present
  when: vscode_check.rc != 0 and os_facts.ansible_facts.ansible_distribution in ['Ubuntu', 'Debian']
config/ansible/tasks/workstations/workstation.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
---
- name: Workstation Setup
  block:
    - name: Include workstation symlinks tasks
      ansible.builtin.import_tasks: tasks/workstations/symlinks.yml

    - name: Include GNOME Extensions tasks
      ansible.builtin.import_tasks: tasks/workstations/gnome-extensions.yml

    - name: Include Firefox APT installation tasks
      ansible.builtin.import_tasks: tasks/workstations/firefox-apt.yml
      when: ansible_pkg_mgr == 'apt'

    - name: Include flatpaks tasks
      ansible.builtin.import_tasks: tasks/workstations/flatpaks.yml

    - name: Include snaps tasks
      ansible.builtin.import_tasks: tasks/workstations/snaps.yml

    - name: Include VSCode tasks
      ansible.builtin.import_tasks: tasks/workstations/vscode.yml
      become: true

    - name: Include Zen browser tasks
      ansible.builtin.import_tasks: tasks/workstations/zen-browser.yml
      vars:
        browser_name: "zen"
        browser_executable: "zen"

    - name: Include 1Password Browsers tasks
      ansible.builtin.import_tasks: tasks/workstations/1password-browsers.yml

    - name: Include Firefox PWA tasks
      ansible.builtin.import_tasks: tasks/workstations/firefoxpwa.yml

    - name: Include Ulauncher tasks
      ansible.builtin.import_tasks: tasks/workstations/ulauncher.yml

    - name: Ensure workstation common packages are installed
      ansible.builtin.package:
        name:
          ###### THE FOLLOWING PACKAGES ARE DISABLED BECAUSE THEY ARE MISSING FROM UBUNTU REPOS ######
          # Steam and its dependencies
          # - steam
          # - steam-devices
          #############################################################################################
          # Statistics HUD for gaming
          - mangohud
          # Used for VSCode Extensions
          - nodejs
          # File Manager
          - nemo
          # File Manager Extensions
          - nemo-compare
          - nemo-data
          - nemo-fileroller
          - nemo-font-manager
          - nemo-gtkhash
          - nemo-python
        state: present
      become: true
Some files were not shown because too many files have changed in this diff.