Compare commits

...

99 Commits

Author SHA1 Message Date
9f949f13d1 minecraft: update mods + add modernfix + debugify 2026-02-28 02:30:19 -05:00
59080fe1b3 fmt 2026-02-28 02:25:38 -05:00
12fca8840d update 2026-02-28 01:57:42 -05:00
49f06fc26c arr-init: extract to standalone flake repo 2026-02-27 15:39:19 -05:00
2c0811cfe9 minecraft: make more responsive 2026-02-27 00:04:07 -05:00
9692fe5f08 update 2026-02-25 19:01:56 -05:00
c142b5d045 ntfy-alerts: suppress notifications for sanoid 2026-02-25 02:10:37 -05:00
16c84fdcb6 zfs: fix sanoid dataset name for jellyfin cache 2026-02-24 21:21:24 -05:00
196f06e41f flake: expose tests as checks output 2026-02-24 14:51:11 -05:00
8013435d99 ntfy-alerts: init 2026-02-24 14:44:00 -05:00
28e3090c72 matrix: update 2026-02-24 13:05:00 -05:00
a22c5b30fe update 2026-02-24 13:04:55 -05:00
c2908f594c matrix: update 2026-02-23 15:24:31 -05:00
9df3f3cae9 update 2026-02-23 15:13:47 -05:00
ea75dad5ba matrix: update 2026-02-21 22:47:33 -05:00
1e25d86d44 update 2026-02-21 22:19:36 -05:00
23475927a1 qbt: increase ConnectionSpeed 2026-02-20 15:27:56 -05:00
fe4040bf3b matrix: update 2026-02-20 15:25:44 -05:00
d91b651152 formating 2026-02-20 15:19:46 -05:00
0a3f93c98d qbt: fix permissions 2026-02-20 14:12:13 -05:00
304ad7f308 qbt: tweak 2026-02-20 11:05:53 -05:00
4fe33b9b32 update 2026-02-19 23:15:26 -05:00
0a0c14993d qbt: Coalesce Read Write 2026-02-19 23:14:27 -05:00
155ebbafcd qbittorrent: enable queueing and AutoTMM 2026-02-19 23:12:18 -05:00
2fed80cdb2 firewall: trust wg-br interface 2026-02-19 23:12:18 -05:00
318908d8ca arr-init: add module for API-based configuration 2026-02-19 23:12:16 -05:00
c35a65e1bf recyclarr: init 2026-02-19 19:12:19 -05:00
af3a3d738e jellyseerr: init 2026-02-19 19:12:19 -05:00
879a3278ee bazarr: init 2026-02-19 19:12:19 -05:00
89d939d37f radarr: init 2026-02-19 19:12:19 -05:00
c290671b52 sonarr: init 2026-02-19 19:12:19 -05:00
ba09476295 prowlarr: init 2026-02-19 19:12:01 -05:00
9b715ba110 qbt: GlobalMaxRatio 6.0 -> 7.0 2026-02-17 22:56:48 -05:00
f6628b9302 jellyfin-qbittorrent-monitor: add stream headroom 2026-02-17 14:31:34 -05:00
7484a11535 jellyfin-qbittorrent-monitor: fix upload 2026-02-17 14:00:05 -05:00
d46ccc8245 update 2026-02-17 00:27:56 -05:00
1988f1a28d minecraft: update mods 2026-02-16 21:57:49 -05:00
9a9ecc6556 jellyfin-qbittorrent-monitor: dynamic bandwidth management 2026-02-15 23:33:45 -05:00
cf3e876f27 matrix: update 2026-02-15 11:51:38 -05:00
935ca6361b update 2026-02-14 22:50:45 -05:00
aa219dcfff matrix: update 2026-02-14 22:49:50 -05:00
62a91a8615 fmt 2026-02-13 15:26:27 -05:00
c01b2336a7 matrix: fix elementx calls
Applies patch from: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1370
That I am working on. Also updates version to latest (at this time) git
2026-02-13 15:26:17 -05:00
f5abfd5bf6 fix(no-rgb): handle transient hardware unavailability during deploy 2026-02-12 18:48:41 -05:00
82add97a80 feat(tmpfiles): defer per-service file permissions to reduce boot time 2026-02-12 18:48:29 -05:00
84cbe82cb0 update 2026-02-12 12:45:28 -05:00
4e9e3f627b matrix: setup livekit
Needed for element X calls.
2026-02-11 22:14:12 -05:00
9cc63fcfb8 impermanence: fix /etc permissions after re-deploy 2026-02-11 15:41:30 -05:00
35f0c08ee2 ntfy: fix directory 2026-02-10 18:47:17 -05:00
0f1e249127 ntfy 2026-02-10 17:39:01 -05:00
f3e972b3a4 matrix: fix registration 2026-02-10 14:49:58 -05:00
e28f8a70df matrix: add coturn 2026-02-10 14:49:50 -05:00
f27068a974 matrix: fix private folder 2026-02-10 14:22:53 -05:00
795c5b3d41 Revert "matrix: disable"
This reverts commit a887edf510.
2026-02-10 14:08:43 -05:00
a887edf510 matrix: disable 2026-02-10 13:55:45 -05:00
4f71f61c4b matrix: fix continuwuity module 2026-02-10 13:54:22 -05:00
3187130cd3 update 2026-02-10 12:56:12 -05:00
11ab6de305 re-add matrix 2026-02-10 12:49:56 -05:00
b67416a74b syncthing: add grayjay backups 2026-02-06 14:43:08 -05:00
954e124b49 potentially fix fail2ban 2026-02-05 15:11:17 -05:00
a7d6018592 update 2026-02-05 01:33:55 -05:00
37fdf13a3f update 2026-02-03 12:25:24 -05:00
8176376f48 update 2026-02-01 21:30:50 -05:00
58c804ea41 update 2026-01-30 00:43:28 -05:00
a61fedb015 fail2ban: ignoreip from local network 2026-01-27 18:51:08 -05:00
2183ea8363 update 2026-01-26 23:09:22 -05:00
27ffe38ed3 xmrig: 12 threads 2026-01-26 17:51:16 -05:00
a0e6b8428e xmrig: 1gb pages 2026-01-26 14:25:25 -05:00
0b01fc3f28 xmrig 2026-01-26 14:15:27 -05:00
016520c579 update 2026-01-23 12:56:54 -05:00
47cc12f4ed cleanup 2026-01-23 00:29:24 -05:00
a766e67fec cleanup minecraft test 2026-01-22 22:40:40 -05:00
fdb1b559bc wg: don't hardcode namespaceAddress 2026-01-22 14:56:36 -05:00
3026897113 Revert "minecraft: fail2ban"
This reverts commit a23b3d8c5f.
2026-01-22 14:25:52 -05:00
a23b3d8c5f minecraft: fail2ban 2026-01-21 20:21:23 -05:00
4bf05f8b51 hostPlatform -> targetPlatform 2026-01-21 15:25:25 -05:00
d15ec9fe0b fix squaremap 2026-01-21 14:26:39 -05:00
89627e1299 update 2026-01-20 23:08:55 -05:00
897f9b2642 flake: impermanence nixpkgs follow nixpkgs 2026-01-20 23:08:41 -05:00
f87e395225 jellyfin-qbittorrent-monitor: don't use mock qbittorrent 2026-01-20 23:05:15 -05:00
9770e6d667 jellyfin-qbittorrent-monitor: fix mock qbittorrent 2026-01-20 22:38:18 -05:00
8ed67464d0 fmt 2026-01-20 19:48:20 -05:00
da6b4d1915 tests: fix all fail2ban NixOS VM tests
- Add explicit iptables banaction in security.nix for test compatibility
- Force IPv4 in all curl requests to prevent IPv4/IPv6 mismatch issues
- Fix caddy test: use basic_auth directive (not basicauth)
- Override service ports in tests to match direct connections (not via Caddy)
- Vaultwarden: override ROCKET_ADDRESS and ROCKET_LOG for external access
- Immich: increase VM memory to 4GB for stability
- Jellyfin: create placeholder log file and reload fail2ban after startup
- Add tests.nix entries for all 6 fail2ban tests

All tests now pass: ssh, caddy, gitea, vaultwarden, immich, jellyfin
2026-01-20 18:41:01 -05:00
f2ef562724 fail2ban: implement for jellyfin 2026-01-20 14:46:49 -05:00
d9236152aa fail2ban: implement for immich 2026-01-20 14:39:38 -05:00
ba45743ea0 fail2ban: implement for gitea 2026-01-20 14:39:29 -05:00
0214621a58 fail2ban: implement for bitwarden 2026-01-20 14:39:23 -05:00
aa2c61dcd3 fail2ban: implement for caddy basic auth 2026-01-20 14:35:20 -05:00
b550e495c8 nit: move fail2ban to security module 2026-01-20 14:11:15 -05:00
5ad5aff5e8 ssh: add fail2ban 2026-01-20 14:05:02 -05:00
d9a1a01f7f jellyfin-qbittorrent-monitor: handle qbittorrent going down state 2026-01-19 02:42:18 -05:00
eb5d0bb093 security things 2026-01-18 02:36:00 -05:00
c6b39a98cd update 2026-01-18 01:03:18 -05:00
11cacffe7d update 2026-01-15 14:01:27 -05:00
4881780186 monero: move back to hdds 2026-01-15 13:51:25 -05:00
f83e1170af syncthing 2026-01-13 16:55:19 -05:00
a93c789278 jellyfin-qbittorrent-monitor: don't mock out jellyfin for testing 2026-01-13 14:15:11 -05:00
df1d983b63 rework qbittorrent jellyfin monitor test 2026-01-13 13:41:23 -05:00
de89e70a05 impermanence: fix /etc/zfs cache 2026-01-13 13:13:49 -05:00
56 changed files with 3381 additions and 553 deletions

View File

@@ -18,6 +18,8 @@
./modules/age-secrets.nix ./modules/age-secrets.nix
./modules/secureboot.nix ./modules/secureboot.nix
./modules/no-rgb.nix ./modules/no-rgb.nix
./modules/security.nix
./modules/ntfy-alerts.nix
./services/postgresql.nix ./services/postgresql.nix
./services/jellyfin.nix ./services/jellyfin.nix
@@ -31,13 +33,26 @@
./services/jellyfin-qbittorrent-monitor.nix ./services/jellyfin-qbittorrent-monitor.nix
./services/bitmagnet.nix ./services/bitmagnet.nix
./services/arr/prowlarr.nix
./services/arr/sonarr.nix
./services/arr/radarr.nix
./services/arr/bazarr.nix
./services/arr/jellyseerr.nix
./services/arr/recyclarr.nix
./services/arr/init.nix
./services/soulseek.nix ./services/soulseek.nix
./services/ups.nix ./services/ups.nix
./services/bitwarden.nix ./services/bitwarden.nix
./services/matrix.nix
./services/coturn.nix
./services/livekit.nix
./services/monero.nix ./services/monero.nix
./services/xmrig.nix
# KEEP UNTIL 2028 # KEEP UNTIL 2028
./services/caddy_senior_project.nix ./services/caddy_senior_project.nix
@@ -45,6 +60,11 @@
./services/graphing-calculator.nix ./services/graphing-calculator.nix
./services/ssh.nix ./services/ssh.nix
./services/syncthing.nix
./services/ntfy.nix
./services/ntfy-alerts.nix
]; ];
services.kmscon.enable = true; services.kmscon.enable = true;
@@ -113,6 +133,37 @@
compressor = "zstd"; compressor = "zstd";
supportedFilesystems = [ "f2fs" ]; supportedFilesystems = [ "f2fs" ];
}; };
# BBR congestion control handles variable-latency VPN connections much
# better than CUBIC by probing bandwidth continuously rather than
# reacting to packet loss.
kernelModules = [ "tcp_bbr" ];
kernel.sysctl = {
# Use BBR + fair queuing for smooth throughput through the WireGuard VPN
"net.core.default_qdisc" = "fq";
"net.ipv4.tcp_congestion_control" = "bbr";
# Disable slow-start after idle: prevents TCP from resetting window
# size on each burst cycle (the primary cause of the 0 -> 40 MB/s spikes)
"net.ipv4.tcp_slow_start_after_idle" = 0;
# Larger socket buffers to accommodate the VPN bandwidth-delay product
# (22ms RTT * target throughput). Current 2.5MB max is too small.
"net.core.rmem_max" = 16777216;
"net.core.wmem_max" = 16777216;
"net.ipv4.tcp_rmem" = "4096 87380 16777216";
"net.ipv4.tcp_wmem" = "4096 65536 16777216";
# Higher backlog for the large number of concurrent torrent connections
"net.core.netdev_max_backlog" = 5000;
# Minecraft server optimizations
# Disable autogroup for better scheduling of game server threads
"kernel.sched_autogroup_enabled" = 0;
# Huge pages for Minecraft JVM (4000MB heap / 2MB per page + ~200 overhead)
"vm.nr_hugepages" = 2200;
};
}; };
environment.etc = { environment.etc = {
@@ -182,6 +233,7 @@
hostName = hostname; hostName = hostname;
hostId = "0f712d56"; hostId = "0f712d56";
firewall.enable = true; firewall.enable = true;
firewall.trustedInterfaces = [ "wg-br" ];
useDHCP = false; useDHCP = false;
enableIPv6 = false; enableIPv6 = false;

167
flake.lock generated
View File

@@ -12,11 +12,11 @@
"systems": "systems" "systems": "systems"
}, },
"locked": { "locked": {
"lastModified": 1762618334, "lastModified": 1770165109,
"narHash": "sha256-wyT7Pl6tMFbFrs8Lk/TlEs81N6L+VSybPfiIgzU8lbQ=", "narHash": "sha256-9VnK6Oqai65puVJ4WYtCTvlJeXxMzAp/69HhQuTdl/I=",
"owner": "ryantm", "owner": "ryantm",
"repo": "agenix", "repo": "agenix",
"rev": "fcdea223397448d35d9b31f798479227e80183f6", "rev": "b027ee29d959fda4b60b57566d64c98a202e0feb",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -25,13 +25,33 @@
"type": "github" "type": "github"
} }
}, },
"arr-init": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1772249948,
"narHash": "sha256-v68tO12mTCET68eZG583U+OlBL4f6kAoHS9iKA/xLzQ=",
"ref": "refs/heads/main",
"rev": "d21eb9f5b0a30bb487de7c0afbbbaf19324eaa49",
"revCount": 1,
"type": "git",
"url": "ssh://gitea@git.gardling.com/titaniumtown/arr-init"
},
"original": {
"type": "git",
"url": "ssh://gitea@git.gardling.com/titaniumtown/arr-init"
}
},
"crane": { "crane": {
"locked": { "locked": {
"lastModified": 1767744144, "lastModified": 1771796463,
"narHash": "sha256-9/9ntI0D+HbN4G0TrK3KmHbTvwgswz7p8IEJsWyef8Q=", "narHash": "sha256-9bCDuUzpwJXcHMQYMS1yNuzYMmKO/CCwCexpjWOl62I=",
"owner": "ipetkov", "owner": "ipetkov",
"repo": "crane", "repo": "crane",
"rev": "2fb033290bf6b23f226d4c8b32f7f7a16b043d7e", "rev": "3d3de3313e263e04894f284ac18177bd26169bad",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -49,11 +69,11 @@
"utils": "utils" "utils": "utils"
}, },
"locked": { "locked": {
"lastModified": 1766051518, "lastModified": 1770019181,
"narHash": "sha256-znKOwPXQnt3o7lDb3hdf19oDo0BLP4MfBOYiWkEHoik=", "narHash": "sha256-hwsYgDnby50JNVpTRYlF3UR/Rrpt01OrxVuryF40CFY=",
"owner": "serokell", "owner": "serokell",
"repo": "deploy-rs", "repo": "deploy-rs",
"rev": "d5eff7f948535b9c723d60cd8239f8f11ddc90fa", "rev": "77c906c0ba56aabdbc72041bf9111b565cdd6171",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -69,11 +89,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1766150702, "lastModified": 1771881364,
"narHash": "sha256-P0kM+5o+DKnB6raXgFEk3azw8Wqg5FL6wyl9jD+G5a4=", "narHash": "sha256-A5uE/hMium5of/QGC6JwF5TGoDAfpNtW00T0s9u/PN8=",
"owner": "nix-community", "owner": "nix-community",
"repo": "disko", "repo": "disko",
"rev": "916506443ecd0d0b4a0f4cf9d40a3c22ce39b378", "rev": "a4cb7bf73f264d40560ba527f9280469f1f081c6",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -131,24 +151,6 @@
} }
}, },
"flake-utils": { "flake-utils": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": { "inputs": {
"systems": "systems_4" "systems": "systems_4"
}, },
@@ -195,11 +197,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1767910483, "lastModified": 1772020340,
"narHash": "sha256-MOU5YdVu4DVwuT5ztXgQpPuRRBjSjUGIdUzOQr9iQOY=", "narHash": "sha256-aqBl3GNpCadMoJ/hVkWTijM1Aeilc278MjM+LA3jK6g=",
"owner": "nix-community", "owner": "nix-community",
"repo": "home-manager", "repo": "home-manager",
"rev": "82fb7dedaad83e5e279127a38ef410bcfac6d77c", "rev": "36e38ca0d9afe4c55405fdf22179a5212243eecc",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -217,11 +219,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1747978958, "lastModified": 1768598210,
"narHash": "sha256-pQQnbxWpY3IiZqgelXHIe/OAE/Yv4NSQq7fch7M6nXQ=", "narHash": "sha256-kkgA32s/f4jaa4UG+2f8C225Qvclxnqs76mf8zvTVPg=",
"owner": "nix-community", "owner": "nix-community",
"repo": "home-manager", "repo": "home-manager",
"rev": "7419250703fd5eb50e99bdfb07a86671939103ea", "rev": "c47b2cc64a629f8e075de52e4742de688f930dc6",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -233,14 +235,16 @@
"impermanence": { "impermanence": {
"inputs": { "inputs": {
"home-manager": "home-manager_2", "home-manager": "home-manager_2",
"nixpkgs": "nixpkgs" "nixpkgs": [
"nixpkgs"
]
}, },
"locked": { "locked": {
"lastModified": 1767822991, "lastModified": 1769548169,
"narHash": "sha256-iyrn9AcPZCoyxX4OT8eMkBsjG7SRUQXXS/V1JzxS7rA=", "narHash": "sha256-03+JxvzmfwRu+5JafM0DLbxgHttOQZkUtDWBmeUkN8Y=",
"owner": "nix-community", "owner": "nix-community",
"repo": "impermanence", "repo": "impermanence",
"rev": "82e5bc4508cab9e8d5a136626276eb5bbce5e9c5", "rev": "7b1d382faf603b6d264f58627330f9faa5cba149",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -259,11 +263,11 @@
"rust-overlay": "rust-overlay" "rust-overlay": "rust-overlay"
}, },
"locked": { "locked": {
"lastModified": 1768307256, "lastModified": 1772216104,
"narHash": "sha256-3yDvlAqWa0Vk3B9hFRJJrSs1xc+FwVQFLtu//VrTR4c=", "narHash": "sha256-1TnGN26vnCEQk5m4AavJZxGZTb/6aZyphemRPRwFUfs=",
"owner": "nix-community", "owner": "nix-community",
"repo": "lanzaboote", "repo": "lanzaboote",
"rev": "7e031eb535a494582f4fc58735b5aecba7b57058", "rev": "dbe5112de965bbbbff9f0729a9789c20a65ab047",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -275,17 +279,17 @@
"nix-minecraft": { "nix-minecraft": {
"inputs": { "inputs": {
"flake-compat": "flake-compat_3", "flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"nixpkgs": [ "nixpkgs": [
"nixpkgs" "nixpkgs"
] ],
"systems": "systems_3"
}, },
"locked": { "locked": {
"lastModified": 1767838769, "lastModified": 1772160153,
"narHash": "sha256-KCLU6SUU80tEBKIVZsBrSjRYX6kn1eVIYI3fEEqOp24=", "narHash": "sha256-lk5IxQzY9ZeeEyjKNT7P6dFnlRpQgkus4Ekc/+slypY=",
"owner": "Infinidoge", "owner": "Infinidoge",
"repo": "nix-minecraft", "repo": "nix-minecraft",
"rev": "4da21f019f6443f513f16af7f220ba4db1cdfc04", "rev": "deca3fb710b502ba10cd5cdc8f66c2cc184b92df",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -296,11 +300,11 @@
}, },
"nixos-hardware": { "nixos-hardware": {
"locked": { "locked": {
"lastModified": 1767185284, "lastModified": 1771969195,
"narHash": "sha256-ljDBUDpD1Cg5n3mJI81Hz5qeZAwCGxon4kQW3Ho3+6Q=", "narHash": "sha256-qwcDBtrRvJbrrnv1lf/pREQi8t2hWZxVAyeMo7/E9sw=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixos-hardware", "repo": "nixos-hardware",
"rev": "40b1a28dce561bea34858287fbb23052c3ee63fe", "rev": "41c6b421bdc301b2624486e11905c9af7b8ec68e",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -312,27 +316,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1748026106, "lastModified": 1772047000,
"narHash": "sha256-6m1Y3/4pVw1RWTsrkAK2VMYSzG4MMIj7sqUy7o8th1o=", "narHash": "sha256-7DaQVv4R97cii/Qdfy4tmDZMB2xxtyIvNGSwXBBhSmo=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "063f43f2dbdef86376cc29ad646c45c46e93234c",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1768242861,
"narHash": "sha256-F4IIxa5xDHjtrmMcayM8lHctUq1oGltfBQu2+oqDWP4=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "1327e798cb055f96f92685df444e9a2c326ab5ed", "rev": "1267bb4920d0fc06ea916734c11b0bf004bbe17e",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -342,7 +330,7 @@
"type": "github" "type": "github"
} }
}, },
"nixpkgs_3": { "nixpkgs_2": {
"locked": { "locked": {
"lastModified": 1764517877, "lastModified": 1764517877,
"narHash": "sha256-pp3uT4hHijIC8JUK5MEqeAWmParJrgBVzHLNfJDZxg4=", "narHash": "sha256-pp3uT4hHijIC8JUK5MEqeAWmParJrgBVzHLNfJDZxg4=",
@@ -368,11 +356,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1767281941, "lastModified": 1771858127,
"narHash": "sha256-6MkqajPICgugsuZ92OMoQcgSHnD6sJHwk8AxvMcIgTE=", "narHash": "sha256-Gtre9YoYl3n25tJH2AoSdjuwcqij5CPxL3U3xysYD08=",
"owner": "cachix", "owner": "cachix",
"repo": "pre-commit-hooks.nix", "repo": "pre-commit-hooks.nix",
"rev": "f0927703b7b1c8d97511c4116eb9b4ec6645a0fa", "rev": "49bbbfc218bf3856dfa631cead3b052d78248b83",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -384,6 +372,7 @@
"root": { "root": {
"inputs": { "inputs": {
"agenix": "agenix", "agenix": "agenix",
"arr-init": "arr-init",
"deploy-rs": "deploy-rs", "deploy-rs": "deploy-rs",
"disko": "disko", "disko": "disko",
"home-manager": "home-manager", "home-manager": "home-manager",
@@ -391,7 +380,7 @@
"lanzaboote": "lanzaboote", "lanzaboote": "lanzaboote",
"nix-minecraft": "nix-minecraft", "nix-minecraft": "nix-minecraft",
"nixos-hardware": "nixos-hardware", "nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs_2", "nixpkgs": "nixpkgs",
"senior_project-website": "senior_project-website", "senior_project-website": "senior_project-website",
"srvos": "srvos", "srvos": "srvos",
"trackerlist": "trackerlist", "trackerlist": "trackerlist",
@@ -408,11 +397,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1768272338, "lastModified": 1771988922,
"narHash": "sha256-Tg/kL8eKMpZtceDvBDQYU8zowgpr7ucFRnpP/AtfuRM=", "narHash": "sha256-Fc6FHXtfEkLtuVJzd0B6tFYMhmcPLuxr90rWfb/2jtQ=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "03dda130a8701b08b0347fcaf850a190c53a3c1e", "rev": "f4443dc3f0b6c5e6b77d923156943ce816d1fcb9",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -445,11 +434,11 @@
"senior_project-website": { "senior_project-website": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1768253064, "lastModified": 1771869552,
"narHash": "sha256-Lp3k2BhOWo7bYRcGuV0ltgVYr+0+1QCcpuB7kK4pvOE=", "narHash": "sha256-veaVrRWCSy7HYAAjUFLw8HASKcj+3f0W+sCwS3QiaM4=",
"owner": "Titaniumtown", "owner": "Titaniumtown",
"repo": "senior-project-website", "repo": "senior-project-website",
"rev": "f86a1c80c58d1c292b4673e28e892de13fb78a25", "rev": "28a2b93492dac877dce0b38f078eacf74fce26e7",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -465,11 +454,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1768182633, "lastModified": 1772071250,
"narHash": "sha256-hH2yT/KOwvw6kpJ9S68KEqq4G//o3tisL/1y1W3QbMA=", "narHash": "sha256-LDWvJDR1J8xE8TBJjzWnOA0oVP/l9xBFC4npQPJDHN4=",
"owner": "nix-community", "owner": "nix-community",
"repo": "srvos", "repo": "srvos",
"rev": "43dd76be5957fea8db9a1948c182597c7db81f97", "rev": "5cd73bcf984b72d8046e1175d13753de255adfb9",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -541,11 +530,11 @@
"trackerlist": { "trackerlist": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1768259319, "lastModified": 1772233783,
"narHash": "sha256-kB+XRKahig2LTD14ypfYbR1QsOel6E35lIxLENleV/E=", "narHash": "sha256-2jPUBKpPuT4dCXwVFuZvTH3QyURixsfJZD7Zqs0atPY=",
"owner": "ngosang", "owner": "ngosang",
"repo": "trackerslist", "repo": "trackerslist",
"rev": "3f5537d696a42c5a4a97dc9c7abf0a82fcce40eb", "rev": "85c4f103f130b070a192343c334f50c2f56b61a9",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -605,8 +594,8 @@
}, },
"ytbn-graphing-software": { "ytbn-graphing-software": {
"inputs": { "inputs": {
"flake-utils": "flake-utils_2", "flake-utils": "flake-utils",
"nixpkgs": "nixpkgs_3", "nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay_2" "rust-overlay": "rust-overlay_2"
}, },
"locked": { "locked": {

102
flake.nix
View File

@@ -40,6 +40,7 @@
impermanence = { impermanence = {
url = "github:nix-community/impermanence"; url = "github:nix-community/impermanence";
inputs.nixpkgs.follows = "nixpkgs";
}; };
agenix = { agenix = {
@@ -67,6 +68,11 @@
ytbn-graphing-software = { ytbn-graphing-software = {
url = "git+https://git.gardling.com/titaniumtown/YTBN-Graphing-Software"; url = "git+https://git.gardling.com/titaniumtown/YTBN-Graphing-Software";
}; };
arr-init = {
url = "git+ssh://gitea@git.gardling.com/titaniumtown/arr-init";
inputs.nixpkgs.follows = "nixpkgs";
};
}; };
outputs = outputs =
@@ -82,7 +88,7 @@
srvos, srvos,
deploy-rs, deploy-rs,
impermanence, impermanence,
agenix, arr-init,
... ...
}@inputs: }@inputs:
let let
@@ -99,7 +105,10 @@
music_dir = "/${zpool_ssds}/music"; music_dir = "/${zpool_ssds}/music";
media_group = "media"; media_group = "media";
cpu_arch = "znver3";
ports = { ports = {
http = 80;
https = 443; https = 443;
jellyfin = 8096; # no services.jellyfin option for this jellyfin = 8096; # no services.jellyfin option for this
torrent = 6011; torrent = 6011;
@@ -110,12 +119,27 @@
soulseek_listen = 50300; soulseek_listen = 50300;
llama_cpp = 8991; llama_cpp = 8991;
vaultwarden = 8222; vaultwarden = 8222;
syncthing_gui = 8384;
syncthing_protocol = 22000;
syncthing_discovery = 21027;
minecraft = 25565;
matrix = 6167;
matrix_federation = 8448;
coturn = 3478;
coturn_tls = 5349;
ntfy = 2586;
livekit = 7880;
lk_jwt = 8081;
prowlarr = 9696;
sonarr = 8989;
radarr = 7878;
bazarr = 6767;
jellyseerr = 5055;
}; };
https = { https = {
certs = services_dir + "/http_certs"; certs = services_dir + "/http_certs";
domain = "gardling.com"; domain = "gardling.com";
wg_ip = "192.168.15.1";
}; };
gitea = { gitea = {
@@ -160,14 +184,66 @@
monero = { monero = {
dataDir = services_dir + "/monero"; dataDir = services_dir + "/monero";
}; };
matrix = {
dataDir = "/var/lib/continuwuity";
domain = "matrix.${https.domain}";
};
ntfy = {
domain = "ntfy.${https.domain}";
};
livekit = {
domain = "livekit.${https.domain}";
};
syncthing = {
dataDir = services_dir + "/syncthing";
signalBackupDir = "/${zpool_ssds}/bak/signal";
grayjayBackupDir = "/${zpool_ssds}/bak/grayjay";
};
prowlarr = {
dataDir = services_dir + "/prowlarr";
};
sonarr = {
dataDir = services_dir + "/sonarr";
};
radarr = {
dataDir = services_dir + "/radarr";
};
bazarr = {
dataDir = services_dir + "/bazarr";
};
jellyseerr = {
configDir = services_dir + "/jellyseerr";
};
recyclarr = {
dataDir = services_dir + "/recyclarr";
};
media = {
moviesDir = torrents_path + "/media/movies";
tvDir = torrents_path + "/media/tv";
};
}; };
pkgs = import nixpkgs { pkgs = import nixpkgs {
inherit system; inherit system;
hostPlatform = system; targetPlatform = system;
buildPlatform = builtins.currentSystem; buildPlatform = builtins.currentSystem;
}; };
lib = import ./modules/lib.nix { inherit inputs pkgs; }; lib = import ./modules/lib.nix { inherit inputs pkgs service_configs; };
testSuite = import ./tests/tests.nix {
inherit pkgs lib inputs;
config = self.nixosConfigurations.muffin.config;
};
in in
{ {
formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-tree; formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-tree;
@@ -215,10 +291,17 @@
nix-minecraft.overlay nix-minecraft.overlay
(import ./modules/overlays.nix) (import ./modules/overlays.nix)
]; ];
nixpkgs.config.allowUnfreePredicate =
pkg:
builtins.elem (nixpkgs.lib.getName pkg) [
"minecraft-server"
];
} }
lanzaboote.nixosModules.lanzaboote lanzaboote.nixosModules.lanzaboote
arr-init.nixosModules.default
home-manager.nixosModules.home-manager home-manager.nixosModules.home-manager
( (
{ {
@@ -247,14 +330,9 @@
}; };
}; };
packages.${system} = checks.${system} = testSuite;
let
testSuite = import ./tests/tests.nix { packages.${system} = {
inherit pkgs lib inputs;
config = self.nixosConfigurations.muffin.config;
};
in
{
tests = pkgs.linkFarm "all-tests" ( tests = pkgs.linkFarm "all-tests" (
pkgs.lib.mapAttrsToList (name: test: { pkgs.lib.mapAttrsToList (name: test: {
name = name; name = name;

View File

@@ -65,5 +65,20 @@
owner = "root"; owner = "root";
group = "root"; group = "root";
}; };
# ntfy-alerts secrets
ntfy-alerts-topic = {
file = ../secrets/ntfy-alerts-topic.age;
mode = "0400";
owner = "root";
group = "root";
};
ntfy-alerts-token = {
file = ../secrets/ntfy-alerts-token.age;
mode = "0400";
owner = "root";
group = "root";
};
}; };
} }

View File

@@ -20,14 +20,15 @@
"/var/lib/nixos" "/var/lib/nixos"
"/var/lib/systemd/timers" "/var/lib/systemd/timers"
# ZFS cache directory - persisting the directory instead of the file
# avoids "device busy" errors when ZFS atomically updates the cache
"/etc/zfs"
]; ];
files = [ files = [
# Machine ID # Machine ID
"/etc/machine-id" "/etc/machine-id"
# ZFS cache
"/etc/zfs/zpool.cache"
]; ];
users.${username} = { users.${username} = {
@@ -57,7 +58,13 @@
} }
]; ];
# Enforce root ownership on /persistent/etc. The impermanence activation
# script copies ownership from /persistent/etc to /etc via
# `chown --reference`. If /persistent/etc ever gets non-root ownership,
# sshd StrictModes rejects /etc/ssh/authorized_keys.d/root and root SSH
# breaks while non-root users still work.
# Use "z" (set ownership, non-recursive) not "d" (create only, no-op on existing).
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"d /etc 755 root" "z /persistent/etc 0755 root root"
]; ];
} }

View File

@@ -1,6 +1,7 @@
{ {
inputs, inputs,
pkgs, pkgs,
service_configs,
... ...
}: }:
inputs.nixpkgs.lib.extend ( inputs.nixpkgs.lib.extend (
@@ -28,8 +29,8 @@ inputs.nixpkgs.lib.extend (
pkg: pkg:
final.optimizeWithFlags pkg [ final.optimizeWithFlags pkg [
"-O3" "-O3"
"-march=znver3" "-march=${service_configs.cpu_arch}"
"-mtune=znver3" "-mtune=${service_configs.cpu_arch}"
]; ];
vpnNamespaceOpenPort = vpnNamespaceOpenPort =
@@ -154,5 +155,30 @@ inputs.nixpkgs.lib.extend (
# } # }
#]; #];
}; };
serviceFilePerms =
serviceName: tmpfilesRules:
{ pkgs, ... }:
let
confFile = pkgs.writeText "${serviceName}-file-perms.conf" (
lib.concatStringsSep "\n" tmpfilesRules
);
in
{
systemd.services."${serviceName}-file-perms" = {
after = [ "${serviceName}-mounts.service" ];
before = [ "${serviceName}.service" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.systemd}/bin/systemd-tmpfiles --create ${confFile}";
};
};
systemd.services.${serviceName} = {
wants = [ "${serviceName}-file-perms.service" ];
after = [ "${serviceName}-file-perms.service" ];
};
};
} }
) )

View File

@@ -17,13 +17,27 @@
]; ];
text = '' text = ''
#!/bin/sh # Retry loop to wait for hardware to be ready
set -e NUM_DEVICES=0
for attempt in 1 2 3 4 5; do
DEVICE_LIST=$(openrgb --noautoconnect --list-devices 2>/dev/null) || DEVICE_LIST=""
NUM_DEVICES=$(echo "$DEVICE_LIST" | grep -cE '^[0-9]+: ') || NUM_DEVICES=0
if [ "$NUM_DEVICES" -gt 0 ]; then
break
fi
if [ "$attempt" -lt 5 ]; then
sleep 2
fi
done
NUM_DEVICES=$(openrgb --noautoconnect --list-devices | grep -cE '^[0-9]+: ') # If no devices found after retries, exit gracefully
if [ "$NUM_DEVICES" -eq 0 ]; then
exit 0
fi
# Disable RGB on each device
for i in $(seq 0 $((NUM_DEVICES - 1))); do for i in $(seq 0 $((NUM_DEVICES - 1))); do
openrgb --noautoconnect --device "$i" --mode direct --color 000000 openrgb --noautoconnect --device "$i" --mode direct --color 000000 || true
done done
''; '';
} }
@@ -31,9 +45,12 @@
in in
{ {
description = "disable rgb"; description = "disable rgb";
after = [ "systemd-udev-settle.service" ];
serviceConfig = { serviceConfig = {
ExecStart = lib.getExe no-rgb; ExecStart = lib.getExe no-rgb;
Type = "oneshot"; Type = "oneshot";
Restart = "on-failure";
RestartSec = 5;
}; };
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
}; };

132
modules/ntfy-alerts.nix Normal file
View File

@@ -0,0 +1,132 @@
# NixOS module: push notifications via ntfy for system alerts.
# Two mechanisms are wired up:
#   1. A global systemd drop-in gives every service an OnFailure= handler
#      (a templated ntfy-alert@ unit) that POSTs recent journal output to
#      an ntfy topic read from a secret file.
#   2. ZED (ZFS Event Daemon) is pointed at the same ntfy server/topic.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.services.ntfyAlerts;
  curl = "${pkgs.curl}/bin/curl";
  hostname = config.networking.hostName;
  # Build the curl auth args as a proper bash array fragment
  # (empty string when no tokenFile is configured, so the script
  # falls through to an unauthenticated POST).
  authCurlArgs =
    if cfg.tokenFile != null then
      ''
        if [ -f "${cfg.tokenFile}" ]; then
          TOKEN=$(cat "${cfg.tokenFile}" 2>/dev/null || echo "")
          if [ -n "$TOKEN" ]; then
            AUTH_ARGS=(-H "Authorization: Bearer $TOKEN")
          fi
        fi
      ''
    else
      "";
  # Systemd failure alert script. Takes the failed unit name as $1,
  # reads the topic from cfg.topicFile at runtime (so the secret is
  # never embedded in the Nix store), and sends the last 15 journal
  # lines as the notification body. The final `|| true` keeps the
  # alert unit itself from failing when the ntfy server is unreachable.
  systemdAlertScript = pkgs.writeShellScript "ntfy-systemd-alert" ''
    set -euo pipefail
    UNIT_NAME="$1"
    SERVER_URL="${cfg.serverUrl}"
    TOPIC=$(cat "${cfg.topicFile}" 2>/dev/null | tr -d '[:space:]')
    if [ -z "$TOPIC" ]; then
      echo "ERROR: Could not read topic from ${cfg.topicFile}"
      exit 1
    fi
    # Get journal output for context
    JOURNAL_OUTPUT=$(${pkgs.systemd}/bin/journalctl -u "$UNIT_NAME" -n 15 --no-pager 2>/dev/null || echo "No journal output available")
    # Build auth args
    AUTH_ARGS=()
    ${authCurlArgs}
    # Send notification
    ${curl} -sf --max-time 15 -X POST \
      "$SERVER_URL/$TOPIC" \
      -H "Title: [${hostname}] Service failed: $UNIT_NAME" \
      -H "Priority: high" \
      -H "Tags: warning" \
      "''${AUTH_ARGS[@]}" \
      -d "$JOURNAL_OUTPUT" || true
  '';
in
{
  options.services.ntfyAlerts = {
    enable = lib.mkEnableOption "ntfy push notifications for system alerts";
    # Base URL of the ntfy server; the topic is appended at runtime.
    serverUrl = lib.mkOption {
      type = lib.types.str;
      description = "The ntfy server URL (e.g. https://ntfy.example.com)";
      example = "https://ntfy.example.com";
    };
    # Topic name lives in a file (typically an agenix secret) so it
    # stays out of the world-readable Nix store.
    topicFile = lib.mkOption {
      type = lib.types.path;
      description = "Path to a file containing the ntfy topic name to publish alerts to.";
      example = "/run/agenix/ntfy-alerts-topic";
    };
    tokenFile = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      default = null;
      description = ''
        Path to a file containing the ntfy auth token.
        If set, uses Authorization: Bearer header for authentication.
      '';
      example = "/run/secrets/ntfy-token";
    };
  };
  config = lib.mkIf cfg.enable {
    # Per-service OnFailure for monitored services
    systemd.services = {
      # Template unit: instantiated as ntfy-alert@<unit>.service by the
      # global OnFailure drop-in below; %i carries the failed unit name.
      "ntfy-alert@" = {
        description = "Send ntfy notification for failed service %i";
        # Clear OnFailure on the alert unit itself so a failing alert
        # cannot recursively trigger another alert.
        unitConfig.OnFailure = lib.mkForce "";
        serviceConfig = {
          Type = "oneshot";
          ExecStart = "${systemdAlertScript} %i";
          TimeoutSec = 30;
        };
      };
      # TODO: sanoid's ExecStartPre runs `zfs allow` which blocks on TXG sync;
      # on the hdds pool (slow spinning disks + large async frees) this causes
      # 30+ minute hangs and guaranteed timeouts. Suppress until we fix sanoid
      # to run as root without `zfs allow`. See: nixpkgs#72060, openzfs/zfs#14180
      "sanoid".unitConfig.OnFailure = lib.mkForce "";
    };
    # Global OnFailure drop-in for all services
    systemd.packages = [
      (pkgs.writeTextDir "etc/systemd/system/service.d/onfailure.conf" ''
        [Unit]
        OnFailure=ntfy-alert@%p.service
      '')
      # Sanoid-specific drop-in to override the global OnFailure (see TODO above)
      (pkgs.writeTextDir "etc/systemd/system/sanoid.service.d/onfailure.conf" ''
        [Unit]
        OnFailure=
      '')
    ];
    # ZED (ZFS Event Daemon) ntfy notification settings
    services.zfs.zed = {
      enableMail = false;
      settings = {
        ZED_NTFY_URL = cfg.serverUrl;
        # NOTE(review): these $(cat ...) command substitutions only work if
        # zed sources zed.rc as shell at event time — confirm; otherwise the
        # literal "$(cat ...)" string would be used as the topic/token.
        ZED_NTFY_TOPIC = "$(cat ${cfg.topicFile} | tr -d '[:space:]')";
        ZED_NTFY_ACCESS_TOKEN = lib.mkIf (cfg.tokenFile != null) "$(cat ${cfg.tokenFile})";
        ZED_NOTIFY_VERBOSE = true;
      };
    };
  };
}

37
modules/security.nix Normal file
View File

@@ -0,0 +1,37 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  # Hardened memory allocator stays disabled: it is known to break the
  # redis-immich service.
  # environment.memoryAllocator.provider = "graphene-hardened";

  # Do not retain coredumps on disk.
  systemd.coredump.enable = false;

  services = {
    # Use dbus-broker instead of the reference dbus daemon.
    dbus.implementation = "broker";

    # Brute-force protection. iptables actions are chosen for compatibility.
    fail2ban = {
      enable = true;
      banaction = "iptables-multiport";
      banaction-allports = "iptables-allports";
    };

    /*
      logrotate.enable = true;
      journald = {
        storage = "volatile"; # Store logs in memory
        upload.enable = false; # Disable remote log upload (the default)
        extraConfig = ''
          SystemMaxUse=500M
          SystemMaxFileSize=50M
        '';
      };
    */
  };
}

View File

@@ -64,7 +64,7 @@ in
yearly = 0; yearly = 0;
}; };
datasets."${service_configs.zpool_ssds}/services/jellyfin_cache" = { datasets."${service_configs.zpool_ssds}/services/jellyfin/cache" = {
recursive = true; recursive = true;
autoprune = true; autoprune = true;
autosnap = true; autosnap = true;

Binary file not shown.

BIN
secrets/livekit_keys Normal file

Binary file not shown.

BIN
secrets/matrix_reg_token Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
secrets/xmrig-wallet Normal file

Binary file not shown.

34
services/arr/bazarr.nix Normal file
View File

@@ -0,0 +1,34 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  inherit (config.services.bazarr) user group;
  port = service_configs.ports.bazarr;
in
{
  imports = [
    # State lives on the SSD pool, torrents on the HDD pool.
    (lib.serviceMountWithZpool "bazarr" service_configs.zpool_ssds [
      service_configs.bazarr.dataDir
    ])
    (lib.serviceMountWithZpool "bazarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceFilePerms "bazarr" [
      "Z ${service_configs.bazarr.dataDir} 0700 ${user} ${group}"
    ])
  ];

  services.bazarr = {
    enable = true;
    listenPort = port;
  };

  # Reverse proxy behind basic auth (credentials come from the agenix secret).
  services.caddy.virtualHosts."bazarr.${service_configs.https.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${toString port}
  '';

  # Membership in the media group grants access to the shared media tree.
  users.users.${user}.extraGroups = [
    service_configs.media_group
  ];
}

115
services/arr/init.nix Normal file
View File

@@ -0,0 +1,115 @@
{ config, service_configs, ... }:
{
  # Declarative API-based bootstrap of the *arr stack via the arrInit module.
  services.arrInit = {
    prowlarr = {
      enable = true;
      serviceName = "prowlarr";
      port = service_configs.ports.prowlarr;
      dataDir = service_configs.prowlarr.dataDir;
      apiVersion = "v1";
      # Prowlarr runs inside the wg VPN network namespace.
      networkNamespacePath = "/run/netns/wg";
      # Applications Prowlarr syncs its indexers to. Prowlarr reaches them
      # through the VPN namespace's bridge address; their API keys are read
      # from each app's own config.xml.
      syncedApps = [
        {
          name = "Sonarr";
          implementation = "Sonarr";
          configContract = "SonarrSettings";
          prowlarrUrl = "http://localhost:${builtins.toString service_configs.ports.prowlarr}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.sonarr}";
          apiKeyFrom = "${service_configs.sonarr.dataDir}/config.xml";
          # 5000-series IDs -- presumably Newznab/Torznab TV categories; verify
          # against the indexer category reference.
          syncCategories = [
            5000
            5010
            5020
            5030
            5040
            5045
            5050
            5090
          ];
          serviceName = "sonarr";
        }
        {
          name = "Radarr";
          implementation = "Radarr";
          configContract = "RadarrSettings";
          prowlarrUrl = "http://localhost:${builtins.toString service_configs.ports.prowlarr}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.radarr}";
          apiKeyFrom = "${service_configs.radarr.dataDir}/config.xml";
          # 2000-series IDs -- presumably Newznab/Torznab movie categories.
          syncCategories = [
            2000
            2010
            2020
            2030
            2040
            2045
            2050
            2060
            2070
            2080
          ];
          serviceName = "radarr";
        }
      ];
    };
    sonarr = {
      enable = true;
      serviceName = "sonarr";
      port = service_configs.ports.sonarr;
      dataDir = service_configs.sonarr.dataDir;
      rootFolders = [ service_configs.media.tvDir ];
      # qBittorrent is reached at the VPN namespace's internal address.
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.torrent;
            useSsl = false;
            tvCategory = "tvshows";
          };
        }
      ];
    };
    radarr = {
      enable = true;
      serviceName = "radarr";
      port = service_configs.ports.radarr;
      dataDir = service_configs.radarr.dataDir;
      rootFolders = [ service_configs.media.moviesDir ];
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.torrent;
            useSsl = false;
            movieCategory = "movies";
          };
        }
      ];
    };
  };
  # Bazarr bootstrap: wire it to both Sonarr and Radarr.
  services.bazarrInit = {
    enable = true;
    dataDir = "/var/lib/bazarr";
    port = service_configs.ports.bazarr;
    sonarr = {
      enable = true;
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.sonarr;
      serviceName = "sonarr";
    };
    radarr = {
      enable = true;
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.radarr;
      serviceName = "radarr";
    };
  };
}

View File

@@ -0,0 +1,43 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "jellyseerr" service_configs.zpool_ssds [
      service_configs.jellyseerr.configDir
    ])
    (lib.serviceFilePerms "jellyseerr" [
      "Z ${service_configs.jellyseerr.configDir} 0700 jellyseerr jellyseerr"
    ])
  ];
  services.jellyseerr = {
    enable = true;
    port = service_configs.ports.jellyseerr;
    configDir = service_configs.jellyseerr.configDir;
  };
  # Force a fixed system user instead of DynamicUser so configDir keeps stable
  # ownership matching the "Z" perms rule above -- presumably required for the
  # ZFS-dataset mount; confirm against the upstream jellyseerr module.
  systemd.services.jellyseerr.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "jellyseerr";
    Group = "jellyseerr";
    ReadWritePaths = [ service_configs.jellyseerr.configDir ];
  };
  users.users.jellyseerr = {
    isSystemUser = true;
    group = "jellyseerr";
    home = service_configs.jellyseerr.configDir;
  };
  users.groups.jellyseerr = { };
  # NOTE(review): the caddy_auth import is commented out *inside* the
  # Caddyfile below, so this vhost is served without basic auth (Jellyseerr
  # has its own login) -- confirm that is intentional.
  services.caddy.virtualHosts."jellyseerr.${service_configs.https.domain}".extraConfig = ''
    # import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${builtins.toString service_configs.ports.jellyseerr}
  '';
}

30
services/arr/prowlarr.nix Normal file
View File

@@ -0,0 +1,30 @@
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "prowlarr" service_configs.zpool_ssds [
      service_configs.prowlarr.dataDir
    ])
    # Prowlarr runs inside the wg VPN namespace; open its port there.
    (lib.vpnNamespaceOpenPort service_configs.ports.prowlarr "prowlarr")
  ];

  services.prowlarr = {
    enable = true;
    dataDir = service_configs.prowlarr.dataDir;
    settings.server.port = service_configs.ports.prowlarr;
  };

  # Fix state-directory ownership before startup; the "+" prefix makes systemd
  # run this command as root regardless of the unit's own user.
  # Uses the configured dataDir rather than a hard-coded /var/lib/prowlarr so
  # it stays correct if service_configs.prowlarr.dataDir ever points elsewhere.
  systemd.services.prowlarr.serviceConfig = {
    ExecStartPre = "+${pkgs.coreutils}/bin/chown -R prowlarr ${service_configs.prowlarr.dataDir}";
  };

  # Reverse proxy into the VPN namespace, behind basic auth.
  services.caddy.virtualHosts."prowlarr.${service_configs.https.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy ${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.prowlarr}
  '';
}

36
services/arr/radarr.nix Normal file
View File

@@ -0,0 +1,36 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  inherit (config.services.radarr) user group;
  port = service_configs.ports.radarr;
in
{
  imports = [
    # Application state on the SSD pool, torrent data on the HDD pool.
    (lib.serviceMountWithZpool "radarr" service_configs.zpool_ssds [
      service_configs.radarr.dataDir
    ])
    (lib.serviceMountWithZpool "radarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceFilePerms "radarr" [
      "Z ${service_configs.radarr.dataDir} 0700 ${user} ${group}"
    ])
  ];

  services.radarr = {
    enable = true;
    dataDir = service_configs.radarr.dataDir;
    settings = {
      server.port = port;
      # Updates are managed externally, not by Radarr's built-in updater.
      update.mechanism = "external";
    };
  };

  # Reverse proxy behind basic auth (credentials from the agenix secret).
  services.caddy.virtualHosts."radarr.${service_configs.https.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${toString port}
  '';

  # Media-group membership grants access to the shared media tree.
  users.users.${user}.extraGroups = [
    service_configs.media_group
  ];
}

202
services/arr/recyclarr.nix Normal file
View File

@@ -0,0 +1,202 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  appDataDir = "${service_configs.recyclarr.dataDir}/data";
  # Runs as root (via + prefix) to read API keys, writes secrets.yml for recyclarr.
  # `set -euo pipefail` makes the ExecStartPre fail loudly if either config.xml
  # is missing or contains no <ApiKey> yet (grep exits non-zero), instead of
  # silently writing empty keys into secrets.yml.
  generateSecrets = pkgs.writeShellScript "recyclarr-generate-secrets" ''
    set -euo pipefail
    RADARR_KEY=$(${pkgs.gnugrep}/bin/grep -oP '(?<=<ApiKey>)[^<]+' ${radarrConfig})
    SONARR_KEY=$(${pkgs.gnugrep}/bin/grep -oP '(?<=<ApiKey>)[^<]+' ${sonarrConfig})
    cat > ${appDataDir}/secrets.yml <<EOF
    movies_api_key: $RADARR_KEY
    series_api_key: $SONARR_KEY
    EOF
    chown recyclarr:recyclarr ${appDataDir}/secrets.yml
    chmod 600 ${appDataDir}/secrets.yml
  '';
in
{
  imports = [
    (lib.serviceMountWithZpool "recyclarr" service_configs.zpool_ssds [
      service_configs.recyclarr.dataDir
    ])
  ];
  systemd.tmpfiles.rules = [
    "d ${service_configs.recyclarr.dataDir} 0755 recyclarr recyclarr -"
    "d ${appDataDir} 0755 recyclarr recyclarr -"
  ];
  services.recyclarr = {
    enable = true;
    command = "sync";
    schedule = "daily";
    user = "recyclarr";
    group = "recyclarr";
    # Quality/custom-format configuration synced daily into Radarr and Sonarr.
    configuration = {
      radarr.movies = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.radarr}";
        include = [
          { template = "radarr-quality-definition-movie"; }
          { template = "radarr-quality-profile-remux-web-2160p"; }
          { template = "radarr-custom-formats-remux-web-2160p"; }
        ];
        quality_profiles = [
          {
            name = "Remux + WEB 2160p";
            upgrade = {
              allowed = true;
              until_quality = "Remux-2160p";
            };
            qualities = [
              { name = "Remux-2160p"; }
              {
                name = "WEB 2160p";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                ];
              }
              { name = "Remux-1080p"; }
              { name = "Bluray-1080p"; }
              {
                name = "WEB 1080p";
                qualities = [
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
            ];
          }
        ];
        custom_formats = [
          # Upscaled
          {
            trash_ids = [ "bfd8eb01832d646a0a89c4deb46f8564" ];
            assign_scores_to = [
              {
                name = "Remux + WEB 2160p";
                score = -10000;
              }
            ];
          }
          # x265 (HD) - override template -10000 penalty
          {
            trash_ids = [ "dc98083864ea246d05a42df0d05f81cc" ];
            assign_scores_to = [
              {
                name = "Remux + WEB 2160p";
                score = 0;
              }
            ];
          }
          # x265 (no HDR/DV) - override template -10000 penalty
          {
            trash_ids = [ "839bea857ed2c0a8e084f3cbdbd65ecb" ];
            assign_scores_to = [
              {
                name = "Remux + WEB 2160p";
                score = 0;
              }
            ];
          }
        ];
      };
      sonarr.series = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.sonarr}";
        include = [
          { template = "sonarr-quality-definition-series"; }
          { template = "sonarr-v4-quality-profile-web-2160p"; }
          { template = "sonarr-v4-custom-formats-web-2160p"; }
        ];
        quality_profiles = [
          {
            name = "WEB-2160p";
            upgrade = {
              allowed = true;
              until_quality = "WEB 2160p";
            };
            qualities = [
              {
                name = "WEB 2160p";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                ];
              }
              { name = "Bluray-1080p Remux"; }
              { name = "Bluray-1080p"; }
              {
                name = "WEB 1080p";
                qualities = [
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
            ];
          }
        ];
        custom_formats = [
          # Upscaled
          {
            trash_ids = [ "23297a736ca77c0fc8e70f8edd7ee56c" ];
            assign_scores_to = [
              {
                name = "WEB-2160p";
                score = -10000;
              }
            ];
          }
          # x265 (HD) - override template -10000 penalty
          {
            trash_ids = [ "47435ece6b99a0b477caf360e79ba0bb" ];
            assign_scores_to = [
              {
                name = "WEB-2160p";
                score = 0;
              }
            ];
          }
          # x265 (no HDR/DV) - override template -10000 penalty
          {
            trash_ids = [ "9b64dff695c2115facf1b6ea59c9bd07" ];
            assign_scores_to = [
              {
                name = "WEB-2160p";
                score = 0;
              }
            ];
          }
        ];
      };
    };
  };
  # Add secrets generation before recyclarr runs; order after the *arr
  # services so their config.xml (and API keys) exist.
  systemd.services.recyclarr = {
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig.ExecStartPre = "+${generateSecrets}";
  };
}

42
services/arr/sonarr.nix Normal file
View File

@@ -0,0 +1,42 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "sonarr" service_configs.zpool_ssds [
      service_configs.sonarr.dataDir
    ])
    (lib.serviceMountWithZpool "sonarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceFilePerms "sonarr" [
      "Z ${service_configs.sonarr.dataDir} 0700 ${config.services.sonarr.user} ${config.services.sonarr.group}"
    ])
  ];
  # Shared media directories. Mode 2775 sets the setgid bit so new entries
  # inherit the media group, letting the other services in that group write
  # here too.
  systemd.tmpfiles.rules = [
    "d /torrents/media 2775 root ${service_configs.media_group} -"
    "d ${service_configs.media.tvDir} 2775 root ${service_configs.media_group} -"
    "d ${service_configs.media.moviesDir} 2775 root ${service_configs.media_group} -"
  ];
  services.sonarr = {
    enable = true;
    dataDir = service_configs.sonarr.dataDir;
    settings.server.port = service_configs.ports.sonarr;
    # "external": updates are handled outside the application itself.
    settings.update.mechanism = "external";
  };
  # Reverse proxy behind basic auth (credentials from the agenix secret).
  services.caddy.virtualHosts."sonarr.${service_configs.https.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${builtins.toString service_configs.ports.sonarr}
  '';
  # Media-group membership grants access to the shared media tree above.
  users.users.${config.services.sonarr.user}.extraGroups = [
    service_configs.media_group
  ];
}

View File

@@ -26,6 +26,6 @@
services.caddy.virtualHosts."bitmagnet.${service_configs.https.domain}".extraConfig = '' services.caddy.virtualHosts."bitmagnet.${service_configs.https.domain}".extraConfig = ''
import ${config.age.secrets.caddy_auth.path} import ${config.age.secrets.caddy_auth.path}
reverse_proxy ${service_configs.https.wg_ip}:${builtins.toString service_configs.ports.bitmagnet} reverse_proxy ${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.bitmagnet}
''; '';
} }

View File

@@ -15,6 +15,10 @@
service_configs.vaultwarden.path service_configs.vaultwarden.path
config.services.vaultwarden.backupDir config.services.vaultwarden.backupDir
]) ])
(lib.serviceFilePerms "vaultwarden" [
"Z ${service_configs.vaultwarden.path} 0700 vaultwarden vaultwarden"
"Z ${config.services.vaultwarden.backupDir} 0700 vaultwarden vaultwarden"
])
]; ];
services.vaultwarden = { services.vaultwarden = {
@@ -39,8 +43,18 @@
} }
''; '';
systemd.tmpfiles.rules = [ # Protect Vaultwarden login from brute force attacks
"Z ${service_configs.vaultwarden.path} 0700 vaultwarden vaultwarden" services.fail2ban.jails.vaultwarden = {
"Z ${config.services.vaultwarden.backupDir} 0700 vaultwarden vaultwarden" enabled = true;
]; settings = {
backend = "systemd";
port = "http,https";
# defaults: maxretry=5, findtime=10m, bantime=10m
};
filter.Definition = {
failregex = ''^.*Username or password is incorrect\. Try again\. IP: <HOST>\..*$'';
ignoreregex = "";
journalmatch = "_SYSTEMD_UNIT=vaultwarden.service";
};
};
} }

View File

@@ -74,10 +74,34 @@ in
service_configs.ports.https service_configs.ports.https
# http (but really acmeCA challenges) # http (but really acmeCA challenges)
80 service_configs.ports.http
]; ];
networking.firewall.allowedUDPPorts = [ networking.firewall.allowedUDPPorts = [
service_configs.ports.https service_configs.ports.https
]; ];
# Protect Caddy basic auth endpoints from brute force attacks
services.fail2ban.jails.caddy-auth = {
enabled = true;
settings = {
backend = "auto";
port = "http,https";
logpath = "/var/log/caddy/access-*.log";
# defaults: maxretry=5, findtime=10m, bantime=10m
# Ignore local network IPs - NAT hairpinning causes all LAN traffic to
# appear from the router IP (192.168.1.1). Banning it blocks all internal access.
ignoreip = "127.0.0.1/8 ::1 192.168.1.0/24";
};
filter.Definition = {
# Only match 401s where an Authorization header was actually sent.
# Without this, the normal HTTP Basic Auth challenge-response flow
# (browser probes without credentials, gets 401, then resends with
# credentials) counts every page visit as a "failure."
failregex = ''^.*"remote_ip":"<HOST>".*"Authorization":\["REDACTED"\].*"status":401.*$'';
ignoreregex = "";
datepattern = ''"ts":{Epoch}\.'';
};
};
} }

59
services/coturn.nix Normal file
View File

@@ -0,0 +1,59 @@
{
config,
lib,
service_configs,
...
}:
{
services.coturn = {
enable = true;
realm = service_configs.https.domain;
use-auth-secret = true;
static-auth-secret = lib.strings.trim (builtins.readFile ../secrets/coturn_static_auth_secret);
listening-port = service_configs.ports.coturn;
tls-listening-port = service_configs.ports.coturn_tls;
no-cli = true;
# recommended security settings from Synapse's coturn docs
extraConfig = ''
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255
denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
'';
};
# coturn needs these ports open
networking.firewall = {
allowedTCPPorts = [
service_configs.ports.coturn
service_configs.ports.coturn_tls
];
allowedUDPPorts = [
service_configs.ports.coturn
service_configs.ports.coturn_tls
];
# relay port range
allowedUDPPortRanges = [
{
from = config.services.coturn.min-port;
to = config.services.coturn.max-port;
}
];
};
}

View File

@@ -8,6 +8,9 @@
{ {
imports = [ imports = [
(lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [ config.services.gitea.stateDir ]) (lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [ config.services.gitea.stateDir ])
(lib.serviceFilePerms "gitea" [
"Z ${config.services.gitea.stateDir} 0700 ${config.services.gitea.user} ${config.services.gitea.group}"
])
]; ];
services.gitea = { services.gitea = {
@@ -41,11 +44,6 @@
reverse_proxy :${builtins.toString config.services.gitea.settings.server.HTTP_PORT} reverse_proxy :${builtins.toString config.services.gitea.settings.server.HTTP_PORT}
''; '';
systemd.tmpfiles.rules = [
# 0700 for ssh permission reasons
"Z ${config.services.gitea.stateDir} 0700 ${config.services.gitea.user} ${config.services.gitea.group}"
];
services.postgresql = { services.postgresql = {
ensureDatabases = [ config.services.gitea.user ]; ensureDatabases = [ config.services.gitea.user ];
ensureUsers = [ ensureUsers = [
@@ -58,4 +56,19 @@
}; };
services.openssh.settings.AllowUsers = [ config.services.gitea.user ]; services.openssh.settings.AllowUsers = [ config.services.gitea.user ];
# Protect Gitea login from brute force attacks
services.fail2ban.jails.gitea = {
enabled = true;
settings = {
backend = "systemd";
port = "http,https";
# defaults: maxretry=5, findtime=10m, bantime=10m
};
filter.Definition = {
failregex = "^.*Failed authentication attempt for .* from <HOST>:.*$";
ignoreregex = "";
journalmatch = "_SYSTEMD_UNIT=gitea.service";
};
};
} }

View File

@@ -5,7 +5,8 @@
... ...
}: }:
let let
graphing-calculator = inputs.ytbn-graphing-software.packages.${pkgs.stdenv.hostPlatform.system}.web; graphing-calculator =
inputs.ytbn-graphing-software.packages.${pkgs.stdenv.targetPlatform.system}.web;
in in
{ {
services.caddy.virtualHosts."graphing.${service_configs.https.domain}".extraConfig = '' services.caddy.virtualHosts."graphing.${service_configs.https.domain}".extraConfig = ''

View File

@@ -13,6 +13,9 @@
(lib.serviceMountWithZpool "immich-machine-learning" service_configs.zpool_ssds [ (lib.serviceMountWithZpool "immich-machine-learning" service_configs.zpool_ssds [
config.services.immich.mediaLocation config.services.immich.mediaLocation
]) ])
(lib.serviceFilePerms "immich-server" [
"Z ${config.services.immich.mediaLocation} 0770 ${config.services.immich.user} ${config.services.immich.group}"
])
]; ];
services.immich = { services.immich = {
@@ -30,10 +33,6 @@
reverse_proxy :${builtins.toString config.services.immich.port} reverse_proxy :${builtins.toString config.services.immich.port}
''; '';
systemd.tmpfiles.rules = [
"Z ${config.services.immich.mediaLocation} 0770 ${config.services.immich.user} ${config.services.immich.group}"
];
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
immich-go immich-go
]; ];
@@ -42,4 +41,19 @@
"video" "video"
"render" "render"
]; ];
# Protect Immich login from brute force attacks
services.fail2ban.jails.immich = {
enabled = true;
settings = {
backend = "systemd";
port = "http,https";
# defaults: maxretry=5, findtime=10m, bantime=10m
};
filter.Definition = {
failregex = "^.*Failed login attempt for user .* from ip address <HOST>.*$";
ignoreregex = "";
journalmatch = "_SYSTEMD_UNIT=immich-server.service";
};
};
} }

View File

@@ -44,8 +44,14 @@
environment = { environment = {
JELLYFIN_URL = "http://localhost:${builtins.toString service_configs.ports.jellyfin}"; JELLYFIN_URL = "http://localhost:${builtins.toString service_configs.ports.jellyfin}";
QBITTORRENT_URL = "http://${service_configs.https.wg_ip}:${builtins.toString service_configs.ports.torrent}"; QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.torrent}";
CHECK_INTERVAL = "30"; CHECK_INTERVAL = "30";
# Bandwidth budget configuration
TOTAL_BANDWIDTH_BUDGET = "30000000"; # 30 Mbps in bits per second
SERVICE_BUFFER = "5000000"; # 5 Mbps reserved for other services (bps)
DEFAULT_STREAM_BITRATE = "10000000"; # 10 Mbps fallback when bitrate unknown (bps)
MIN_TORRENT_SPEED = "100"; # KB/s - below this, pause torrents instead
STREAM_BITRATE_HEADROOM = "1.1"; # multiplier per stream for bitrate fluctuations
}; };
}; };
} }

View File

@@ -14,6 +14,12 @@ logging.basicConfig(
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class ServiceUnavailable(Exception):
"""Raised when a monitored service is temporarily unavailable."""
pass
class JellyfinQBittorrentMonitor: class JellyfinQBittorrentMonitor:
def __init__( def __init__(
self, self,
@@ -23,13 +29,25 @@ class JellyfinQBittorrentMonitor:
jellyfin_api_key=None, jellyfin_api_key=None,
streaming_start_delay=10, streaming_start_delay=10,
streaming_stop_delay=60, streaming_stop_delay=60,
total_bandwidth_budget=30000000,
service_buffer=5000000,
default_stream_bitrate=10000000,
min_torrent_speed=100,
stream_bitrate_headroom=1.1,
): ):
self.jellyfin_url = jellyfin_url self.jellyfin_url = jellyfin_url
self.qbittorrent_url = qbittorrent_url self.qbittorrent_url = qbittorrent_url
self.check_interval = check_interval self.check_interval = check_interval
self.jellyfin_api_key = jellyfin_api_key self.jellyfin_api_key = jellyfin_api_key
self.total_bandwidth_budget = total_bandwidth_budget
self.service_buffer = service_buffer
self.default_stream_bitrate = default_stream_bitrate
self.min_torrent_speed = min_torrent_speed
self.stream_bitrate_headroom = stream_bitrate_headroom
self.last_streaming_state = None self.last_streaming_state = None
self.throttle_active = False self.current_state = "unlimited"
self.torrents_paused = False
self.last_alt_limits = None
self.running = True self.running = True
self.session = requests.Session() # Use session for cookies self.session = requests.Session() # Use session for cookies
self.last_active_streams = [] self.last_active_streams = []
@@ -64,18 +82,26 @@ class JellyfinQBittorrentMonitor:
self.restore_normal_limits() self.restore_normal_limits()
sys.exit(0) sys.exit(0)
def check_jellyfin_sessions(self) -> list[str]: def check_jellyfin_sessions(self) -> list[dict]:
"""Check if anyone is actively streaming from Jellyfin (external networks only)""" headers = (
try: {"X-Emby-Token": self.jellyfin_api_key} if self.jellyfin_api_key else {}
headers = {"X-Emby-Token": self.jellyfin_api_key} if self.jellyfin_api_key else {} )
try:
response = requests.get( response = requests.get(
f"{self.jellyfin_url}/Sessions", headers=headers, timeout=10 f"{self.jellyfin_url}/Sessions", headers=headers, timeout=10
) )
response.raise_for_status() response.raise_for_status()
sessions = response.json() except requests.exceptions.RequestException as e:
logger.error(f"Failed to check Jellyfin sessions: {e}")
raise ServiceUnavailable(f"Jellyfin unavailable: {e}") from e
try:
sessions = response.json()
except json.JSONDecodeError as e:
logger.error(f"Failed to parse Jellyfin response: {e}")
raise ServiceUnavailable(f"Jellyfin returned invalid JSON: {e}") from e
# Count active streaming sessions (video only, external networks only)
active_streams = [] active_streams = []
for session in sessions: for session in sessions:
if ( if (
@@ -84,23 +110,29 @@ class JellyfinQBittorrentMonitor:
and not self.is_local_ip(session.get("RemoteEndPoint", "")) and not self.is_local_ip(session.get("RemoteEndPoint", ""))
): ):
item = session["NowPlayingItem"] item = session["NowPlayingItem"]
# Only count video streams (Movies, Episodes, etc.)
item_type = item.get("Type", "").lower() item_type = item.get("Type", "").lower()
if item_type in ["movie", "episode", "video"]: if item_type in ["movie", "episode", "video"]:
user = session.get("UserName", "Unknown") user = session.get("UserName", "Unknown")
active_streams.append(f"{user}: {item.get('Name', 'Unknown')}") stream_name = f"{user}: {item.get('Name', 'Unknown')}"
if session.get("TranscodingInfo") and session[
"TranscodingInfo"
].get("Bitrate"):
bitrate = session["TranscodingInfo"]["Bitrate"]
elif item.get("Bitrate"):
bitrate = item["Bitrate"]
elif item.get("MediaSources", [{}])[0].get("Bitrate"):
bitrate = item["MediaSources"][0]["Bitrate"]
else:
bitrate = self.default_stream_bitrate
bitrate = min(int(bitrate), 100_000_000)
# Add headroom to account for bitrate fluctuations
bitrate = int(bitrate * self.stream_bitrate_headroom)
active_streams.append({"name": stream_name, "bitrate_bps": bitrate})
return active_streams return active_streams
except requests.exceptions.RequestException as e: def check_qbittorrent_alternate_limits(self) -> bool:
logger.error(f"Failed to check Jellyfin sessions: {e}")
return []
except json.JSONDecodeError as e:
logger.error(f"Failed to parse Jellyfin response: {e}")
return []
def check_qbittorrent_alternate_limits(self):
"""Check if alternate speed limits are currently enabled"""
try: try:
response = self.session.get( response = self.session.get(
f"{self.qbittorrent_url}/api/v2/transfer/speedLimitsMode", timeout=10 f"{self.qbittorrent_url}/api/v2/transfer/speedLimitsMode", timeout=10
@@ -111,21 +143,20 @@ class JellyfinQBittorrentMonitor:
logger.warning( logger.warning(
f"SpeedLimitsMode endpoint returned HTTP {response.status_code}" f"SpeedLimitsMode endpoint returned HTTP {response.status_code}"
) )
raise ServiceUnavailable(
f"qBittorrent returned HTTP {response.status_code}"
)
except requests.exceptions.RequestException as e: except requests.exceptions.RequestException as e:
logger.error(f"SpeedLimitsMode endpoint failed: {e}") logger.error(f"SpeedLimitsMode endpoint failed: {e}")
except Exception as e: raise ServiceUnavailable(f"qBittorrent unavailable: {e}") from e
logger.error(f"Failed to parse speedLimitsMode response: {e}")
return self.throttle_active
def use_alt_limits(self, enable: bool) -> None: def use_alt_limits(self, enable: bool) -> None:
"""Toggle qBittorrent alternate speed limits"""
action = "enabled" if enable else "disabled" action = "enabled" if enable else "disabled"
try: try:
current_throttle = self.check_qbittorrent_alternate_limits() current_throttle = self.check_qbittorrent_alternate_limits()
if current_throttle == enable: if current_throttle == enable:
logger.info( logger.debug(
f"Alternate speed limits already {action}, no action needed" f"Alternate speed limits already {action}, no action needed"
) )
return return
@@ -135,28 +166,93 @@ class JellyfinQBittorrentMonitor:
timeout=10, timeout=10,
) )
response.raise_for_status() response.raise_for_status()
self.throttle_active = enable
# Verify the change took effect
new_state = self.check_qbittorrent_alternate_limits() new_state = self.check_qbittorrent_alternate_limits()
if new_state == enable: if new_state == enable:
logger.info(f"Activated {action} alternate speed limits") logger.info(f"Alternate speed limits {action}")
else: else:
logger.warning( logger.warning(
f"Toggle may have failed: expected {enable}, got {new_state}" f"Toggle may have failed: expected {enable}, got {new_state}"
) )
except ServiceUnavailable:
logger.warning(
f"qBittorrent unavailable, cannot {action} alternate speed limits"
)
except requests.exceptions.RequestException as e: except requests.exceptions.RequestException as e:
logger.error(f"Failed to {action} alternate speed limits: {e}") logger.error(f"Failed to {action} alternate speed limits: {e}")
except Exception as e:
logger.error(f"Failed to toggle qBittorrent limits: {e}") def pause_all_torrents(self) -> None:
try:
response = self.session.post(
f"{self.qbittorrent_url}/api/v2/torrents/stop",
data={"hashes": "all"},
timeout=10,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logger.error(f"Failed to pause torrents: {e}")
def resume_all_torrents(self) -> None:
try:
response = self.session.post(
f"{self.qbittorrent_url}/api/v2/torrents/start",
data={"hashes": "all"},
timeout=10,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logger.error(f"Failed to resume torrents: {e}")
def set_alt_speed_limits(self, dl_kbs: float, ul_kbs: float) -> None:
try:
payload = {
"alt_dl_limit": int(dl_kbs * 1024),
"alt_up_limit": int(ul_kbs * 1024),
}
response = self.session.post(
f"{self.qbittorrent_url}/api/v2/app/setPreferences",
data={"json": json.dumps(payload)},
timeout=10,
)
response.raise_for_status()
self.last_alt_limits = (dl_kbs, ul_kbs)
except requests.exceptions.RequestException as e:
logger.error(f"Failed to set alternate speed limits: {e}")
def restore_normal_limits(self) -> None: def restore_normal_limits(self) -> None:
"""Ensure normal speed limits are restored on shutdown""" if self.torrents_paused:
if self.throttle_active: logger.info("Resuming all torrents before shutdown...")
self.resume_all_torrents()
self.torrents_paused = False
if self.current_state != "unlimited":
logger.info("Restoring normal speed limits before shutdown...") logger.info("Restoring normal speed limits before shutdown...")
self.use_alt_limits(False) self.use_alt_limits(False)
self.current_state = "unlimited"
def sync_qbittorrent_state(self) -> None:
try:
if self.current_state == "unlimited":
actual_state = self.check_qbittorrent_alternate_limits()
if actual_state:
logger.warning(
"qBittorrent state mismatch detected: expected alt speed OFF, got ON. Re-syncing..."
)
self.use_alt_limits(False)
elif self.current_state == "throttled":
if self.last_alt_limits:
self.set_alt_speed_limits(*self.last_alt_limits)
actual_state = self.check_qbittorrent_alternate_limits()
if not actual_state:
logger.warning(
"qBittorrent state mismatch detected: expected alt speed ON, got OFF. Re-syncing..."
)
self.use_alt_limits(True)
elif self.current_state == "paused":
self.pause_all_torrents()
self.torrents_paused = True
except ServiceUnavailable:
pass
def should_change_state(self, new_streaming_state: bool) -> bool: def should_change_state(self, new_streaming_state: bool) -> bool:
"""Apply hysteresis to prevent rapid state changes""" """Apply hysteresis to prevent rapid state changes"""
@@ -167,7 +263,6 @@ class JellyfinQBittorrentMonitor:
time_since_change = now - self.last_state_change time_since_change = now - self.last_state_change
# Start throttling (streaming started)
if new_streaming_state and not self.last_streaming_state: if new_streaming_state and not self.last_streaming_state:
if time_since_change >= self.streaming_start_delay: if time_since_change >= self.streaming_start_delay:
self.last_state_change = now self.last_state_change = now
@@ -175,10 +270,9 @@ class JellyfinQBittorrentMonitor:
else: else:
remaining = self.streaming_start_delay - time_since_change remaining = self.streaming_start_delay - time_since_change
logger.info( logger.info(
f"Streaming started - waiting {remaining:.1f}s before enabling throttling" f"Streaming started - waiting {remaining:.1f}s before enforcing limits"
) )
# Stop throttling (streaming stopped)
elif not new_streaming_state and self.last_streaming_state: elif not new_streaming_state and self.last_streaming_state:
if time_since_change >= self.streaming_stop_delay: if time_since_change >= self.streaming_stop_delay:
self.last_state_change = now self.last_state_change = now
@@ -186,42 +280,119 @@ class JellyfinQBittorrentMonitor:
else: else:
remaining = self.streaming_stop_delay - time_since_change remaining = self.streaming_stop_delay - time_since_change
logger.info( logger.info(
f"Streaming stopped - waiting {remaining:.1f}s before disabling throttling" f"Streaming stopped - waiting {remaining:.1f}s before restoring unlimited mode"
) )
return False return False
def run(self): def run(self):
"""Main monitoring loop"""
logger.info("Starting Jellyfin-qBittorrent monitor") logger.info("Starting Jellyfin-qBittorrent monitor")
logger.info(f"Jellyfin URL: {self.jellyfin_url}") logger.info(f"Jellyfin URL: {self.jellyfin_url}")
logger.info(f"qBittorrent URL: {self.qbittorrent_url}") logger.info(f"qBittorrent URL: {self.qbittorrent_url}")
logger.info(f"Check interval: {self.check_interval}s") logger.info(f"Check interval: {self.check_interval}s")
logger.info(f"Streaming start delay: {self.streaming_start_delay}s")
logger.info(f"Streaming stop delay: {self.streaming_stop_delay}s")
logger.info(f"Total bandwidth budget: {self.total_bandwidth_budget} bps")
logger.info(f"Service buffer: {self.service_buffer} bps")
logger.info(f"Default stream bitrate: {self.default_stream_bitrate} bps")
logger.info(f"Minimum torrent speed: {self.min_torrent_speed} KB/s")
logger.info(f"Stream bitrate headroom: {self.stream_bitrate_headroom}x")
# Set up signal handlers
signal.signal(signal.SIGINT, self.signal_handler) signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler) signal.signal(signal.SIGTERM, self.signal_handler)
while self.running: while self.running:
try: try:
# Check for active streaming self.sync_qbittorrent_state()
try:
active_streams = self.check_jellyfin_sessions() active_streams = self.check_jellyfin_sessions()
except ServiceUnavailable:
logger.warning("Jellyfin unavailable, maintaining current state")
time.sleep(self.check_interval)
continue
streaming_active = len(active_streams) > 0 streaming_active = len(active_streams) > 0
if active_streams:
for stream in active_streams:
logger.debug(
f"Active stream: {stream['name']} ({stream['bitrate_bps']} bps)"
)
if active_streams != self.last_active_streams: if active_streams != self.last_active_streams:
# Log current status
if streaming_active: if streaming_active:
stream_names = ", ".join(
stream["name"] for stream in active_streams
)
logger.info( logger.info(
f"Active streams ({len(active_streams)}): {', '.join(active_streams)}" f"Active streams ({len(active_streams)}): {stream_names}"
) )
elif len(active_streams) == 0 and self.last_streaming_state: elif len(active_streams) == 0 and self.last_streaming_state:
logger.info("No active streaming sessions") logger.info("No active streaming sessions")
# Apply hysteresis and change state if needed
if self.should_change_state(streaming_active): if self.should_change_state(streaming_active):
self.last_streaming_state = streaming_active self.last_streaming_state = streaming_active
self.use_alt_limits(streaming_active)
streaming_state = bool(self.last_streaming_state)
total_streaming_bps = sum(
stream["bitrate_bps"] for stream in active_streams
)
remaining_bps = (
self.total_bandwidth_budget
- self.service_buffer
- total_streaming_bps
)
remaining_kbs = max(0, remaining_bps) / 8 / 1024
if not streaming_state:
desired_state = "unlimited"
elif streaming_active:
if remaining_kbs >= self.min_torrent_speed:
desired_state = "throttled"
else:
desired_state = "paused"
else:
desired_state = self.current_state
if desired_state != self.current_state:
if desired_state == "unlimited":
action = "resume torrents, disable alt speed"
elif desired_state == "throttled":
action = (
"set alt limits "
f"dl={int(remaining_kbs)}KB/s ul={int(remaining_kbs)}KB/s, enable alt speed"
)
else:
action = "pause torrents"
logger.info(
"State change %s -> %s | streams=%d total_bps=%d remaining_bps=%d action=%s",
self.current_state,
desired_state,
len(active_streams),
total_streaming_bps,
remaining_bps,
action,
)
if desired_state == "unlimited":
if self.torrents_paused:
self.resume_all_torrents()
self.torrents_paused = False
self.use_alt_limits(False)
elif desired_state == "throttled":
if self.torrents_paused:
self.resume_all_torrents()
self.torrents_paused = False
self.set_alt_speed_limits(remaining_kbs, remaining_kbs)
self.use_alt_limits(True)
else:
if not self.torrents_paused:
self.pause_all_torrents()
self.torrents_paused = True
self.current_state = desired_state
self.last_active_streams = active_streams self.last_active_streams = active_streams
time.sleep(self.check_interval) time.sleep(self.check_interval)
@@ -245,6 +416,11 @@ if __name__ == "__main__":
jellyfin_api_key = os.getenv("JELLYFIN_API_KEY") jellyfin_api_key = os.getenv("JELLYFIN_API_KEY")
streaming_start_delay = int(os.getenv("STREAMING_START_DELAY", "10")) streaming_start_delay = int(os.getenv("STREAMING_START_DELAY", "10"))
streaming_stop_delay = int(os.getenv("STREAMING_STOP_DELAY", "60")) streaming_stop_delay = int(os.getenv("STREAMING_STOP_DELAY", "60"))
total_bandwidth_budget = int(os.getenv("TOTAL_BANDWIDTH_BUDGET", "30000000"))
service_buffer = int(os.getenv("SERVICE_BUFFER", "5000000"))
default_stream_bitrate = int(os.getenv("DEFAULT_STREAM_BITRATE", "10000000"))
min_torrent_speed = int(os.getenv("MIN_TORRENT_SPEED", "100"))
stream_bitrate_headroom = float(os.getenv("STREAM_BITRATE_HEADROOM", "1.1"))
monitor = JellyfinQBittorrentMonitor( monitor = JellyfinQBittorrentMonitor(
jellyfin_url=jellyfin_url, jellyfin_url=jellyfin_url,
@@ -253,6 +429,11 @@ if __name__ == "__main__":
jellyfin_api_key=jellyfin_api_key, jellyfin_api_key=jellyfin_api_key,
streaming_start_delay=streaming_start_delay, streaming_start_delay=streaming_start_delay,
streaming_stop_delay=streaming_stop_delay, streaming_stop_delay=streaming_stop_delay,
total_bandwidth_budget=total_bandwidth_budget,
service_buffer=service_buffer,
default_stream_bitrate=default_stream_bitrate,
min_torrent_speed=min_torrent_speed,
stream_bitrate_headroom=stream_bitrate_headroom,
) )
monitor.run() monitor.run()

View File

@@ -11,6 +11,10 @@
config.services.jellyfin.dataDir config.services.jellyfin.dataDir
config.services.jellyfin.cacheDir config.services.jellyfin.cacheDir
]) ])
(lib.serviceFilePerms "jellyfin" [
"Z ${config.services.jellyfin.dataDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
"Z ${config.services.jellyfin.cacheDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
])
]; ];
services.jellyfin = { services.jellyfin = {
@@ -23,20 +27,34 @@
}; };
services.caddy.virtualHosts."jellyfin.${service_configs.https.domain}".extraConfig = '' services.caddy.virtualHosts."jellyfin.${service_configs.https.domain}".extraConfig = ''
reverse_proxy :${builtins.toString service_configs.ports.jellyfin} reverse_proxy :${builtins.toString service_configs.ports.jellyfin} {
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Proto {scheme}
}
request_body { request_body {
max_size 4096MB max_size 4096MB
} }
''; '';
systemd.tmpfiles.rules = [
"Z ${config.services.jellyfin.dataDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
"Z ${config.services.jellyfin.cacheDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
];
users.users.${config.services.jellyfin.user}.extraGroups = [ users.users.${config.services.jellyfin.user}.extraGroups = [
"video" "video"
"render" "render"
service_configs.media_group service_configs.media_group
]; ];
# Protect Jellyfin login from brute force attacks
services.fail2ban.jails.jellyfin = {
enabled = true;
settings = {
backend = "auto";
port = "http,https";
logpath = "${config.services.jellyfin.dataDir}/log/log_*.log";
# defaults: maxretry=5, findtime=10m, bantime=10m
};
filter.Definition = {
failregex = ''^.*Authentication request for .* has been denied \(IP: "<ADDR>"\)\..*$'';
ignoreregex = "";
};
};
} }

53
services/livekit.nix Normal file
View File

@@ -0,0 +1,53 @@
# LiveKit SFU plus its JWT token service, fronted by Caddy on one vhost.
# Referenced as the "livekit" RTC focus in the Matrix well-known document
# (see the msc4143 rtc_foci entry in services/matrix.nix).
{
  service_configs,
  ...
}:
let
  # Shared secret file: both livekit and lk-jwt-service read their API
  # key/secret pairs from it.
  keyFile = ../secrets/livekit_keys;
  ports = service_configs.ports;
in
{
  services.livekit = {
    enable = true;
    inherit keyFile;
    openFirewall = true;
    settings = {
      port = ports.livekit;
      # HTTP/WebSocket API listens on loopback only; Caddy terminates TLS
      # and proxies to it.
      bind_addresses = [ "127.0.0.1" ];
      rtc = {
        # WebRTC media port range.
        # NOTE(review): assumes openFirewall above opens this range — confirm.
        port_range_start = 50100;
        port_range_end = 50200;
        use_external_ip = true;
      };
      # Disable LiveKit's built-in TURN; coturn is already running
      turn = {
        enabled = false;
      };
      logging = {
        level = "info";
      };
    };
  };
  # Token service that mints LiveKit JWTs for clients of this deployment.
  services.lk-jwt-service = {
    enable = true;
    inherit keyFile;
    livekitUrl = "wss://${service_configs.livekit.domain}";
    port = ports.lk_jwt;
  };
  # Same vhost serves both: token/health paths go to lk-jwt-service,
  # everything else to the LiveKit server itself.
  services.caddy.virtualHosts."${service_configs.livekit.domain}".extraConfig = ''
    @jwt path /sfu/get /healthz
    handle @jwt {
      reverse_proxy :${builtins.toString ports.lk_jwt}
    }
    handle {
      reverse_proxy :${builtins.toString ports.livekit}
    }
  '';
}

99
services/matrix.nix Normal file
View File

@@ -0,0 +1,99 @@
# Matrix homeserver: continuwuity, built from a pinned upstream revision
# and reverse-proxied by Caddy, with coturn for TURN and federation exposed
# on a dedicated port.
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  # Override the nixpkgs matrix-continuwuity package with a pinned Forgejo
  # revision, re-vendoring the cargo dependencies for that source.
  package =
    let
      src = pkgs.fetchFromGitea {
        domain = "forgejo.ellis.link";
        owner = "continuwuation";
        repo = "continuwuity";
        rev = "052c4dfa2165fdc4839fed95b71446120273cf23";
        hash = "sha256-kQV4glRrKczoJpn9QIMgB5ac+saZQjSZPel+9K9Ykcs=";
      };
    in
    pkgs.matrix-continuwuity.overrideAttrs (old: {
      inherit src;
      cargoDeps = pkgs.rustPlatform.fetchCargoVendor {
        inherit src;
        name = "${old.pname}-vendor";
        hash = "sha256-vlOXQL8wwEGFX+w0G/eIeHW3J1UDzhJ501kYhAghDV8=";
      };
      # Empty on purpose; kept so local patches can be added without
      # reshaping the override.
      patches = (old.patches or [ ]) ++ [
      ];
    });
in
{
  imports = [
    # Homeserver state lives on the SSD pool; perms are (re)applied on activation.
    (lib.serviceMountWithZpool "continuwuity" service_configs.zpool_ssds [
      "/var/lib/private/continuwuity"
    ])
    (lib.serviceFilePerms "continuwuity" [
      "Z /var/lib/private/continuwuity 0770 ${config.services.matrix-continuwuity.user} ${config.services.matrix-continuwuity.group}"
    ])
  ];
  services.matrix-continuwuity = {
    enable = true;
    inherit package;
    settings.global = {
      port = [ service_configs.ports.matrix ];
      server_name = service_configs.https.domain;
      # Registration is open but gated by the token below.
      allow_registration = true;
      # NOTE(review): readFile embeds the token in the world-readable Nix
      # store — confirm that is acceptable here.
      registration_token = lib.strings.trim (builtins.readFile ../secrets/matrix_reg_token);
      new_user_displayname_suffix = "";
      trusted_servers = [
        "matrix.org"
        "constellatory.net"
        "tchncs.de"
        "envs.net"
      ];
      address = [
        "0.0.0.0"
      ];
      # TURN server config (coturn)
      turn_secret = config.services.coturn.static-auth-secret;
      turn_uris = [
        "turn:${service_configs.https.domain}?transport=udp"
        "turn:${service_configs.https.domain}?transport=tcp"
      ];
      turn_ttl = 86400;
    };
  };
  # Serve /.well-known/matrix/* from the apex domain so clients and other
  # servers discover the actual homeserver host; mkBefore keeps these
  # directives ahead of the vhost's other handlers.
  # NOTE(review): the "m.server" key inside the /client document is not a
  # standard client well-known field — confirm it is intentional.
  services.caddy.virtualHosts.${service_configs.https.domain}.extraConfig = lib.mkBefore ''
    header /.well-known/matrix/* Content-Type application/json
    header /.well-known/matrix/* Access-Control-Allow-Origin *
    respond /.well-known/matrix/server `{"m.server": "${service_configs.matrix.domain}:${builtins.toString service_configs.ports.https}"}`
    respond /.well-known/matrix/client `{"m.server":{"base_url":"https://${service_configs.matrix.domain}"},"m.homeserver":{"base_url":"https://${service_configs.matrix.domain}"},"org.matrix.msc3575.proxy":{"base_url":"https://${config.services.matrix-continuwuity.settings.global.server_name}"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://${service_configs.livekit.domain}"}]}`
  '';
  services.caddy.virtualHosts."${service_configs.matrix.domain}".extraConfig = ''
    reverse_proxy :${builtins.toString service_configs.ports.matrix}
  '';
  # Exact duplicate for federation port
  services.caddy.virtualHosts."${service_configs.matrix.domain}:${builtins.toString service_configs.ports.matrix_federation}".extraConfig =
    config.services.caddy.virtualHosts."${service_configs.matrix.domain}".extraConfig;
  # for federation
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.matrix_federation
  ];
  # for federation
  networking.firewall.allowedUDPPorts = [
    service_configs.ports.matrix_federation
  ];
}

View File

@@ -15,19 +15,10 @@
] ]
) )
inputs.nix-minecraft.nixosModules.minecraft-servers inputs.nix-minecraft.nixosModules.minecraft-servers
]; (lib.serviceFilePerms "minecraft-server-${service_configs.minecraft.server_name}" [
"Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 700 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
environment.systemPackages = [ "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web 750 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
(pkgs.writeScriptBin "mc-console" '' ])
#!/bin/sh
${pkgs.tmux}/bin/tmux -S /run/minecraft/${service_configs.minecraft.server_name}.sock attach
'')
];
nixpkgs.config.allowUnfreePredicate =
pkg:
builtins.elem (lib.getName pkg) [
"minecraft-server"
]; ];
services.minecraft-servers = { services.minecraft-servers = {
@@ -44,10 +35,41 @@
let let
heap_size = "4000M"; heap_size = "4000M";
in in
"-Xmx${heap_size} -Xms${heap_size} -XX:+UseZGC -XX:+ZGenerational"; lib.concatStringsSep " " [
# Memory
"-Xmx${heap_size}"
"-Xms${heap_size}"
# GC
"-XX:+UseZGC"
"-XX:+ZGenerational"
# Base JVM optimizations (brucethemoose/Minecraft-Performance-Flags-Benchmarks)
"-XX:+UnlockExperimentalVMOptions"
"-XX:+UnlockDiagnosticVMOptions"
"-XX:+AlwaysActAsServerClassMachine"
"-XX:+AlwaysPreTouch"
"-XX:+DisableExplicitGC"
"-XX:+UseNUMA"
"-XX:+PerfDisableSharedMem"
"-XX:+UseFastUnorderedTimeStamps"
"-XX:+UseCriticalJavaThreadPriority"
"-XX:ThreadPriorityPolicy=1"
"-XX:AllocatePrefetchStyle=3"
"-XX:-DontCompileHugeMethods"
"-XX:MaxNodeLimit=240000"
"-XX:NodeLimitFudgeFactor=8000"
"-XX:ReservedCodeCacheSize=400M"
"-XX:NonNMethodCodeHeapSize=12M"
"-XX:ProfiledCodeHeapSize=194M"
"-XX:NonProfiledCodeHeapSize=194M"
"-XX:NmethodSweepActivity=1"
"-XX:+UseVectorCmov"
# Large pages (requires vm.nr_hugepages sysctl)
"-XX:+UseLargePages"
"-XX:LargePageSizeInBytes=2m"
];
serverProperties = { serverProperties = {
server-port = 25565; server-port = service_configs.ports.minecraft;
enforce-whitelist = true; enforce-whitelist = true;
gamemode = "survival"; gamemode = "survival";
white-list = true; white-list = true;
@@ -66,23 +88,23 @@
with pkgs; with pkgs;
builtins.attrValues { builtins.attrValues {
FabricApi = fetchurl { FabricApi = fetchurl {
url = "https://cdn.modrinth.com/data/P7dR8mSH/versions/KhCFoeip/fabric-api-0.139.5%2B1.21.11.jar"; url = "https://cdn.modrinth.com/data/P7dR8mSH/versions/i5tSkVBH/fabric-api-0.141.3%2B1.21.11.jar";
sha512 = "852c9e76175b2d51cea191bfcc0005b824de433f1a6de01d672b9e82ca1cab8478b180670bc6c4811744ef4abec8bd2ff3ab0f9c1aa5644713d06f3fbcc278f0"; sha512 = "c20c017e23d6d2774690d0dd774cec84c16bfac5461da2d9345a1cd95eee495b1954333c421e3d1c66186284d24a433f6b0cced8021f62e0bfa617d2384d0471";
}; };
FerriteCore = fetchurl { FerriteCore = fetchurl {
url = "https://cdn.modrinth.com/data/uXXizFIs/versions/eRLwt73x/ferritecore-8.0.3-fabric.jar"; url = "https://cdn.modrinth.com/data/uXXizFIs/versions/Ii0gP3D8/ferritecore-8.2.0-fabric.jar";
sha512 = "be600543e499b59286f9409f46497570adc51939ae63eaa12ac29e6778da27d8c7c6cd0b3340d8bcca1cc99ce61779b1a8f52b990f9e4e9a93aa9c6482905231"; sha512 = "3210926a82eb32efd9bcebabe2f6c053daf5c4337eebc6d5bacba96d283510afbde646e7e195751de795ec70a2ea44fef77cb54bf22c8e57bb832d6217418869";
}; };
Lithium = fetchurl { Lithium = fetchurl {
url = "https://cdn.modrinth.com/data/gvQqBUqZ/versions/4DdLmtyz/lithium-fabric-0.21.1%2Bmc1.21.11.jar"; url = "https://cdn.modrinth.com/data/gvQqBUqZ/versions/qvNsoO3l/lithium-fabric-0.21.3%2Bmc1.21.11.jar";
sha512 = "0857d30d063dc704a264b2fe774a7e641926193cfdcde72fe2cd603043d8548045b955e30c05b1b2b96ef7d1c0f85d55269da26f44a0644c984b45623e976794"; sha512 = "2883739303f0bb602d3797cc601ed86ce6833e5ec313ddce675f3d6af3ee6a40b9b0a06dafe39d308d919669325e95c0aafd08d78c97acd976efde899c7810fd";
}; };
NoChatReports = fetchurl { NoChatReports = fetchurl {
url = "https://cdn.modrinth.com/data/qQyHxfxd/versions/78RjC1gi/NoChatReports-FABRIC-1.21.10-v2.16.0.jar"; url = "https://cdn.modrinth.com/data/qQyHxfxd/versions/rhykGstm/NoChatReports-FABRIC-1.21.11-v2.18.0.jar";
sha512 = "39b2f284f73f8290012b8b9cc70085d59668547fc7b4ec43ab34e4bca6b39a6691fbe32bc3326e40353ba9c16a06320e52818315be77799a5aad526370cbc773"; sha512 = "d2c35cc8d624616f441665aff67c0e366e4101dba243bad25ed3518170942c1a3c1a477b28805cd1a36c44513693b1c55e76bea627d3fced13927a3d67022ccc";
}; };
squaremap = fetchurl { squaremap = fetchurl {
@@ -96,8 +118,8 @@
}; };
c2me = fetchurl { c2me = fetchurl {
url = "https://cdn.modrinth.com/data/VSNURh3q/versions/DLKF3HZk/c2me-fabric-mc1.21.11-0.3.6%2Bbeta.1.0.jar"; url = "https://cdn.modrinth.com/data/VSNURh3q/versions/QdLiMUjx/c2me-fabric-mc1.21.11-0.3.7%2Balpha.0.7.jar";
sha512 = "d4f983aeb5083033b525522e623a9a9ba86b6fc9c83db008cc0575d0077e736ac9bee0b6b0e03b8d1c89ae27a4e5cdc269041f61eb0d1a10757de4c30b065467"; sha512 = "f9543febe2d649a82acd6d5b66189b6a3d820cf24aa503ba493fdb3bbd4e52e30912c4c763fe50006f9a46947ae8cd737d420838c61b93429542573ed67f958e";
}; };
krypton = fetchurl { krypton = fetchurl {
@@ -105,31 +127,46 @@
sha512 = "4dcd7228d1890ddfc78c99ff284b45f9cf40aae77ef6359308e26d06fa0d938365255696af4cc12d524c46c4886cdcd19268c165a2bf0a2835202fe857da5cab"; sha512 = "4dcd7228d1890ddfc78c99ff284b45f9cf40aae77ef6359308e26d06fa0d938365255696af4cc12d524c46c4886cdcd19268c165a2bf0a2835202fe857da5cab";
}; };
/*
better-fabric-console = fetchurl { better-fabric-console = fetchurl {
url = "https://cdn.modrinth.com/data/Y8o1j1Sf/versions/fZprQjU4/better-fabric-console-mc1.21.10-1.2.7.jar"; url = "https://cdn.modrinth.com/data/Y8o1j1Sf/versions/6aIKl5wy/better-fabric-console-mc1.21.11-1.2.9.jar";
sha512 = "0321e4a687ba5ed4dcb081aa48909d45c4e153f8b6217cd807f280f33250151b97ac80a122a83d48535c788d3c1e08a7ee882da3b20cf06021e03c1ddc943278"; sha512 = "427247dafd99df202ee10b4bf60ffcbbecbabfadb01c167097ffb5b85670edb811f4d061c2551be816295cbbc6b8ec5ec464c14a6ff41912ef1f6c57b038d320";
}; };
*/
disconnect-packet-fix = fetchurl { disconnect-packet-fix = fetchurl {
url = "https://cdn.modrinth.com/data/rd9rKuJT/versions/Gv74xveQ/disconnect-packet-fix-fabric-2.0.0.jar"; url = "https://cdn.modrinth.com/data/rd9rKuJT/versions/Gv74xveQ/disconnect-packet-fix-fabric-2.0.0.jar";
sha512 = "1fd6f09a41ce36284e1a8e9def53f3f6834d7201e69e54e24933be56445ba569fbc26278f28300d36926ba92db6f4f9c0ae245d23576aaa790530345587316db"; sha512 = "1fd6f09a41ce36284e1a8e9def53f3f6834d7201e69e54e24933be56445ba569fbc26278f28300d36926ba92db6f4f9c0ae245d23576aaa790530345587316db";
}; };
# Mixin apply for mod packetfixer failed
/*
packet-fixer = fetchurl { packet-fixer = fetchurl {
url = "https://cdn.modrinth.com/data/c7m1mi73/versions/LFMYVIc7/packetfixer-fabric-3.3.2-1.21.11.jar"; url = "https://cdn.modrinth.com/data/c7m1mi73/versions/CUh1DWeO/packetfixer-fabric-3.3.4-1.21.11.jar";
sha512 = "a7cdc4b81653ca7c823c91ffd29092365feff78b8d8e019f35ab6c47a0f18661768656cc5fe73f802ab7097d828d8173cc23d32b454a7acd64ff6b7118789413"; sha512 = "33331b16cb40c5e6fbaade3cacc26f3a0e8fa5805a7186f94d7366a0e14dbeee9de2d2e8c76fa71f5e9dd24eb1c261667c35447e32570ea965ca0f154fdfba0a";
}; };
*/
# fork of Modernfix for 1.21.11 (upstream will support 26.1)
modernfix = fetchurl {
url = "https://cdn.modrinth.com/data/TjSm1wrD/versions/JwSO8JCN/modernfix-5.25.2-build.4.jar";
sha512 = "0d65c05ac0475408c58ef54215714e6301113101bf98bfe4bb2ba949fbfddd98225ac4e2093a5f9206a9e01ba80a931424b237bdfa3b6e178c741ca6f7f8c6a3";
};
debugify = fetchurl {
url = "https://cdn.modrinth.com/data/QwxR6Gcd/versions/8Q49lnaU/debugify-1.21.11%2B1.0.jar";
sha512 = "04d82dd33f44ced37045f1f9a54ad4eacd70861ff74a8800f2d2df358579e6cb0ea86a34b0086b3e87026b1a0691dd6594b4fdc49f89106466eea840518beb03";
};
} }
); );
}; };
}; };
}; };
systemd.services.minecraft-server-main = {
serviceConfig = {
Nice = -5;
IOSchedulingPriority = 0;
LimitMEMLOCK = "infinity"; # Required for large pages
};
};
services.caddy.virtualHosts = lib.mkIf (config.services.caddy.enable) { services.caddy.virtualHosts = lib.mkIf (config.services.caddy.enable) {
"map.${service_configs.https.domain}".extraConfig = '' "map.${service_configs.https.domain}".extraConfig = ''
root * ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web root * ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web
@@ -145,7 +182,8 @@
}; };
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 700 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}" # Allow caddy (in minecraft group) to traverse to squaremap/web for map.gardling.com
"Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web 750 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}" "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
"z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
]; ];
} }

View File

@@ -5,9 +5,12 @@
}: }:
{ {
imports = [ imports = [
(lib.serviceMountWithZpool "monero" service_configs.zpool_ssds [ (lib.serviceMountWithZpool "monero" service_configs.zpool_hdds [
service_configs.monero.dataDir service_configs.monero.dataDir
]) ])
(lib.serviceFilePerms "monero" [
"Z ${service_configs.monero.dataDir} 0700 monero monero"
])
]; ];
services.monero = { services.monero = {
@@ -17,8 +20,4 @@
restricted = true; restricted = true;
}; };
}; };
systemd.tmpfiles.rules = [
"Z ${service_configs.monero.dataDir} 0700 monero monero"
];
} }

10
services/ntfy-alerts.nix Normal file
View File

@@ -0,0 +1,10 @@
# Enable the custom ntfyAlerts module, pointed at the local ntfy instance.
# Topic name and access token are supplied via age-managed secret files so
# neither lands in the Nix store.
{ config, service_configs, ... }:
{
  services.ntfyAlerts = {
    enable = true;
    serverUrl = "https://${service_configs.ntfy.domain}";
    topicFile = config.age.secrets.ntfy-alerts-topic.path;
    tokenFile = config.age.secrets.ntfy-alerts-token.path;
  };
}

34
services/ntfy.nix Normal file
View File

@@ -0,0 +1,34 @@
# ntfy push-notification server behind Caddy.
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Service state lives on the SSD pool; perms (re)applied on activation.
    (lib.serviceMountWithZpool "ntfy-sh" service_configs.zpool_ssds [
      "/var/lib/private/ntfy-sh"
    ])
    (lib.serviceFilePerms "ntfy-sh" [
      "Z /var/lib/private/ntfy-sh 0700 ${config.services.ntfy-sh.user} ${config.services.ntfy-sh.group}"
    ])
  ];
  services.ntfy-sh = {
    enable = true;
    settings = {
      base-url = "https://${service_configs.ntfy.domain}";
      # Loopback only; Caddy terminates TLS and proxies in.
      listen-http = "127.0.0.1:${builtins.toString service_configs.ports.ntfy}";
      behind-proxy = true;
      # No anonymous access to any topic; login required, self-signup off.
      auth-default-access = "deny-all";
      enable-login = true;
      enable-signup = false;
    };
  };
  services.caddy.virtualHosts."${service_configs.ntfy.domain}".extraConfig = ''
    reverse_proxy :${builtins.toString service_configs.ports.ntfy}
  '';
}

View File

@@ -10,6 +10,9 @@
(lib.serviceMountWithZpool "postgresql" service_configs.zpool_ssds [ (lib.serviceMountWithZpool "postgresql" service_configs.zpool_ssds [
config.services.postgresql.dataDir config.services.postgresql.dataDir
]) ])
(lib.serviceFilePerms "postgresql" [
"Z ${config.services.postgresql.dataDir} 0700 postgres postgres"
])
]; ];
services.postgresql = { services.postgresql = {
@@ -18,8 +21,4 @@
dataDir = service_configs.postgres.dataDir; dataDir = service_configs.postgres.dataDir;
}; };
systemd.tmpfiles.rules = [
# postgresql requires 0700
"Z ${config.services.postgresql.dataDir} 0700 postgresql postgresql"
];
} }

View File

@@ -17,12 +17,24 @@
"${config.services.qbittorrent.profileDir}/qBittorrent" "${config.services.qbittorrent.profileDir}/qBittorrent"
]) ])
(lib.vpnNamespaceOpenPort config.services.qbittorrent.webuiPort "qbittorrent") (lib.vpnNamespaceOpenPort config.services.qbittorrent.webuiPort "qbittorrent")
(lib.serviceFilePerms "qbittorrent" [
# 0770: group (media) needs write to delete files during upgrades —
# Radarr/Sonarr must unlink the old file before placing the new one.
"Z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.SavePath} 0770 ${config.services.qbittorrent.user} ${service_configs.media_group}"
"Z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.TempPath} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
"Z ${config.services.qbittorrent.profileDir} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
])
]; ];
services.qbittorrent = { services.qbittorrent = {
enable = true; enable = true;
webuiPort = service_configs.ports.torrent; webuiPort = service_configs.ports.torrent;
profileDir = "/var/lib/qBittorrent"; profileDir = "/var/lib/qBittorrent";
# Set the service group to 'media' so the systemd unit runs with media as
# the primary GID. Linux assigns new file ownership from the process's GID
# (set by systemd's Group= directive), not from /etc/passwd. Without this,
# downloads land as qbittorrent:qbittorrent (0700), blocking Radarr/Sonarr.
group = service_configs.media_group;
serverConfig.LegalNotice.Accepted = true; serverConfig.LegalNotice.Accepted = true;
@@ -43,7 +55,7 @@
serverConfig.BitTorrent = { serverConfig.BitTorrent = {
Session = { Session = {
MaxConnectionsPerTorrent = 10; MaxConnectionsPerTorrent = 50;
MaxUploadsPerTorrent = 10; MaxUploadsPerTorrent = 10;
MaxConnections = -1; MaxConnections = -1;
MaxUploads = -1; MaxUploads = -1;
@@ -51,9 +63,10 @@
MaxActiveCheckingTorrents = 5; MaxActiveCheckingTorrents = 5;
# queueing # queueing
QueueingSystemEnabled = false; QueueingSystemEnabled = true;
MaxActiveDownloads = 2; # num of torrents that can download at the same time MaxActiveDownloads = 5; # keep focused: fewer torrents, each gets more bandwidth
MaxActiveUploads = 20; MaxActiveUploads = -1;
MaxActiveTorrents = -1;
IgnoreSlowTorrentsForQueueing = true; IgnoreSlowTorrentsForQueueing = true;
GlobalUPSpeedLimit = 0; GlobalUPSpeedLimit = 0;
@@ -64,7 +77,7 @@
AlternativeGlobalDLSpeedLimit = 800; # 800 KB/s when throttled AlternativeGlobalDLSpeedLimit = 800; # 800 KB/s when throttled
IncludeOverheadInLimits = true; IncludeOverheadInLimits = true;
GlobalMaxRatio = 6.0; GlobalMaxRatio = 7.0;
AddTrackersEnabled = true; AddTrackersEnabled = true;
AdditionalTrackers = lib.concatStringsSep "\\n" ( AdditionalTrackers = lib.concatStringsSep "\\n" (
@@ -78,12 +91,17 @@
inherit (config.services.qbittorrent.serverConfig.Preferences.Downloads) TempPath; inherit (config.services.qbittorrent.serverConfig.Preferences.Downloads) TempPath;
TempPathEnabled = true; TempPathEnabled = true;
# how many connections per sec ConnectionSpeed = 200;
ConnectionSpeed = 300;
# Automatic Torrent Management: use category save paths for new torrents
DisableAutoTMMByDefault = false;
DisableAutoTMMTriggers.CategorySavePathChanged = false;
DisableAutoTMMTriggers.DefaultSavePathChanged = false;
ChokingAlgorithm = "RateBased"; ChokingAlgorithm = "RateBased";
PieceExtentAffinity = true; PieceExtentAffinity = true;
SuggestMode = true; SuggestMode = true;
CoalesceReadWrite = true;
}; };
Network = { Network = {
@@ -96,15 +114,9 @@
systemd.services.qbittorrent.serviceConfig.TimeoutStopSec = lib.mkForce 10; systemd.services.qbittorrent.serviceConfig.TimeoutStopSec = lib.mkForce 10;
systemd.tmpfiles.rules = [
"Z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.SavePath} 0750 ${config.services.qbittorrent.user} ${service_configs.media_group}"
"Z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.TempPath} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
"Z ${config.services.qbittorrent.profileDir} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
];
services.caddy.virtualHosts."torrent.${service_configs.https.domain}".extraConfig = '' services.caddy.virtualHosts."torrent.${service_configs.https.domain}".extraConfig = ''
import ${config.age.secrets.caddy_auth.path} import ${config.age.secrets.caddy_auth.path}
reverse_proxy ${service_configs.https.wg_ip}:${builtins.toString config.services.qbittorrent.webuiPort} reverse_proxy ${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString config.services.qbittorrent.webuiPort}
''; '';
users.users.${config.services.qbittorrent.user}.extraGroups = [ users.users.${config.services.qbittorrent.user}.extraGroups = [

View File

@@ -16,6 +16,12 @@ in
service_configs.slskd.downloads service_configs.slskd.downloads
service_configs.slskd.incomplete service_configs.slskd.incomplete
]) ])
(lib.serviceFilePerms "slskd" [
"Z ${service_configs.music_dir} 0750 ${username} music"
"Z ${service_configs.slskd.base} 0750 ${config.services.slskd.user} ${config.services.slskd.group}"
"Z ${service_configs.slskd.downloads} 0750 ${config.services.slskd.user} music"
"Z ${service_configs.slskd.incomplete} 0750 ${config.services.slskd.user} music"
])
]; ];
users.groups."music" = { }; users.groups."music" = { };
@@ -65,13 +71,6 @@ in
users.users.${config.services.jellyfin.user}.extraGroups = [ "music" ]; users.users.${config.services.jellyfin.user}.extraGroups = [ "music" ];
users.users.${username}.extraGroups = [ "music" ]; users.users.${username}.extraGroups = [ "music" ];
systemd.tmpfiles.rules = [
"Z ${service_configs.music_dir} 0750 ${username} music"
"Z ${service_configs.slskd.base} 0750 ${config.services.slskd.user} ${config.services.slskd.group}"
"Z ${service_configs.slskd.downloads} 0750 ${config.services.slskd.user} music"
"Z ${service_configs.slskd.incomplete} 0750 ${config.services.slskd.user} music"
];
# doesn't work with auth???? # doesn't work with auth????
services.caddy.virtualHosts."soulseek.${service_configs.https.domain}".extraConfig = '' services.caddy.virtualHosts."soulseek.${service_configs.https.domain}".extraConfig = ''
reverse_proxy :${builtins.toString config.services.slskd.settings.web.port} reverse_proxy :${builtins.toString config.services.slskd.settings.web.port}

View File

@@ -32,5 +32,4 @@
# used for deploying configs to server # used for deploying configs to server
users.users.root.openssh.authorizedKeys.keys = users.users.root.openssh.authorizedKeys.keys =
config.users.users.${username}.openssh.authorizedKeys.keys; config.users.users.${username}.openssh.authorizedKeys.keys;
} }

54
services/syncthing.nix Normal file
View File

@@ -0,0 +1,54 @@
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
let
  # Shorthand for this service's slice of the shared config tree.
  cfg = service_configs.syncthing;
  stUser = config.services.syncthing.user;
  stGroup = config.services.syncthing.group;
  # Every directory Syncthing owns: its state dir plus the two phone-backup
  # folders. Mounted from the SSD pool and chowned to the service account.
  syncthingDirs = [
    cfg.dataDir
    cfg.signalBackupDir
    cfg.grayjayBackupDir
  ];
in
{
  imports = [
    (lib.serviceMountWithZpool "syncthing" service_configs.zpool_ssds syncthingDirs)
    (lib.serviceFilePerms "syncthing" (
      map (dir: "Z ${dir} 0750 ${stUser} ${stGroup}") syncthingDirs
    ))
  ];
  services.syncthing = {
    enable = true;
    dataDir = cfg.dataDir;
    guiAddress = "127.0.0.1:${toString service_configs.ports.syncthing_gui}";
    overrideDevices = false;
    overrideFolders = false;
    settings = {
      gui = {
        insecureSkipHostcheck = true; # Allow access via reverse proxy
      };
      options = {
        urAccepted = 1; # enable usage reporting
        relaysEnabled = true;
      };
    };
  };
  # Open firewall ports for syncthing protocol
  networking.firewall = {
    allowedTCPPorts = [ service_configs.ports.syncthing_protocol ];
    allowedUDPPorts = [ service_configs.ports.syncthing_discovery ];
  };
  services.caddy.virtualHosts."syncthing.${service_configs.https.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${toString service_configs.ports.syncthing_gui}
  '';
}

63
services/xmrig.nix Normal file
View File

@@ -0,0 +1,63 @@
{
  config,
  lib,
  pkgs,
  hostname,
  ...
}:
let
  # Monero wallet address kept out of the store-visible Nix code; trim
  # removes the trailing newline readFile keeps from the secrets file.
  walletAddress = lib.strings.trim (builtins.readFile ../secrets/xmrig-wallet);
  # Number of CPU threads dedicated to mining.
  threadCount = 12;
in
{
  services.xmrig = {
    enable = true;
    package = pkgs.xmrig;
    settings = {
      autosave = true;
      cpu = {
        enabled = true;
        huge-pages = true;
        hw-aes = true;
        # One RandomX mining thread per CPU index 0..threadCount-1.
        rx = lib.range 0 (threadCount - 1);
      };
      randomx = {
        # Back the RandomX dataset with 1GB huge pages; the pages themselves
        # are reserved via boot.kernelParams below.
        "1gb-pages" = true;
      };
      opencl = false;
      cuda = false;
      pools = [
        {
          url = "gulf.moneroocean.stream:20128";
          user = walletAddress;
          # Pool password doubles as worker label; presumably "~rx/0" pins
          # the algorithm on MoneroOcean — TODO confirm against pool docs.
          pass = hostname + "~rx/0";
          keepalive = true;
          tls = true;
        }
      ];
    };
  };
  # Run the miner at the lowest CPU and IO priority so it only consumes
  # cycles that nothing else on the host wants.
  systemd.services.xmrig.serviceConfig = {
    Nice = 19;
    CPUSchedulingPolicy = "idle";
    IOSchedulingClass = "idle";
  };
  # Stop mining on UPS battery to conserve power
  services.apcupsd.hooks = lib.mkIf config.services.apcupsd.enable {
    onbattery = "systemctl stop xmrig";
    offbattery = "systemctl start xmrig";
  };
  # Reserve 3x 1GB huge pages for RandomX (dataset is ~2GB)
  boot.kernelParams = [
    "hugepagesz=1G"
    "hugepages=3"
  ];
}

124
tests/fail2ban-caddy.nix Normal file
View File

@@ -0,0 +1,124 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: the fail2ban "caddy-auth" jail bans a client that repeatedly fails
# Caddy basic auth, but does NOT ban plain unauthenticated requests (the
# normal browser challenge flow), because the failregex requires that an
# Authorization header was actually sent.
pkgs.testers.runNixOSTest {
  name = "fail2ban-caddy";
  nodes = {
    server =
      {
        config,
        pkgs,
        lib,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
        ];
        # Set up Caddy with basic auth (minimal config, no production stuff)
        # Using bcrypt hash generated with: caddy hash-password --plaintext testpass
        services.caddy = {
          enable = true;
          virtualHosts.":80".extraConfig = ''
            log {
              output file /var/log/caddy/access-server.log
              format json
            }
            basic_auth {
              testuser $2a$14$XqaQlGTdmofswciqrLlMz.rv0/jiGQq8aU.fP6mh6gCGiLf6Cl3.a
            }
            respond "Authenticated!" 200
          '';
        };
        # Add the fail2ban jail for caddy-auth (same as in services/caddy.nix)
        services.fail2ban.jails.caddy-auth = {
          enabled = true;
          settings = {
            backend = "auto";
            port = "http,https";
            logpath = "/var/log/caddy/access-*.log";
            maxretry = 3; # Lower for testing
          };
          filter.Definition = {
            # Only match 401s where an Authorization header was actually sent
            failregex = ''^.*"remote_ip":"<HOST>".*"Authorization":\["REDACTED"\].*"status":401.*$'';
            ignoreregex = "";
            # Caddy's JSON logs timestamp entries with a fractional epoch "ts".
            datepattern = ''"ts":{Epoch}\.'';
          };
        };
        # Create log directory and initial log file so fail2ban can start
        systemd.tmpfiles.rules = [
          "d /var/log/caddy 755 caddy caddy"
          "f /var/log/caddy/access-server.log 644 caddy caddy"
        ];
        networking.firewall.allowedTCPPorts = [ 80 ];
      };
    # Second VM acts as the attacking/legitimate HTTP client.
    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };
  testScript = ''
    import time
    import re
    start_all()
    server.wait_for_unit("caddy.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(80)
    time.sleep(2)
    with subtest("Verify caddy-auth jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "caddy-auth" in status, f"caddy-auth jail not found in: {status}"
    with subtest("Verify correct password works"):
        # Use -4 to force IPv4 for consistency
        result = client.succeed("curl -4 -s -u testuser:testpass http://server/")
        print(f"Curl result: {result}")
        assert "Authenticated" in result, f"Auth should succeed: {result}"
    with subtest("Unauthenticated requests (browser probes) should not trigger ban"):
        # Simulate browser probe requests - no Authorization header sent
        # This is the normal HTTP Basic Auth challenge-response flow:
        # browser sends request without credentials, gets 401, then resends with credentials
        for i in range(5):
            client.execute("curl -4 -s http://server/ || true")
            time.sleep(0.5)
        time.sleep(3)
        status = server.succeed("fail2ban-client status caddy-auth")
        print(f"caddy-auth jail status after unauthenticated requests: {status}")
        match = re.search(r"Currently banned:\s*(\d+)", status)
        banned = int(match.group(1)) if match else 0
        assert banned == 0, f"Unauthenticated 401s should NOT trigger ban, but {banned} IPs were banned: {status}"
    with subtest("Generate failed basic auth attempts (wrong password)"):
        # Use -4 to force IPv4 for consistent IP tracking
        # These send an Authorization header with wrong credentials
        for i in range(4):
            client.execute("curl -4 -s -u testuser:wrongpass http://server/ || true")
            time.sleep(1)
    with subtest("Verify IP is banned after wrong password attempts"):
        time.sleep(5)
        status = server.succeed("fail2ban-client status caddy-auth")
        print(f"caddy-auth jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"
    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}

123
tests/fail2ban-gitea.nix Normal file
View File

@@ -0,0 +1,123 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: the fail2ban "gitea" jail bans a client after repeated failed
# Gitea web logins. The production gitea module is imported with stubbed
# ZFS/file-perms helpers so it evaluates without the real storage layout.
let
  # Minimal stand-in for the repo-wide service_configs attrset, providing
  # only the keys services/gitea.nix reads.
  testServiceConfigs = {
    zpool_ssds = "";
    gitea = {
      dir = "/var/lib/gitea";
      domain = "git.test.local";
    };
    postgres = {
      socket = "/run/postgresql";
    };
    ports = {
      gitea = 3000;
    };
  };
  # lib with the ZFS-mount and file-perms helpers replaced by no-op modules,
  # so importing the production service module pulls in no ZFS dependencies.
  testLib = lib.extend (
    final: prev: {
      serviceMountWithZpool =
        serviceName: zpool: dirs:
        { ... }:
        { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
    }
  );
  # The real production gitea module, instantiated with the test stubs.
  giteaModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/gitea.nix {
          inherit config pkgs;
          lib = testLib;
          service_configs = testServiceConfigs;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-gitea";
  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          giteaModule
        ];
        # Enable postgres for gitea
        services.postgresql.enable = true;
        # Disable ZFS mount dependency
        systemd.services."gitea-mounts".enable = lib.mkForce false;
        systemd.services.gitea = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ "postgresql.service" ];
          requires = lib.mkForce [ ];
        };
        # Override for faster testing and correct port
        services.fail2ban.jails.gitea.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Gitea port, not via Caddy
          port = lib.mkForce "3000";
        };
        networking.firewall.allowedTCPPorts = [ 3000 ];
      };
    # Second VM acts as the attacking client.
    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };
  testScript = ''
    import time
    import re
    start_all()
    server.wait_for_unit("postgresql.service")
    server.wait_for_unit("gitea.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(3000)
    time.sleep(3)
    with subtest("Verify gitea jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "gitea" in status, f"gitea jail not found in: {status}"
    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute(
                "curl -4 -s -X POST http://server:3000/user/login -d 'user_name=baduser&password=badpass' || true"
            )
            time.sleep(0.5)
    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status gitea")
        print(f"gitea jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"
    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:3000/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}

135
tests/fail2ban-immich.nix Normal file
View File

@@ -0,0 +1,135 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: the fail2ban "immich" jail bans a client after repeated failed
# Immich API logins. The production immich module is imported with stubbed
# ZFS/file-perms helpers so it evaluates without the real storage layout.
let
  # Minimal stand-in for the repo-wide service_configs attrset, providing
  # only the keys services/immich.nix reads.
  testServiceConfigs = {
    zpool_ssds = "";
    https = {
      domain = "test.local";
    };
    ports = {
      immich = 2283;
    };
    immich = {
      dir = "/var/lib/immich";
    };
  };
  # lib with the ZFS-mount and file-perms helpers replaced by no-op modules.
  testLib = lib.extend (
    final: prev: {
      serviceMountWithZpool =
        serviceName: zpool: dirs:
        { ... }:
        { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
    }
  );
  # The real production immich module, instantiated with the test stubs.
  immichModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/immich.nix {
          inherit config pkgs;
          lib = testLib;
          service_configs = testServiceConfigs;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-immich";
  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          immichModule
        ];
        # Immich needs postgres
        services.postgresql.enable = true;
        # Let immich create its own DB for testing
        services.immich.database.createDB = lib.mkForce true;
        # Disable ZFS mount dependencies
        systemd.services."immich-server-mounts".enable = lib.mkForce false;
        systemd.services."immich-machine-learning-mounts".enable = lib.mkForce false;
        systemd.services.immich-server = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ "postgresql.service" ];
          requires = lib.mkForce [ ];
        };
        systemd.services.immich-machine-learning = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };
        # Override for faster testing and correct port
        services.fail2ban.jails.immich.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Immich port, not via Caddy
          port = lib.mkForce "2283";
        };
        networking.firewall.allowedTCPPorts = [ 2283 ];
        # Immich needs more resources
        virtualisation.diskSize = 4 * 1024;
        virtualisation.memorySize = 4 * 1024; # 4GB RAM for Immich
      };
    # Second VM acts as the attacking client.
    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };
  testScript = ''
    import time
    import re
    start_all()
    server.wait_for_unit("postgresql.service")
    server.wait_for_unit("immich-server.service", timeout=120)
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(2283, timeout=60)
    time.sleep(3)
    with subtest("Verify immich jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "immich" in status, f"immich jail not found in: {status}"
    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute(
                "curl -4 -s -X POST http://server:2283/api/auth/login -H 'Content-Type: application/json' -d '{\"email\":\"bad@user.com\",\"password\":\"badpass\"}' || true"
            )
            time.sleep(0.5)
    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status immich")
        print(f"immich jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"
    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:2283/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}

147
tests/fail2ban-jellyfin.nix Normal file
View File

@@ -0,0 +1,147 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: the fail2ban "jellyfin" jail bans a client after repeated failed
# Jellyfin logins. Jellyfin logs to files (not the journal), so the test
# pre-creates a placeholder log file and reloads the jail once the real
# log files appear.
let
  # Minimal stand-in for the repo-wide service_configs attrset, providing
  # only the keys services/jellyfin.nix reads.
  testServiceConfigs = {
    zpool_ssds = "";
    https = {
      domain = "test.local";
    };
    ports = {
      jellyfin = 8096;
    };
    jellyfin = {
      dataDir = "/var/lib/jellyfin";
      cacheDir = "/var/cache/jellyfin";
    };
    media_group = "media";
  };
  # lib with the ZFS-mount/file-perms/package-optimization helpers replaced
  # by no-ops so the production module evaluates in the test VM.
  testLib = lib.extend (
    final: prev: {
      serviceMountWithZpool =
        serviceName: zpool: dirs:
        { ... }:
        { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
      optimizePackage = pkg: pkg; # No-op for testing
    }
  );
  # The real production jellyfin module, instantiated with the test stubs.
  jellyfinModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/jellyfin.nix {
          inherit config pkgs;
          lib = testLib;
          service_configs = testServiceConfigs;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-jellyfin";
  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          jellyfinModule
        ];
        # Create the media group
        users.groups.media = { };
        # Disable ZFS mount dependency
        systemd.services."jellyfin-mounts".enable = lib.mkForce false;
        systemd.services.jellyfin = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };
        # Override for faster testing and correct port
        services.fail2ban.jails.jellyfin.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Jellyfin port, not via Caddy
          port = lib.mkForce "8096";
        };
        # Create log directory and placeholder log file for fail2ban
        # Jellyfin logs to files, not systemd journal
        systemd.tmpfiles.rules = [
          "d /var/lib/jellyfin/log 0755 jellyfin jellyfin"
          "f /var/lib/jellyfin/log/log_placeholder.log 0644 jellyfin jellyfin"
        ];
        # Make fail2ban start after Jellyfin
        systemd.services.fail2ban = {
          wants = [ "jellyfin.service" ];
          after = [ "jellyfin.service" ];
        };
        # Give jellyfin more disk space and memory
        virtualisation.diskSize = 3 * 1024;
        virtualisation.memorySize = 2 * 1024;
      };
    # Second VM acts as the attacking client.
    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };
  testScript = ''
    import time
    import re
    start_all()
    server.wait_for_unit("jellyfin.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(8096)
    server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=60)
    time.sleep(2)
    # Wait for Jellyfin to create real log files and reload fail2ban
    server.wait_until_succeeds("ls /var/lib/jellyfin/log/log_2*.log", timeout=30)
    server.succeed("fail2ban-client reload jellyfin")
    with subtest("Verify jellyfin jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "jellyfin" in status, f"jellyfin jail not found in: {status}"
    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute("""
                curl -4 -s -X POST http://server:8096/Users/authenticatebyname \
                    -H 'Content-Type: application/json' \
                    -H 'X-Emby-Authorization: MediaBrowser Client="test", Device="test", DeviceId="test", Version="1.0"' \
                    -d '{"Username":"baduser","Pw":"badpass"}' || true
            """)
            time.sleep(0.5)
    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status jellyfin")
        print(f"jellyfin jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"
    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:8096/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}

104
tests/fail2ban-ssh.nix Normal file
View File

@@ -0,0 +1,104 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: the fail2ban "sshd" jail bans a client after repeated failed SSH
# password logins. Password auth is force-enabled only inside the test VM.
let
  # The production hardening module, imported unmodified.
  securityModule = import ../modules/security.nix;
  # The production SSH module, instantiated with a throwaway test user.
  sshModule =
    {
      config,
      lib,
      pkgs,
      ...
    }:
    {
      imports = [
        (import ../services/ssh.nix {
          inherit config lib pkgs;
          username = "testuser";
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-ssh";
  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          securityModule
          sshModule
        ];
        # Override for testing - enable password auth
        services.openssh.settings.PasswordAuthentication = lib.mkForce true;
        users.users.testuser = {
          isNormalUser = true;
          password = "correctpassword";
        };
        networking.firewall.allowedTCPPorts = [ 22 ];
      };
    # Second VM acts as the attacking client.
    client = {
      environment.systemPackages = with pkgs; [
        sshpass
        openssh
      ];
    };
  };
  testScript = ''
    import time
    start_all()
    server.wait_for_unit("sshd.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(22)
    time.sleep(2)
    with subtest("Verify sshd jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "sshd" in status, f"sshd jail not found in: {status}"
    with subtest("Generate failed SSH login attempts"):
        # Use -4 to force IPv4, timeout and NumberOfPasswordPrompts=1 to ensure quick failure
        # maxRetry is 3 in our config, so 4 attempts should trigger a ban
        for i in range(4):
            client.execute(
                "timeout 5 sshpass -p 'wrongpassword' ssh -4 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=3 -o NumberOfPasswordPrompts=1 testuser@server echo test 2>/dev/null || true"
            )
            time.sleep(1)
    with subtest("Verify IP is banned"):
        # Wait for fail2ban to process the logs and apply the ban
        time.sleep(5)
        status = server.succeed("fail2ban-client status sshd")
        print(f"sshd jail status: {status}")
        # Check that at least 1 IP is banned
        import re
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"
    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("timeout 3 nc -4 -z -w 2 server 22")[0]
        assert exit_code != 0, "Connection should be blocked for banned IP"
  '';
}

View File

@@ -0,0 +1,137 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: the fail2ban "vaultwarden" jail bans a client after repeated
# failed Bitwarden token-endpoint logins. The production bitwarden module is
# imported with stubbed ZFS/file-perms helpers so it evaluates in the VM.
let
  # Minimal stand-in for the repo-wide service_configs attrset, providing
  # only the keys services/bitwarden.nix reads.
  testServiceConfigs = {
    zpool_ssds = "";
    https = {
      domain = "test.local";
    };
    ports = {
      vaultwarden = 8222;
    };
    vaultwarden = {
      path = "/var/lib/vaultwarden";
    };
  };
  # lib with the ZFS-mount and file-perms helpers replaced by no-op modules.
  testLib = lib.extend (
    final: prev: {
      serviceMountWithZpool =
        serviceName: zpool: dirs:
        { ... }:
        { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
    }
  );
  # The real production vaultwarden module, instantiated with the test stubs.
  vaultwardenModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/bitwarden.nix {
          inherit config pkgs;
          lib = testLib;
          service_configs = testServiceConfigs;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-vaultwarden";
  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          vaultwardenModule
        ];
        # Disable ZFS mount dependencies
        systemd.services."vaultwarden-mounts".enable = lib.mkForce false;
        systemd.services."backup-vaultwarden-mounts".enable = lib.mkForce false;
        systemd.services.vaultwarden = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };
        systemd.services.backup-vaultwarden = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };
        # Override Vaultwarden settings for testing
        # - Listen on all interfaces (not just localhost)
        # - Enable logging at info level to capture failed login attempts
        services.vaultwarden.config = {
          ROCKET_ADDRESS = lib.mkForce "0.0.0.0";
          ROCKET_LOG = lib.mkForce "info";
        };
        # Override for faster testing and correct port
        services.fail2ban.jails.vaultwarden.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Vaultwarden port, not via Caddy
          port = lib.mkForce "8222";
        };
        networking.firewall.allowedTCPPorts = [ 8222 ];
      };
    # Second VM acts as the attacking client.
    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };
  testScript = ''
    import time
    import re
    start_all()
    server.wait_for_unit("vaultwarden.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(8222)
    time.sleep(2)
    with subtest("Verify vaultwarden jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "vaultwarden" in status, f"vaultwarden jail not found in: {status}"
    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute("""
                curl -4 -s -X POST 'http://server:8222/identity/connect/token' \
                    -H 'Content-Type: application/x-www-form-urlencoded' \
                    -H 'Bitwarden-Client-Name: web' \
                    -H 'Bitwarden-Client-Version: 2024.1.0' \
                    -d 'grant_type=password&username=bad@user.com&password=badpass&scope=api+offline_access&client_id=web&deviceType=10&deviceIdentifier=test&deviceName=test' \
                    || true
            """)
            time.sleep(0.5)
    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status vaultwarden")
        print(f"vaultwarden jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"
    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:8222/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}

53
tests/file-perms.nix Normal file
View File

@@ -0,0 +1,53 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM test: lib.serviceFilePerms generates a tmpfiles-based companion unit
# that repairs ownership/mode on a service's directories when the service
# starts.
let
  # Apply the repo overlays so the test uses the same package set as hosts.
  testPkgs = pkgs.appendOverlays [ (import ../modules/overlays.nix) ];
in
testPkgs.testers.runNixOSTest {
  # NOTE: Nix derivation names may only contain [A-Za-z0-9+._?=-]; the
  # previous value "file-perms test" contained a space and fails evaluation.
  name = "file-perms";
  nodes.machine =
    { pkgs, ... }:
    {
      imports = [
        (lib.serviceFilePerms "test-service" [
          "Z /tmp/test-perms-dir 0750 nobody nogroup"
        ])
      ];
      # Dummy oneshot service whose start should pull in the generated
      # test-service-file-perms unit.
      systemd.services."test-service" = {
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          ExecStart = lib.getExe pkgs.bash;
        };
      };
    };
  testScript = ''
    start_all()
    machine.wait_for_unit("multi-user.target")
    # Create test directory with wrong permissions
    machine.succeed("mkdir -p /tmp/test-perms-dir")
    machine.succeed("chown root:root /tmp/test-perms-dir")
    machine.succeed("chmod 700 /tmp/test-perms-dir")
    # Start service -- this should pull in test-service-file-perms
    machine.succeed("systemctl start test-service")
    # Verify file-perms service ran and is active
    machine.succeed("systemctl is-active test-service-file-perms.service")
    # Verify permissions were fixed by tmpfiles
    result = machine.succeed("stat -c '%U:%G' /tmp/test-perms-dir").strip()
    assert result == "nobody:nogroup", f"Expected nobody:nogroup, got {result}"
    result = machine.succeed("stat -c '%a' /tmp/test-perms-dir").strip()
    assert result == "750", f"Expected 750, got {result}"
  '';
}

View File

@@ -1,305 +1,583 @@
{ {
config,
lib, lib,
pkgs, pkgs,
inputs,
... ...
}: }:
let
payloads = {
auth = pkgs.writeText "auth.json" (builtins.toJSON { Username = "jellyfin"; });
empty = pkgs.writeText "empty.json" (builtins.toJSON { });
};
in
pkgs.testers.runNixOSTest { pkgs.testers.runNixOSTest {
name = "jellyfin-qbittorrent-monitor"; name = "jellyfin-qbittorrent-monitor";
nodes = { nodes = {
server = server =
{ pkgs, config, ... }: { ... }:
{ {
# Mock qBittorrent service imports = [
systemd.services.mock-qbittorrent = { inputs.vpn-confinement.nixosModules.default
description = "Mock qBittorrent API server"; ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
ExecStart = lib.getExe (
pkgs.writers.writePython3Bin "mock-qbt" { flakeIgnore = [ "E501" ]; } ''
import http.server
import socketserver
services.jellyfin.enable = true;
class MockQBittorrentHandler(http.server.BaseHTTPRequestHandler): # Real qBittorrent service
def do_GET(self): services.qbittorrent = {
if self.path == '/api/v2/transfer/speedLimitsMode': enable = true;
self.send_response(200) webuiPort = 8080;
self.send_header('Content-type', 'text/plain') openFirewall = true;
self.end_headers()
response = '1' if getattr(self.server, 'speed_limits_mode', False) else '0'
self.wfile.write(response.encode())
else:
self.send_response(404)
self.end_headers()
def do_POST(self): serverConfig.LegalNotice.Accepted = true;
if self.path == '/api/v2/transfer/toggleSpeedLimitsMode':
self.server.speed_limits_mode = not getattr(self.server, 'speed_limits_mode', False)
self.send_response(200)
self.end_headers()
print(f'MONITOR_TEST: Speed limits toggled to {self.server.speed_limits_mode}')
else:
self.send_response(404)
self.end_headers()
def log_message(self, format, *args): serverConfig.Preferences = {
print(f'qBittorrent Mock: {format % args}') WebUI = {
# Disable authentication for testing
AuthSubnetWhitelist = "0.0.0.0/0,::/0";
AuthSubnetWhitelistEnabled = true;
LocalHostAuth = false;
};
Downloads = {
with socketserver.TCPServer(('127.0.0.1', 8080), MockQBittorrentHandler) as httpd: SavePath = "/var/lib/qbittorrent/downloads";
httpd.speed_limits_mode = False TempPath = "/var/lib/qbittorrent/incomplete";
print('Mock qBittorrent server started on port 8080')
httpd.serve_forever()
''
);
Restart = "always";
RestartSec = "5s";
}; };
}; };
# Mock Jellyfin service with controllable streaming state serverConfig.BitTorrent.Session = {
systemd.services.mock-jellyfin = { # Normal speed - unlimited
description = "Mock Jellyfin API server"; GlobalUPSpeedLimit = 0;
after = [ "network.target" ]; GlobalDLSpeedLimit = 0;
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
ExecStart = lib.getExe (
pkgs.writers.writePython3Bin "mock-jellyfin" { flakeIgnore = [ "E501" ]; } ''
import http.server
import socketserver
import json
# Alternate speed limits for when Jellyfin is streaming
class MockJellyfinHandler(http.server.BaseHTTPRequestHandler): AlternativeGlobalUPSpeedLimit = 100;
def do_GET(self): AlternativeGlobalDLSpeedLimit = 100;
if self.path == '/Sessions':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
state = getattr(self.server, 'test_state', {
'streaming': False,
'paused': False,
'local': False,
'media_type': 'Movie'
})
if state['streaming']:
if state['local']:
remote_ip = '192.168.1.100'
else:
remote_ip = '203.0.113.42'
# Map media types to names
type_names = {
'Movie': 'Test Movie',
'Episode': 'Test Episode S01E01',
'Video': 'Test Video',
'Audio': 'Test Song'
}
sessions = [{
'Id': 'test-session-1',
'UserName': 'ExternalUser',
'RemoteEndPoint': remote_ip,
'NowPlayingItem': {
'Name': type_names.get(
state['media_type'], 'Test Content'
),
'Type': state['media_type']
},
'PlayState': {
'IsPaused': state['paused']
}
}]
else:
sessions = []
self.wfile.write(json.dumps(sessions).encode())
else:
self.send_response(404)
self.end_headers()
def do_POST(self):
if self.path.startswith('/control/'):
try:
content_length = int(self.headers.get('Content-Length', 0))
post_data = self.rfile.read(content_length)
data = json.loads(post_data.decode()) if post_data else {}
if not hasattr(self.server, 'test_state'):
self.server.test_state = {
'streaming': False,
'paused': False,
'local': False,
'media_type': 'Movie'
}
if self.path == '/control/state':
# Set complete state
self.server.test_state.update(data)
self.send_response(200)
self.end_headers()
self.wfile.write(b'OK')
state_str = str(self.server.test_state)
print(f'Jellyfin Mock: State updated to {state_str}')
elif self.path == '/control/reset':
# Reset to default state
self.server.test_state = {
'streaming': False,
'paused': False,
'local': False,
'media_type': 'Movie'
}
self.send_response(200)
self.end_headers()
self.wfile.write(b'OK')
print('Jellyfin Mock: State reset')
else:
self.send_response(404)
self.end_headers()
except Exception as e:
print(f'Jellyfin Mock: Control error: {e}')
self.send_response(500)
self.end_headers()
else:
self.send_response(404)
self.end_headers()
def log_message(self, format, *args):
print(f'Jellyfin Mock: {format % args}')
with socketserver.TCPServer(('127.0.0.1', 8096), MockJellyfinHandler) as httpd:
print('Mock Jellyfin server started on port 8096')
httpd.serve_forever()
''
);
Restart = "always";
RestartSec = "5s";
}; };
}; };
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
curl curl
python3 ffmpeg
]; ];
virtualisation.diskSize = 3 * 1024;
networking.firewall.allowedTCPPorts = [ networking.firewall.allowedTCPPorts = [
8096 8096
8080 8080
]; ];
networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
{
address = "192.168.1.1";
prefixLength = 24;
}
];
networking.interfaces.eth1.ipv4.routes = [
{
address = "203.0.113.0";
prefixLength = 24;
}
];
# Create directories for qBittorrent
systemd.tmpfiles.rules = [
"d /var/lib/qbittorrent/downloads 0755 qbittorrent qbittorrent"
"d /var/lib/qbittorrent/incomplete 0755 qbittorrent qbittorrent"
];
};
# Public test IP (RFC 5737 TEST-NET-3) so Jellyfin sees it as external
client = {
environment.systemPackages = [ pkgs.curl ];
networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
{
address = "203.0.113.10";
prefixLength = 24;
}
];
networking.interfaces.eth1.ipv4.routes = [
{
address = "192.168.1.0";
prefixLength = 24;
}
];
}; };
}; };
testScript = '' testScript = ''
start_all()
# Wait for services to start
server.wait_for_unit("multi-user.target")
server.wait_for_unit("mock-jellyfin.service")
server.wait_for_unit("mock-qbittorrent.service")
# Wait for services to be accessible
server.wait_for_open_port(8096) # Mock Jellyfin
server.wait_for_open_port(8080) # Mock qBittorrent
import time
import json import json
import time
from urllib.parse import urlencode
time.sleep(5) auth_header = 'MediaBrowser Client="NixOS Test", DeviceId="test-1337", Device="TestDevice", Version="1.0"'
# Helper function to set mock server state def api_get(path, token=None):
def set_jellyfin_state(streaming=False, paused=False, local=False, media_type="Movie"): header = auth_header + (f", Token={token}" if token else "")
state = { return f"curl -sf 'http://server:8096{path}' -H 'X-Emby-Authorization:{header}'"
"streaming": streaming,
"paused": paused,
"local": local,
"media_type": media_type
}
server.succeed(f"curl -s -X POST -H 'Content-Type: application/json' -d '{json.dumps(state)}' http://localhost:8096/control/state")
# Helper function to get current qBittorrent throttling state def api_post(path, json_file=None, token=None):
def get_throttling_state(): header = auth_header + (f", Token={token}" if token else "")
result = server.succeed("curl -s http://localhost:8080/api/v2/transfer/speedLimitsMode") if json_file:
return result.strip() == "1" return f"curl -sf -X POST 'http://server:8096{path}' -d '@{json_file}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{header}'"
return f"curl -sf -X POST 'http://server:8096{path}' -H 'X-Emby-Authorization:{header}'"
print("\\nTesting initial state...") def is_throttled():
assert not get_throttling_state(), "qBittorrent should start with normal speed limits" return server.succeed("curl -s http://localhost:8080/api/v2/transfer/speedLimitsMode").strip() == "1"
sessions_result = server.succeed("curl -s http://localhost:8096/Sessions") def get_alt_dl_limit():
print(f"Initial Jellyfin sessions: {sessions_result}") prefs = json.loads(server.succeed("curl -s http://localhost:8080/api/v2/app/preferences"))
assert "[]" in sessions_result, "Should be no streaming sessions initially" return prefs["alt_dl_limit"]
# Start the monitor with fast delays for testing def get_alt_up_limit():
python_path = "${pkgs.python3.withPackages (ps: with ps; [ requests ])}/bin/python" prefs = json.loads(server.succeed("curl -s http://localhost:8080/api/v2/app/preferences"))
monitor_path = "${../services/jellyfin-qbittorrent-monitor.py}" return prefs["alt_up_limit"]
def are_torrents_paused():
torrents = json.loads(server.succeed("curl -s 'http://localhost:8080/api/v2/torrents/info'"))
if not torrents:
return False
return all(t["state"].startswith("stopped") for t in torrents)
movie_id: str = ""
media_source_id: str = ""
start_all()
server.wait_for_unit("jellyfin.service")
server.wait_for_open_port(8096)
server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=60)
server.wait_for_unit("qbittorrent.service")
server.wait_for_open_port(8080)
# Wait for qBittorrent WebUI to be responsive
server.wait_until_succeeds("curl -sf http://localhost:8080/api/v2/app/version", timeout=30)
with subtest("Complete Jellyfin setup wizard"):
server.wait_until_succeeds(api_get("/Startup/Configuration"))
server.succeed(api_get("/Startup/FirstUser"))
server.succeed(api_post("/Startup/Complete"))
with subtest("Authenticate and get token"):
auth_result = json.loads(server.succeed(api_post("/Users/AuthenticateByName", "${payloads.auth}")))
token = auth_result["AccessToken"]
user_id = auth_result["User"]["Id"]
with subtest("Create test video library"):
tempdir = server.succeed("mktemp -d -p /var/lib/jellyfin").strip()
server.succeed(f"chmod 755 '{tempdir}'")
server.succeed(f"ffmpeg -f lavfi -i testsrc2=duration=5 '{tempdir}/Test Movie (2024) [1080p].mkv'")
add_folder_query = urlencode({
"name": "Test Library",
"collectionType": "Movies",
"paths": tempdir,
"refreshLibrary": "true",
})
server.succeed(api_post(f"/Library/VirtualFolders?{add_folder_query}", "${payloads.empty}", token))
def is_library_ready(_):
folders = json.loads(server.succeed(api_get("/Library/VirtualFolders", token)))
return all(f.get("RefreshStatus") == "Idle" for f in folders)
retry(is_library_ready, timeout=60)
def get_movie(_):
global movie_id, media_source_id
items = json.loads(server.succeed(api_get(f"/Users/{user_id}/Items?IncludeItemTypes=Movie&Recursive=true", token)))
if items["TotalRecordCount"] > 0:
movie_id = items["Items"][0]["Id"]
item_info = json.loads(server.succeed(api_get(f"/Users/{user_id}/Items/{movie_id}", token)))
media_source_id = item_info["MediaSources"][0]["Id"]
return True
return False
retry(get_movie, timeout=60)
with subtest("Start monitor service"):
python = "${pkgs.python3.withPackages (ps: [ ps.requests ])}/bin/python"
monitor = "${../services/jellyfin-qbittorrent-monitor.py}"
server.succeed(f""" server.succeed(f"""
systemd-run --unit=jellyfin-qbittorrent-monitor-test \\ systemd-run --unit=monitor-test \
--setenv=JELLYFIN_URL=http://localhost:8096 \\ --setenv=JELLYFIN_URL=http://localhost:8096 \
--setenv=QBITTORRENT_URL=http://localhost:8080 \\ --setenv=JELLYFIN_API_KEY={token} \
--setenv=CHECK_INTERVAL=1 \\ --setenv=QBITTORRENT_URL=http://localhost:8080 \
--setenv=STREAMING_START_DELAY=1 \\ --setenv=CHECK_INTERVAL=1 \
--setenv=STREAMING_STOP_DELAY=1 \\ --setenv=STREAMING_START_DELAY=1 \
{python_path} {monitor_path} --setenv=STREAMING_STOP_DELAY=1 \
--setenv=TOTAL_BANDWIDTH_BUDGET=50000000 \
--setenv=SERVICE_BUFFER=2000000 \
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
--setenv=MIN_TORRENT_SPEED=100 \
{python} {monitor}
""") """)
time.sleep(2)
assert not is_throttled(), "Should start unthrottled"
client_auth = 'MediaBrowser Client="External Client", DeviceId="external-9999", Device="ExternalDevice", Version="1.0"'
client_auth2 = 'MediaBrowser Client="External Client 2", DeviceId="external-8888", Device="ExternalDevice2", Version="1.0"'
server_ip = "192.168.1.1"
with subtest("Client authenticates from external network"):
auth_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}'"
client_auth_result = json.loads(client.succeed(auth_cmd))
client_token = client_auth_result["AccessToken"]
with subtest("Second client authenticates from external network"):
auth_cmd2 = f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}'"
client_auth_result2 = json.loads(client.succeed(auth_cmd2))
client_token2 = client_auth_result2["AccessToken"]
with subtest("External video playback triggers throttling"):
playback_start = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-1",
"CanSeek": True,
"IsPaused": False,
}
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd)
time.sleep(2)
assert is_throttled(), "Should throttle for external video playback"
with subtest("Pausing disables throttling"):
playback_progress = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-1",
"IsPaused": True,
"PositionTicks": 10000000,
}
progress_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Progress' -d '{json.dumps(playback_progress)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(progress_cmd)
time.sleep(2)
assert not is_throttled(), "Should unthrottle when paused"
with subtest("Resuming re-enables throttling"):
playback_progress["IsPaused"] = False
playback_progress["PositionTicks"] = 20000000
progress_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Progress' -d '{json.dumps(playback_progress)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(progress_cmd)
time.sleep(2)
assert is_throttled(), "Should re-throttle when resumed"
with subtest("Stopping playback disables throttling"):
playback_stop = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-1",
"PositionTicks": 50000000,
}
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(stop_cmd)
time.sleep(2)
assert not is_throttled(), "Should unthrottle when playback stops"
with subtest("Single stream sets proportional alt speed limits"):
playback_start = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-proportional",
"CanSeek": True,
"IsPaused": False,
}
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd)
time.sleep(3) time.sleep(3)
# Define test scenarios assert is_throttled(), "Should be in alt speed mode during streaming"
media_types = ["Movie", "Episode", "Video", "Audio"] dl_limit = get_alt_dl_limit()
playback_states = [False, True] # False=playing, True=paused ul_limit = get_alt_up_limit()
network_locations = [False, True] # False=external, True=local # Both upload and download should get remaining bandwidth (proportional)
assert dl_limit > 0, f"Download limit should be > 0, got {dl_limit}"
assert ul_limit == dl_limit, f"Upload limit ({ul_limit}) should equal download limit ({dl_limit})"
test_count = 0 # Stop playback
for media_type in media_types: playback_stop = {
for is_paused in playback_states: "ItemId": movie_id,
for is_local in network_locations: "MediaSourceId": media_source_id,
test_count += 1 "PlaySessionId": "test-play-session-proportional",
print(f"\\nTest {test_count}: {media_type}, {'paused' if is_paused else 'playing'}, {'local' if is_local else 'external'}") "PositionTicks": 50000000,
}
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(stop_cmd)
time.sleep(3)
# Set streaming state with subtest("Multiple streams reduce available bandwidth"):
set_jellyfin_state(streaming=True, paused=is_paused, local=is_local, media_type=media_type) # Start first stream
time.sleep(1.5) # Wait for monitor to detect and apply changes playback1 = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-multi-1",
"CanSeek": True,
"IsPaused": False,
}
start_cmd1 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback1)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd1)
time.sleep(3)
throttling_active = get_throttling_state() single_dl_limit = get_alt_dl_limit()
# Determine expected behavior: # Start second stream with different client identity
# Throttling should be active only if: playback2 = {
# - Not paused AND "ItemId": movie_id,
# - Not local AND "MediaSourceId": media_source_id,
# - Media type is video (Movie, Episode, Video) - NOT Audio "PlaySessionId": "test-play-session-multi-2",
should_throttle = ( "CanSeek": True,
not is_paused and "IsPaused": False,
not is_local and }
media_type in ["Movie", "Episode", "Video"] start_cmd2 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback2)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}, Token={client_token2}'"
) client.succeed(start_cmd2)
time.sleep(3)
assert throttling_active == should_throttle, f"Expected {"no " if not should_throttle else ""} throttling for {media_type}, {'paused' if is_paused else 'playing'}, {'local' if is_local else 'external'}" dual_dl_limit = get_alt_dl_limit()
# Two streams should leave less bandwidth than one stream
assert dual_dl_limit < single_dl_limit, f"Two streams ({dual_dl_limit}) should have lower limit than one ({single_dl_limit})"
set_jellyfin_state(streaming=False) # Stop both streams
time.sleep(1.5) # Wait for stop delay stop1 = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-multi-1",
"PositionTicks": 50000000,
}
stop_cmd1 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(stop1)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(stop_cmd1)
assert not get_throttling_state(), "No streaming should disable throttling" stop2 = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-multi-2",
"PositionTicks": 50000000,
}
stop_cmd2 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(stop2)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}, Token={client_token2}'"
client.succeed(stop_cmd2)
time.sleep(3)
# Start with throttling-enabled state with subtest("Budget exhaustion pauses all torrents"):
set_jellyfin_state(streaming=True, paused=False, local=False, media_type="Movie") # Stop current monitor
time.sleep(1.5) server.succeed("systemctl stop monitor-test || true")
assert get_throttling_state(), "Should enable throttling for external Movie" time.sleep(1)
# Switch to paused (should disable throttling) # Add a dummy torrent so we can check pause state
set_jellyfin_state(streaming=True, paused=True, local=False, media_type="Movie") server.succeed("curl -sf -X POST 'http://localhost:8080/api/v2/torrents/add' -d 'urls=magnet:?xt=urn:btih:0000000000000000000000000000000000000001%26dn=test-torrent'")
time.sleep(1.5) time.sleep(2)
assert not get_throttling_state(), "Should disable throttling when paused"
# Switch back to playing (should re-enable throttling) # Start monitor with impossibly low budget
set_jellyfin_state(streaming=True, paused=False, local=False, media_type="Movie") server.succeed(f"""
time.sleep(1.5) systemd-run --unit=monitor-exhaust \
assert get_throttling_state(), "Should re-enable throttling when unpaused" --setenv=JELLYFIN_URL=http://localhost:8096 \
--setenv=JELLYFIN_API_KEY={token} \
--setenv=QBITTORRENT_URL=http://localhost:8080 \
--setenv=CHECK_INTERVAL=1 \
--setenv=STREAMING_START_DELAY=1 \
--setenv=STREAMING_STOP_DELAY=1 \
--setenv=TOTAL_BANDWIDTH_BUDGET=1000 \
--setenv=SERVICE_BUFFER=500 \
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
--setenv=MIN_TORRENT_SPEED=100 \
{python} {monitor}
""")
time.sleep(2)
# Start a stream - this will exceed the tiny budget
playback_start = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-exhaust",
"CanSeek": True,
"IsPaused": False,
}
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd)
time.sleep(3)
assert are_torrents_paused(), "Torrents should be paused when budget is exhausted"
with subtest("Recovery from pause restores unlimited"):
# Stop the stream
playback_stop = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-exhaust",
"PositionTicks": 50000000,
}
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(stop_cmd)
time.sleep(3)
assert not is_throttled(), "Should return to unlimited after streams stop"
assert not are_torrents_paused(), "Torrents should be resumed after streams stop"
# Clean up: stop exhaust monitor, restart normal monitor
server.succeed("systemctl stop monitor-exhaust || true")
time.sleep(1)
server.succeed(f"""
systemd-run --unit=monitor-test \
--setenv=JELLYFIN_URL=http://localhost:8096 \
--setenv=JELLYFIN_API_KEY={token} \
--setenv=QBITTORRENT_URL=http://localhost:8080 \
--setenv=CHECK_INTERVAL=1 \
--setenv=STREAMING_START_DELAY=1 \
--setenv=STREAMING_STOP_DELAY=1 \
--setenv=TOTAL_BANDWIDTH_BUDGET=50000000 \
--setenv=SERVICE_BUFFER=2000000 \
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
--setenv=MIN_TORRENT_SPEED=100 \
{python} {monitor}
""")
time.sleep(2)
with subtest("Local playback does NOT trigger throttling"):
local_auth = 'MediaBrowser Client="Local Client", DeviceId="local-1111", Device="LocalDevice", Version="1.0"'
local_auth_result = json.loads(server.succeed(
f"curl -sf -X POST 'http://localhost:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{local_auth}'"
))
local_token = local_auth_result["AccessToken"]
local_playback = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-local",
"CanSeek": True,
"IsPaused": False,
}
server.succeed(f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing' -d '{json.dumps(local_playback)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{local_auth}, Token={local_token}'")
time.sleep(2)
assert not is_throttled(), "Should NOT throttle for local playback"
local_playback["PositionTicks"] = 50000000
server.succeed(f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing/Stopped' -d '{json.dumps(local_playback)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{local_auth}, Token={local_token}'")
# === SERVICE RESTART TESTS ===
with subtest("qBittorrent restart during throttled state re-applies throttling"):
# Start external playback to trigger throttling
playback_start = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-restart-1",
"CanSeek": True,
"IsPaused": False,
}
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd)
time.sleep(2)
assert is_throttled(), "Should be throttled before qBittorrent restart"
# Restart qBittorrent (this resets alt_speed to its config default - disabled)
server.succeed("systemctl restart qbittorrent.service")
server.wait_for_unit("qbittorrent.service")
server.wait_for_open_port(8080)
server.wait_until_succeeds("curl -sf http://localhost:8080/api/v2/app/version", timeout=30)
# qBittorrent restarted - alt_speed is now False (default on startup)
# The monitor should detect this and re-apply throttling
time.sleep(3) # Give monitor time to detect and re-apply
assert is_throttled(), "Monitor should re-apply throttling after qBittorrent restart"
# Stop playback to clean up
playback_stop = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-restart-1",
"PositionTicks": 50000000,
}
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(stop_cmd)
time.sleep(2)
with subtest("qBittorrent restart during unthrottled state stays unthrottled"):
# Verify we're unthrottled (no active streams)
assert not is_throttled(), "Should be unthrottled before test"
# Restart qBittorrent
server.succeed("systemctl restart qbittorrent.service")
server.wait_for_unit("qbittorrent.service")
server.wait_for_open_port(8080)
server.wait_until_succeeds("curl -sf http://localhost:8080/api/v2/app/version", timeout=30)
# Give monitor time to check state
time.sleep(3)
assert not is_throttled(), "Should remain unthrottled after qBittorrent restart with no streams"
with subtest("Jellyfin restart during throttled state maintains throttling"):
# Start external playback to trigger throttling
playback_start = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-restart-2",
"CanSeek": True,
"IsPaused": False,
}
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd)
time.sleep(2)
assert is_throttled(), "Should be throttled before Jellyfin restart"
# Restart Jellyfin
server.succeed("systemctl restart jellyfin.service")
server.wait_for_unit("jellyfin.service")
server.wait_for_open_port(8096)
server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=60)
# During Jellyfin restart, monitor can't reach Jellyfin
# After restart, sessions are cleared - monitor should eventually unthrottle
# But during the unavailability window, throttling should be maintained (fail-safe)
time.sleep(3)
# Re-authenticate (old token invalid after restart)
client_auth_result = json.loads(client.succeed(
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}'"
))
client_token = client_auth_result["AccessToken"]
client_auth_result2 = json.loads(client.succeed(
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}'"
))
client_token2 = client_auth_result2["AccessToken"]
# No active streams after Jellyfin restart, should eventually unthrottle
time.sleep(3)
assert not is_throttled(), "Should unthrottle after Jellyfin restart clears sessions"
with subtest("Monitor recovers after Jellyfin temporary unavailability"):
# Re-authenticate with fresh token
client_auth_result = json.loads(client.succeed(
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}'"
))
client_token = client_auth_result["AccessToken"]
client_auth_result2 = json.loads(client.succeed(
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}'"
))
client_token2 = client_auth_result2["AccessToken"]
# Start playback
playback_start = {
"ItemId": movie_id,
"MediaSourceId": media_source_id,
"PlaySessionId": "test-play-session-restart-3",
"CanSeek": True,
"IsPaused": False,
}
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
client.succeed(start_cmd)
time.sleep(2)
assert is_throttled(), "Should be throttled"
# Stop Jellyfin briefly (simulating temporary unavailability)
server.succeed("systemctl stop jellyfin.service")
time.sleep(2)
# During unavailability, throttle state should be maintained (fail-safe)
assert is_throttled(), "Should maintain throttle during Jellyfin unavailability"
# Bring Jellyfin back
server.succeed("systemctl start jellyfin.service")
server.wait_for_unit("jellyfin.service")
server.wait_for_open_port(8096)
server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=60)
# After Jellyfin comes back, sessions are gone - should unthrottle
time.sleep(3)
assert not is_throttled(), "Should unthrottle after Jellyfin returns with no sessions"
''; '';
} }

View File

@@ -6,25 +6,7 @@
... ...
}: }:
let let
# Create pkgs with nix-minecraft overlay and unfree packages allowed testServiceConfigs = {
testPkgs = import inputs.nixpkgs {
system = pkgs.stdenv.hostPlatform.system;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "minecraft-server" ];
overlays = [
inputs.nix-minecraft.overlay
(import ../modules/overlays.nix)
];
};
# Create a wrapper module that imports the actual minecraft service
minecraftService =
{ config, ... }:
{
imports = [
(import ../services/minecraft.nix {
inherit lib config inputs;
pkgs = testPkgs;
service_configs = {
minecraft = { minecraft = {
server_name = "main"; server_name = "main";
parent_dir = "/var/lib/minecraft"; parent_dir = "/var/lib/minecraft";
@@ -32,25 +14,36 @@ let
https = { https = {
domain = "test.local"; domain = "test.local";
}; };
ports = {
minecraft = 25565;
};
zpool_ssds = ""; zpool_ssds = "";
}; };
username = "testuser";
}) # Create pkgs with nix-minecraft overlay and unfree packages allowed
testPkgs = import inputs.nixpkgs {
system = pkgs.stdenv.targetPlatform.system;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "minecraft-server" ];
overlays = [
inputs.nix-minecraft.overlay
(import ../modules/overlays.nix)
]; ];
# Override nixpkgs config to prevent conflicts in test environment
nixpkgs.config = lib.mkForce {
allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "minecraft-server" ];
};
}; };
in in
testPkgs.testers.runNixOSTest { testPkgs.testers.runNixOSTest {
name = "minecraft server startup test"; name = "minecraft server startup test";
node.specialArgs = {
inherit inputs lib;
service_configs = testServiceConfigs;
username = "testuser";
};
nodes.machine = nodes.machine =
{ ... }: { lib, ... }:
{ {
imports = [ imports = [
minecraftService ../services/minecraft.nix
]; ];
# Enable caddy service (required by minecraft service) # Enable caddy service (required by minecraft service)
@@ -67,6 +60,10 @@ testPkgs.testers.runNixOSTest {
wants = lib.mkForce [ ]; wants = lib.mkForce [ ];
after = lib.mkForce [ ]; after = lib.mkForce [ ];
requires = lib.mkForce [ ]; requires = lib.mkForce [ ];
serviceConfig = {
Nice = lib.mkForce 0;
LimitMEMLOCK = lib.mkForce "infinity";
};
}; };
# Test-specific overrides only - reduce memory for testing # Test-specific overrides only - reduce memory for testing

174
tests/ntfy-alerts.nix Normal file
View File

@@ -0,0 +1,174 @@
{
config,
lib,
pkgs,
...
}:
let
testPkgs = pkgs.appendOverlays [ (import ../modules/overlays.nix) ];
in
testPkgs.testers.runNixOSTest {
name = "ntfy-alerts";
nodes.machine =
{ pkgs, ... }:
{
imports = [
../modules/ntfy-alerts.nix
];
system.stateVersion = config.system.stateVersion;
virtualisation.memorySize = 2048;
environment.systemPackages = with pkgs; [
curl
jq
];
# Create test topic file
systemd.tmpfiles.rules = [
"f /run/ntfy-test-topic 0644 root root - test-alerts"
];
# Mock ntfy server that records POST requests
systemd.services.mock-ntfy =
let
mockNtfyScript = pkgs.writeScript "mock-ntfy.py" ''
import json
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from datetime import datetime
REQUESTS_FILE = "/tmp/ntfy-requests.json"
class MockNtfy(BaseHTTPRequestHandler):
def _respond(self, code=200, body=b"Ok"):
self.send_response(code)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(body if isinstance(body, bytes) else body.encode())
def do_GET(self):
self._respond()
def do_POST(self):
content_length = int(self.headers.get("Content-Length", 0))
body = self.rfile.read(content_length).decode() if content_length > 0 else ""
request_data = {
"timestamp": datetime.now().isoformat(),
"path": self.path,
"headers": dict(self.headers),
"body": body,
}
# Load existing requests or start new list
requests = []
if os.path.exists(REQUESTS_FILE):
try:
with open(REQUESTS_FILE, "r") as f:
requests = json.load(f)
except:
requests = []
requests.append(request_data)
with open(REQUESTS_FILE, "w") as f:
json.dump(requests, f, indent=2)
self._respond()
def log_message(self, format, *args):
pass
HTTPServer(("0.0.0.0", 8080), MockNtfy).serve_forever()
'';
in
{
description = "Mock ntfy server";
wantedBy = [ "multi-user.target" ];
before = [ "ntfy-alert@test-fail.service" ];
serviceConfig = {
ExecStart = "${pkgs.python3}/bin/python3 ${mockNtfyScript}";
Type = "simple";
};
};
# Test service that will fail
systemd.services.test-fail = {
description = "Test service that fails";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.coreutils}/bin/false";
};
};
# Configure ntfy-alerts to use mock server
services.ntfyAlerts = {
enable = true;
serverUrl = "http://localhost:8080";
topicFile = "/run/ntfy-test-topic";
};
};
testScript = ''
import json
import time
start_all()
# Wait for mock ntfy server to be ready
machine.wait_for_unit("mock-ntfy.service")
machine.wait_until_succeeds("curl -sf http://localhost:8080/", timeout=30)
# Verify the ntfy-alert@ template service exists
machine.succeed("systemctl list-unit-files | grep ntfy-alert@")
# Verify the global OnFailure drop-in is configured
machine.succeed("cat /etc/systemd/system/service.d/onfailure.conf | grep -q 'OnFailure=ntfy-alert@%p.service'")
# Trigger the test-fail service
machine.succeed("systemctl start test-fail.service || true")
# Wait a moment for the failure notification to be sent
time.sleep(2)
# Verify the ntfy-alert@test-fail service ran
machine.succeed("systemctl is-active ntfy-alert@test-fail.service || systemctl is-failed ntfy-alert@test-fail.service || true")
# Check that the mock server received a POST request
machine.wait_until_succeeds("test -f /tmp/ntfy-requests.json", timeout=30)
# Verify the request content
result = machine.succeed("cat /tmp/ntfy-requests.json")
requests = json.loads(result)
assert len(requests) >= 1, f"Expected at least 1 request, got {len(requests)}"
# Check the first request
req = requests[0]
assert "/test-alerts" in req["path"], f"Expected path to contain /test-alerts, got {req['path']}"
assert "Title" in req["headers"], "Expected Title header"
assert "test-fail" in req["headers"]["Title"], f"Expected Title to contain 'test-fail', got {req['headers']['Title']}"
assert req["headers"]["Priority"] == "high", f"Expected Priority 'high', got {req['headers'].get('Priority')}"
assert req["headers"]["Tags"] == "warning", f"Expected Tags 'warning', got {req['headers'].get('Tags')}"
print(f"Received notification: Title={req['headers']['Title']}, Body={req['body'][:100]}...")
# Idempotency test: trigger failure again
machine.succeed("rm /tmp/ntfy-requests.json")
machine.succeed("systemctl reset-failed test-fail.service || true")
machine.succeed("systemctl start test-fail.service || true")
time.sleep(2)
# Verify another notification was sent
machine.wait_until_succeeds("test -f /tmp/ntfy-requests.json", timeout=30)
result = machine.succeed("cat /tmp/ntfy-requests.json")
requests = json.loads(result)
assert len(requests) >= 1, f"Expected at least 1 request after second failure, got {len(requests)}"
print("All tests passed!")
'';
}

View File

@@ -12,4 +12,16 @@ in
testTest = handleTest ./testTest.nix; testTest = handleTest ./testTest.nix;
minecraftTest = handleTest ./minecraft.nix; minecraftTest = handleTest ./minecraft.nix;
jellyfinQbittorrentMonitorTest = handleTest ./jellyfin-qbittorrent-monitor.nix; jellyfinQbittorrentMonitorTest = handleTest ./jellyfin-qbittorrent-monitor.nix;
filePermsTest = handleTest ./file-perms.nix;
# fail2ban tests
fail2banSshTest = handleTest ./fail2ban-ssh.nix;
fail2banCaddyTest = handleTest ./fail2ban-caddy.nix;
fail2banGiteaTest = handleTest ./fail2ban-gitea.nix;
fail2banVaultwardenTest = handleTest ./fail2ban-vaultwarden.nix;
fail2banImmichTest = handleTest ./fail2ban-immich.nix;
fail2banJellyfinTest = handleTest ./fail2ban-jellyfin.nix;
# ntfy alerts test
ntfyAlertsTest = handleTest ./ntfy-alerts.nix;
} }