zfs: fix qbittorrent
parent ae5189b6c6
commit 223910744a
lib.nix
@@ -69,67 +69,68 @@ inputs.nixpkgs.lib.extend (
       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
-        ExecStart = lib.getExe (
-          pkgs.writeShellApplication {
-            name = "ensure-zfs-mounts-with-pool-${serviceName}";
-            runtimeInputs = with pkgs; [
-              gawk
-              coreutils
-              config.boot.zfs.package
-            ];
-
-            text = ''
-              set -euo pipefail
-
-              echo "Ensuring ZFS mounts for service: ${serviceName}"
-              echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"
-
-              # Validate mounts exist (ensureZfsMounts already has proper PATH)
-              ${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}
-
-              # Additional runtime check: verify paths are on correct zpool
-              ${lib.optionalString (zpool != "") ''
-                echo "Verifying ZFS mountpoints are on pool '${zpool}'..."
-
-                if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
-                  echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
-                  exit 1
-                fi
-
-                # This loop handles variable number of directories, shellcheck false positive
-                # shellcheck disable=SC2043
-                for target in ${lib.strings.concatStringsSep " " dirs}; do
-                  echo "Checking: $target"
-
-                  # Find dataset that has this mountpoint
-                  dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')
-
-                  if [ -z "$dataset" ]; then
-                    echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
-                    exit 1
-                  fi
-
-                  # Extract pool name from dataset (first part before /)
-                  actual_pool=$(echo "$dataset" | cut -d'/' -f1)
-
-                  if [ "$actual_pool" != "${zpool}" ]; then
-                    echo "ERROR: ZFS pool mismatch for $target" >&2
-                    echo "  Expected pool: ${zpool}" >&2
-                    echo "  Actual pool: $actual_pool" >&2
-                    echo "  Dataset: $dataset" >&2
-                    exit 1
-                  fi
-
-                  echo "$target is on $dataset (pool: $actual_pool)"
-                done
-
-                echo "All paths verified successfully on pool '${zpool}'"
-              ''}
-
-              echo "Mount validation completed for ${serviceName}"
-            '';
-          }
-        );
+        ExecStart = [
+          (lib.getExe (
+            pkgs.writeShellApplication {
+              name = "ensure-zfs-mounts-with-pool-${serviceName}-${zpool}";
+              runtimeInputs = with pkgs; [
+                gawk
+                coreutils
+                config.boot.zfs.package
+              ];
+
+              text = ''
+                set -euo pipefail
+
+                echo "Ensuring ZFS mounts for service: ${serviceName} (pool: ${zpool})"
+                echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"
+
+                # Validate mounts exist (ensureZfsMounts already has proper PATH)
+                ${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}
+
+                # Additional runtime check: verify paths are on correct zpool
+                ${lib.optionalString (zpool != "") ''
+                  echo "Verifying ZFS mountpoints are on pool '${zpool}'..."
+
+                  if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
+                    echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
+                    exit 1
+                  fi
+
+                  # shellcheck disable=SC2043
+                  for target in ${lib.strings.concatStringsSep " " dirs}; do
+                    echo "Checking: $target"
+
+                    # Find dataset that has this mountpoint
+                    dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')
+
+                    if [ -z "$dataset" ]; then
+                      echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
+                      exit 1
+                    fi
+
+                    # Extract pool name from dataset (first part before /)
+                    actual_pool=$(echo "$dataset" | cut -d'/' -f1)
+
+                    if [ "$actual_pool" != "${zpool}" ]; then
+                      echo "ERROR: ZFS pool mismatch for $target" >&2
+                      echo "  Expected pool: ${zpool}" >&2
+                      echo "  Actual pool: $actual_pool" >&2
+                      echo "  Dataset: $dataset" >&2
+                      exit 1
+                    fi
+
+                    echo "$target is on $dataset (pool: $actual_pool)"
+                  done
+
+                  echo "All paths verified successfully on pool '${zpool}'"
+                ''}
+
+                echo "Mount validation completed for ${serviceName} (pool: ${zpool})"
+              '';
+            }
+          ))
+        ];
       };
     };
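Reviewer note on the ExecStart change above: with Type = "oneshot", systemd accepts several ExecStart= entries and runs them in order, so turning ExecStart into a list lets multiple serviceMountWithZpool calls for the same service merge into one "<name>-mounts" validation unit instead of clashing; suffixing the wrapper script name with the pool keeps the per-pool derivations distinct. A rough sketch, not the library's actual output, of what the merged unit for the multi-service test case further down should roughly evaluate to (the unit name follows the multi-service-mounts.service pattern used in the test, and the store paths are placeholders):

  # Sketch only: approximate result of merging two serviceMountWithZpool
  # definitions for the same service once ExecStart is a list.
  systemd.services."multi-service-mounts" = {
    serviceConfig = {
      Type = "oneshot";          # oneshot runs each ExecStart entry in sequence
      RemainAfterExit = true;
      ExecStart = [
        "/nix/store/…-ensure-zfs-mounts-with-pool-multi-service-rpool/bin/ensure-zfs-mounts-with-pool-multi-service-rpool"
        "/nix/store/…-ensure-zfs-mounts-with-pool-multi-service-rpool2/bin/ensure-zfs-mounts-with-pool-multi-service-rpool2"
      ];
    };
  };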
@@ -11,6 +11,9 @@
     (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_hdds [
       service_configs.torrents_path
       config.services.qbittorrent.serverConfig.Preferences.Downloads.TempPath
     ])
+    (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_ssds [
+      "${config.services.qbittorrent.profileDir}/qBittorrent"
+    ])
     (lib.vpnNamespaceOpenPort config.services.qbittorrent.webuiPort "qbittorrent")
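The qbittorrent hunk above splits the mounts across two pools: bulk torrent data on the HDD pool, the qBittorrent profile on the SSD pool. service_configs is defined elsewhere in the repository and is not part of this diff; purely to illustrate the serviceMountWithZpool arguments (service name, pool, directory list), it might look roughly like this, with hypothetical values:

  # Hypothetical shape of service_configs; attribute names match the calls
  # above, values are illustrative only.
  service_configs = {
    zpool_hdds = "hdds";                   # pool backing bulk torrent storage
    zpool_ssds = "ssds";                   # pool backing the qBittorrent profile
    torrents_path = "/mnt/hdds/torrents";  # completed-downloads directory
  };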
@@ -21,6 +21,10 @@ testPkgs.testers.runNixOSTest {
     # Test service with paths outside zpool (should fail assertion)
     (lib.serviceMountWithZpool "invalid-service" "rpool2" [ "/mnt/rpool_data" ])
+
+    # Test multi-command logic: service with multiple serviceMountWithZpool calls
+    (lib.serviceMountWithZpool "multi-service" "rpool" [ "/mnt/rpool_data" ])
+    (lib.serviceMountWithZpool "multi-service" "rpool2" [ "/mnt/rpool2_data" ])
   ];

   virtualisation = {
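The invalid-service fixture above exercises the runtime pool check added in lib.nix: a directory mounted from rpool but declared as belonging to rpool2 must make the -mounts unit fail. As a standalone illustration of that check, reduced to a single path and pool (the package name and argument handling are illustrative, not the library's API, and it uses pkgs.zfs for self-containment instead of config.boot.zfs.package):

  # Minimal sketch of the zpool verification, runnable outside the module.
  # Usage: check-path-on-pool <mountpoint> <expected-pool>
  pkgs.writeShellApplication {
    name = "check-path-on-pool";
    runtimeInputs = with pkgs; [ gawk coreutils zfs ];
    text = ''
      target=$1
      expected_pool=$2

      # Column 2 of `zfs list -H` is the mountpoint; pick the dataset mounted there.
      dataset=$(zfs list -H -o name,mountpoint | awk -v t="$target" '$2 == t {print $1; exit}')
      [ -n "$dataset" ] || { echo "no ZFS dataset mounted at $target" >&2; exit 1; }

      # The pool is everything before the first "/" of the dataset name.
      actual_pool=$(echo "$dataset" | cut -d'/' -f1)
      [ "$actual_pool" = "$expected_pool" ] || {
        echo "$target is on pool $actual_pool, expected $expected_pool" >&2
        exit 1
      }
      echo "$target is on $dataset (pool: $actual_pool)"
    '';
  }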
@@ -56,6 +60,14 @@ testPkgs.testers.runNixOSTest {
         ExecStart = lib.getExe pkgs.bash;
       };
     };
+
+    systemd.services."multi-service" = {
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = lib.getExe pkgs.bash;
+      };
+    };
   };

   testScript = ''
@@ -105,6 +117,7 @@ testPkgs.testers.runNixOSTest {
     assert "Expected pool: rpool2" in journal_output
     assert "Actual pool: rpool" in journal_output

     print("SUCCESS: Runtime validation correctly detected zpool mismatch!")
+    machine.succeed("systemctl start multi-service")
+    machine.succeed("systemctl is-active multi-service-mounts.service")
   '';
 }