zfs: HEAVILY REFACTOR subvolume handling

parent dd9042ae95
commit ae5189b6c6
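For orientation, a minimal sketch of a call site for the new helper: the diff below folds the old `serviceMountDeps` and `serviceDependZpool` helpers into a single `serviceMountWithZpool serviceName zpool dirs` module function. The sketch assumes a module that already receives `lib`, `config`, and `service_configs` (it mirrors the gitea change further down; the slskd change shows that passing "" as the pool skips the pool verification):

    { config, lib, service_configs, ... }:
    {
      imports = [
        # serviceMountWithZpool <serviceName> <zpool> <dirs>
        (lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [
          config.services.gitea.stateDir
        ])
      ];
    }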
lib.nix
@@ -9,28 +9,6 @@ inputs.nixpkgs.lib.extend (
     lib = prev;
 in
 {
-  serviceMountDeps =
-    serviceName: dirs:
-    { pkgs, ... }:
-    {
-      systemd.services."${serviceName}_mounts" = {
-        wants = [ "zfs.target" ];
-        before = [ "${serviceName}.service" ];
-
-        serviceConfig = {
-          Type = "oneshot";
-          RemainAfterExit = true;
-          ExecStart = "${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}";
-        };
-      };
-
-      systemd.services.${serviceName} = {
-        wants = [ "${serviceName}_mounts.service" ];
-        after = [ "${serviceName}_mounts.service" ];
-        requires = [ "${serviceName}_mounts.service" ];
-      };
-    };
-
   # stolen from: https://stackoverflow.com/a/42398526
   optimizeWithFlags =
     pkg: flags:

@@ -79,25 +57,101 @@ inputs.nixpkgs.lib.extend (
       };
     };
 
-  serviceDependZpool =
-    serviceName: zpool:
-    { config, ... }:
+  serviceMountWithZpool =
+    serviceName: zpool: dirs:
+    { pkgs, config, ... }:
     {
-      config = lib.mkIf (zpool != "") {
+      systemd.services."${serviceName}-mounts" = {
+        wants = [ "zfs.target" ] ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
+        after = lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
+        before = [ "${serviceName}.service" ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = lib.getExe (
+            pkgs.writeShellApplication {
+              name = "ensure-zfs-mounts-with-pool-${serviceName}";
+              runtimeInputs = with pkgs; [
+                gawk
+                coreutils
+                config.boot.zfs.package
+              ];
+
+              text = ''
+                set -euo pipefail
+
+                echo "Ensuring ZFS mounts for service: ${serviceName}"
+                echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"
+
+                # Validate mounts exist (ensureZfsMounts already has proper PATH)
+                ${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}
+
+                # Additional runtime check: verify paths are on correct zpool
+                ${lib.optionalString (zpool != "") ''
+                  echo "Verifying ZFS mountpoints are on pool '${zpool}'..."
+
+                  if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
+                    echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
+                    exit 1
+                  fi
+
+                  # This loop handles variable number of directories, shellcheck false positive
+                  # shellcheck disable=SC2043
+                  for target in ${lib.strings.concatStringsSep " " dirs}; do
+                    echo "Checking: $target"
+
+                    # Find dataset that has this mountpoint
+                    dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')
+
+                    if [ -z "$dataset" ]; then
+                      echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
+                      exit 1
+                    fi
+
+                    # Extract pool name from dataset (first part before /)
+                    actual_pool=$(echo "$dataset" | cut -d'/' -f1)
+
+                    if [ "$actual_pool" != "${zpool}" ]; then
+                      echo "ERROR: ZFS pool mismatch for $target" >&2
+                      echo "  Expected pool: ${zpool}" >&2
+                      echo "  Actual pool: $actual_pool" >&2
+                      echo "  Dataset: $dataset" >&2
+                      exit 1
+                    fi
+
+                    echo "$target is on $dataset (pool: $actual_pool)"
+                  done
+
+                  echo "All paths verified successfully on pool '${zpool}'"
+                ''}
+
+                echo "Mount validation completed for ${serviceName}"
+              '';
+            }
+          );
+        };
+      };
+
       systemd.services.${serviceName} = {
-        wants = [ "zfs-import-${zpool}.service" ];
-        after = [ "zfs-import-${zpool}.service" ];
-        requires = [ "zfs-import-${zpool}.service" ];
+        wants = [
+          "${serviceName}-mounts.service"
+        ];
+        after = [
+          "${serviceName}-mounts.service"
+        ];
+        requires = [
+          "${serviceName}-mounts.service"
+        ];
       };
 
       # assert that the pool is even enabled
-      assertions = [
-        {
-          assertion = builtins.elem zpool config.boot.zfs.extraPools;
-          message = "${zpool} is not enabled in `boot.zfs.extraPools`";
-        }
-      ];
-      };
+      #assertions = lib.optionals (zpool != "") [
+      #  {
+      #    assertion = builtins.elem zpool config.boot.zfs.extraPools;
+      #    message = "${zpool} is not enabled in `boot.zfs.extraPools`";
+      #  }
+      #];
     };
 }
 )

@@ -7,16 +7,14 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "vaultwarden" [
+    (lib.serviceMountWithZpool "vaultwarden" service_configs.zpool_ssds [
       service_configs.vaultwarden.path
       config.services.vaultwarden.backupDir
     ])
-    (lib.serviceMountDeps "backup-vaultwarden" [
+    (lib.serviceMountWithZpool "backup-vaultwarden" service_configs.zpool_ssds [
       service_configs.vaultwarden.path
       config.services.vaultwarden.backupDir
     ])
-    (lib.serviceDependZpool "vaultwarden" service_configs.zpool_ssds)
-    (lib.serviceDependZpool "backup-vaultwarden" service_configs.zpool_ssds)
   ];
 
   services.vaultwarden = {

@@ -44,10 +44,9 @@ let
 in
 {
   imports = [
-    (lib.serviceMountDeps "caddy" [
+    (lib.serviceMountWithZpool "caddy" service_configs.zpool_ssds [
       config.services.caddy.dataDir
     ])
-    (lib.serviceDependZpool "caddy" service_configs.zpool_ssds)
   ];
 
   services.caddy = {

@@ -7,8 +7,7 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "gitea" [ config.services.gitea.stateDir ])
-    (lib.serviceDependZpool "gitea" service_configs.zpool_ssds)
+    (lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [ config.services.gitea.stateDir ])
   ];
 
   services.gitea = {

@@ -7,10 +7,12 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "immich-server" [ config.services.immich.mediaLocation ])
-    (lib.serviceMountDeps "immich-machine-learning" [ config.services.immich.mediaLocation ])
-    (lib.serviceDependZpool "immich-server" service_configs.zpool_ssds)
-    (lib.serviceDependZpool "immich-machine-learning" service_configs.zpool_ssds)
+    (lib.serviceMountWithZpool "immich-server" service_configs.zpool_ssds [
+      config.services.immich.mediaLocation
+    ])
+    (lib.serviceMountWithZpool "immich-machine-learning" service_configs.zpool_ssds [
+      config.services.immich.mediaLocation
+    ])
   ];
 
   services.immich = {

@@ -7,11 +7,10 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "jellyfin" [
+    (lib.serviceMountWithZpool "jellyfin" service_configs.zpool_ssds [
       config.services.jellyfin.dataDir
       config.services.jellyfin.cacheDir
     ])
-    (lib.serviceDependZpool "jellyfin" service_configs.zpool_ssds)
   ];
 
   services.jellyfin = {

@@ -7,10 +7,12 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "minecraft-server-${service_configs.minecraft.server_name}" [
+    (lib.serviceMountWithZpool "minecraft-server-${service_configs.minecraft.server_name}"
+      service_configs.zpool_ssds
+      [
       "${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}"
-    ])
-    (lib.serviceDependZpool "minecraft-server-${service_configs.minecraft.server_name}" service_configs.zpool_ssds)
+      ]
+    )
   ];
 
   environment.systemPackages = [

@@ -5,10 +5,9 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "monero" [
+    (lib.serviceMountWithZpool "monero" service_configs.zpool_hdds [
       service_configs.monero.dataDir
     ])
-    (lib.serviceDependZpool "monero" service_configs.zpool_hdds)
   ];
 
   services.monero = {

@@ -7,8 +7,9 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "postgresql" [ config.services.postgresql.dataDir ])
-    (lib.serviceDependZpool "postgresql" service_configs.zpool_ssds)
+    (lib.serviceMountWithZpool "postgresql" service_configs.zpool_ssds [
+      config.services.postgresql.dataDir
+    ])
   ];
 
   services.postgresql = {

@@ -8,13 +8,12 @@
 }:
 {
   imports = [
-    (lib.serviceMountDeps "qbittorrent" [
+    (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_hdds [
      service_configs.torrents_path
      config.services.qbittorrent.serverConfig.Preferences.Downloads.TempPath
      "${config.services.qbittorrent.profileDir}/qBittorrent"
     ])
     (lib.vpnNamespaceOpenPort config.services.qbittorrent.webuiPort "qbittorrent")
-    (lib.serviceDependZpool "qbittorrent" service_configs.zpool_hdds)
   ];
 
   services.qbittorrent = {

@@ -11,13 +11,11 @@ let
 in
 {
   imports = [
-    (lib.serviceMountDeps "slskd" [
+    (lib.serviceMountWithZpool "slskd" "" [
       service_configs.slskd.base
       service_configs.slskd.downloads
       service_configs.slskd.incomplete
     ])
-    (lib.serviceDependZpool "slskd" service_configs.zpool_ssds)
-    (lib.serviceDependZpool "slskd" service_configs.zpool_hdds)
   ];
 
   users.groups."music" = { };

@@ -7,29 +7,29 @@
 }:
 let
   # Create pkgs with ensureZfsMounts overlay
-  testPkgs = import inputs.nixpkgs {
-    system = pkgs.system;
-    overlays = [ (import ../overlays.nix) ];
-  };
+  testPkgs = pkgs.appendOverlays [ (import ../overlays.nix) ];
 in
 testPkgs.testers.runNixOSTest {
-  name = "zfs folder dependency and mounting test";
+  name = "zfs test";
 
   nodes.machine =
     { pkgs, ... }:
     {
       imports = [
-        (lib.serviceMountDeps "foobar" [ "/mnt/foobar_data" ])
-        (lib.serviceMountDeps "foobarSadge" [
-          "/mnt/foobar_data"
-          "/mnt/does_not_exist_lol"
-        ])
+        # Test valid paths within zpool
+        (lib.serviceMountWithZpool "test-service" "rpool" [ "/mnt/rpool_data" ])
+
+        # Test service with paths outside zpool (should fail assertion)
+        (lib.serviceMountWithZpool "invalid-service" "rpool2" [ "/mnt/rpool_data" ])
       ];
 
       virtualisation = {
         emptyDiskImages = [
           4096
+          4096
         ];
+        # Add this to avoid ZFS hanging issues
+        additionalPaths = [ pkgs.zfs ];
       };
       networking.hostId = "deadbeef";
       boot.kernelPackages = config.boot.kernelPackages;

@@ -41,7 +41,7 @@ testPkgs.testers.runNixOSTest {
         ensureZfsMounts
       ];
 
-      systemd.services.foobar = {
+      systemd.services."test-service" = {
         serviceConfig = {
           Type = "oneshot";
           RemainAfterExit = true;

@@ -49,7 +49,7 @@ testPkgs.testers.runNixOSTest {
         };
       };
 
-      systemd.services.foobarSadge = {
+      systemd.services."invalid-service" = {
         serviceConfig = {
           Type = "oneshot";
           RemainAfterExit = true;

@@ -61,38 +61,50 @@ testPkgs.testers.runNixOSTest {
   testScript = ''
     start_all()
     machine.wait_for_unit("multi-user.target")
 
+    # Setup ZFS pool
     machine.succeed(
         "parted --script /dev/vdb mklabel msdos",
         "parted --script /dev/vdb -- mkpart primary 1024M -1s",
+        "zpool create rpool /dev/vdb1"
     )
 
-    machine.fail("zfsEnsureMounted")
-    machine.fail("zfsEnsureMounted /mnt/test_mountpoint")
+    # Setup ZFS pool 2
+    machine.succeed(
+        "parted --script /dev/vdc mklabel msdos",
+        "parted --script /dev/vdc -- mkpart primary 1024M -1s",
+        "zpool create rpool2 /dev/vdc1"
+    )
 
-    machine.succeed("zpool create rpool /dev/vdb1")
-    machine.succeed("zfs create -o mountpoint=/mnt/test_mountpoint rpool/test")
+    machine.succeed("zfs create -o mountpoint=/mnt/rpool_data rpool/data")
 
-    machine.succeed("zfsEnsureMounted /mnt/test_mountpoint")
+    machine.succeed("zfs create -o mountpoint=/mnt/rpool2_data rpool2/data")
 
-    machine.fail("zfsEnsureMounted /mnt/does_not_exist_lol")
-    machine.fail("zfsEnsureMounted /mnt/test_mountpoint /mnt/does_not_exist_lol")
+    # Test that valid service starts successfully
+    machine.succeed("systemctl start test-service")
 
-    machine.succeed("zfs create -o mountpoint=/mnt/test_mountpoint_dos rpool/test2")
+    # Manually test our validation logic by checking the debug output
+    zfs_output = machine.succeed("zfs list -H -o name,mountpoint")
+    print("ZFS LIST OUTPUT:")
+    print(zfs_output)
 
-    machine.succeed("zfsEnsureMounted /mnt/test_mountpoint /mnt/test_mountpoint_dos")
+    dataset = machine.succeed("zfs list -H -o name,mountpoint | awk '/\\/mnt\\/rpool_data/ { print $1 }'")
+    print("DATASET FOR /mnt/rpool_data:")
+    print(dataset)
 
-    machine.succeed("zfs create -o mountpoint='/mnt/test path with spaces' rpool/test3")
+    # Test that invalid-service mount service fails validation
+    machine.fail("systemctl start invalid-service.service")
 
-    machine.succeed("zfsEnsureMounted '/mnt/test path with spaces'")
+    # Check the journal for our detailed validation error message
+    journal_output = machine.succeed("journalctl -u invalid-service-mounts.service --no-pager")
+    print("JOURNAL OUTPUT:")
+    print(journal_output)
 
-    machine.succeed("zfs create -o mountpoint='/mnt/test escaped spaces' rpool/test4")
-    machine.succeed("zfsEnsureMounted /mnt/test\ escaped\ spaces")
+    # Verify our validation error is in the journal using Python string matching
+    assert "ERROR: ZFS pool mismatch for /mnt/rpool_data" in journal_output
+    assert "Expected pool: rpool2" in journal_output
+    assert "Actual pool: rpool" in journal_output
 
-    machine.succeed("zfsEnsureMounted /mnt/test_mountpoint '/mnt/test path with spaces' /mnt/test_mountpoint_dos")
+    print("SUCCESS: Runtime validation correctly detected zpool mismatch!")
 
-    machine.succeed("zfs create -o mountpoint=/mnt/foobar_data rpool/foobar")
-    machine.succeed("systemctl start foobar")
-
-    machine.fail("systemctl start foobarSadge")
   '';
 }