llama-cpp: use q8 quantization instead of q4

commit 7c217d6ead
parent 4dc577fdcb
Author: Simon Gardling
Date:   2025-05-28 21:20:42 -07:00
Signed by: titaniumtown (GPG Key ID: 9AB28AC10ECE533D)

@@ -12,8 +12,8 @@
     enable = true;
     model = builtins.toString (
       pkgs.fetchurl {
-        url = "https://huggingface.co/bartowski/nvidia_AceReason-Nemotron-7B-GGUF/resolve/main/nvidia_AceReason-Nemotron-7B-Q4_0.gguf";
-        sha256 = "27f93349ea88f3c84e53469288ac2ac3f5c985de9f8e00e275870e7e524bb3d8";
+        url = "https://huggingface.co/bartowski/nvidia_AceReason-Nemotron-7B-GGUF/resolve/main/nvidia_AceReason-Nemotron-7B-Q8_0.gguf";
+        sha256 = "0d5eb8b46490af7c097357cb20ad215ebfd30efacedac58bf68a8c7d84e996fc";
       }
     );
     port = service_configs.ports.llama_cpp;
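
Note: when swapping the model URL like this, the sha256 must be recomputed for the new file or fetchurl will reject the download at build time. A minimal sketch of one way to obtain it (assuming a standard Nix install; nix-prefetch-url prints the hash in base32, which fetchurl's sha256 attribute accepts alongside the hex form used in this diff):

    # Download the Q8_0 GGUF once and print its SHA-256 for fetchurl
    nix-prefetch-url "https://huggingface.co/bartowski/nvidia_AceReason-Nemotron-7B-GGUF/resolve/main/nvidia_AceReason-Nemotron-7B-Q8_0.gguf"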