server-config/services/llama-cpp.nix

{
  pkgs,
  service_configs,
  config,
  inputs,
  lib,
  ...
}:
{
  services.llama-cpp = {
    enable = true;
    # fetch the GGUF weights into the Nix store, pinned by hash;
    # toString turns the fetchurl derivation into its store-path string
    model = builtins.toString (
      pkgs.fetchurl {
        url = "https://huggingface.co/ggml-org/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-mxfp4.gguf";
        sha256 = "be37a636aca0fc1aae0d32325f82f6b4d21495f06823b5fbc1898ae0303e9935";
      }
    );
    port = service_configs.ports.llama_cpp;
    host = "0.0.0.0";
    # vulkan broken: https://github.com/ggml-org/llama.cpp/issues/13801
    # so use the vulkan package from the llama.cpp flake input instead
    package = lib.optimizePackage (
      inputs.llamacpp.packages.${pkgs.system}.vulkan.overrideAttrs (old: {
        # drop upstream's postPatch phase as a workaround
        postPatch = "";
      })
    );
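    # NOTE: lib.optimizePackage is not part of nixpkgs' lib, so it is
    # presumably a repo-local helper merged into lib. A hypothetical sketch of
    # such a helper (an assumption, not this repository's actual definition):
    #
    #   optimizePackage = pkg:
    #     pkg.overrideAttrs (old: {
    #       # rebuild with CPU-native optimizations
    #       env.NIX_CFLAGS_COMPILE =
    #         toString (old.env.NIX_CFLAGS_COMPILE or "") + " -march=native";
    #     });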
    extraFlags = [
      # -ngl (--n-gpu-layers): offload 5 layers to the GPU
      "-ngl"
      "5"
      # -c (--ctx-size): 16384-token context window
      "-c"
      "16384"
    ];
  };
  # the module enables DynamicUser by default, which keeps the transient
  # service user from accessing the GPU; force it off to get vulkan to work
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;
  services.caddy.virtualHosts."llm.${service_configs.https.domain}".extraConfig = ''
    ${builtins.readFile ../secrets/caddy_auth}
    reverse_proxy :${builtins.toString config.services.llama-cpp.port}
  '';
}
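
For context, a minimal sketch of how a module like this could be wired up, assuming service_configs is passed through specialArgs and the llama.cpp flake is pinned as inputs.llamacpp; every name and value below other than services/llama-cpp.nix is an assumption, not taken from the repository:

# flake.nix (hypothetical excerpt)
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.llamacpp.url = "github:ggml-org/llama.cpp";

  outputs = { nixpkgs, ... }@inputs: {
    nixosConfigurations.server = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      specialArgs = {
        inherit inputs;
        # lib would also need to be extended with optimizePackage (see above)
        service_configs = {
          ports.llama_cpp = 8080;       # illustrative port
          https.domain = "example.com"; # illustrative domain
        };
      };
      modules = [ ./services/llama-cpp.nix ];
    };
  };
}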