add llama-server

2025-03-31 03:19:55 -04:00
parent 7d2bb541c3
commit d34793c18f
4 changed files with 89 additions and 0 deletions

services/llama-cpp.nix  (new file, 28 additions)
@@ -0,0 +1,28 @@
{
  pkgs,
  service_configs,
  config,
  inputs,
  ...
}:
{
  services.llama-cpp = {
    enable = true;
    # Fetch the Gemma 3 27B IQ4_XS GGUF weights from Hugging Face into the Nix
    # store and point llama-server at the resulting store path.
    model = builtins.toString (
      pkgs.fetchurl {
        url = "https://huggingface.co/bartowski/google_gemma-3-27b-it-GGUF/resolve/main/google_gemma-3-27b-it-IQ4_XS.gguf";
        sha256 = "bd2f188c66d8ccb0bffcb0c91e4dbbb72754bb1732e0bca323a2f266a35e01c8";
      }
    );
    port = service_configs.ports.llama_cpp;
    host = "0.0.0.0";
    # Use the llama.cpp build from the flake input rather than the nixpkgs package.
    package = inputs.llamacpp.packages.${pkgs.system}.default;
    extraFlags = [ ];
  };
  # Expose the server at llm.<domain> through the existing Caddy reverse proxy.
  services.caddy.virtualHosts."llm.${service_configs.https.domain}".extraConfig = ''
    reverse_proxy :${builtins.toString config.services.llama-cpp.port}
  '';
}
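
For context, a minimal sketch of how a module like this might be wired into the flake. The `service_configs` attrset and the `llamacpp` input are referenced but not defined in this commit, so the flake URL, port number, and domain below are assumptions for illustration only, not the repository's actual values.

```nix
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    # Assumed: llama.cpp's upstream flake, exposing packages.<system>.default.
    llamacpp.url = "github:ggml-org/llama.cpp";
  };

  outputs = { nixpkgs, ... }@inputs: {
    nixosConfigurations.server = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      # Pass the extra module arguments that services/llama-cpp.nix expects.
      specialArgs = {
        inherit inputs;
        service_configs = {
          ports.llama_cpp = 8080;       # assumed; not shown in this commit
          https.domain = "example.com"; # assumed; not shown in this commit
        };
      };
      modules = [
        ./services/llama-cpp.nix
      ];
    };
  };
}
```

With `specialArgs` carrying `inputs` and `service_configs`, the module's argument set resolves at evaluation time, and llama-server ends up listening on the configured port while Caddy proxies `llm.<domain>` to it.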