{ pkgs, service_configs, config, inputs, lib, ... }:
{
  services.llama-cpp = {
    enable = true;
    # Fetch the quantized GGUF model into the store and hand llama.cpp its path.
    model = builtins.toString (pkgs.fetchurl {
      url = "https://huggingface.co/ibm-granite/granite-4.0-h-tiny-GGUF/resolve/main/granite-4.0-h-tiny-Q4_0.gguf";
      sha256 = "7ced406c5ee7ae653b772d56aa79ca32997d9e3da69d97b2c8a66dd5e7d5f77d";
    });
    port = service_configs.ports.llama_cpp;
    host = "0.0.0.0";
    # Vulkan broken: https://github.com/ggml-org/llama.cpp/issues/13801
    package = lib.optimizePackage (
      inputs.llamacpp.packages.${pkgs.system}.default.overrideAttrs (old: {
        postPatch = "";
      })
    );
    extraFlags = [
      # "-ngl"
      # "9999"
    ];
  };

  # DynamicUser has to be disabled for Vulkan to work: the transient user it
  # creates lacks access to the GPU device nodes.
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;

  # Expose the server through Caddy, guarded by the auth directives kept in secrets.
  services.caddy.virtualHosts."llm.${service_configs.https.domain}".extraConfig = ''
    ${builtins.readFile ../secrets/caddy_auth}
    reverse_proxy :${builtins.toString config.services.llama-cpp.port}
  '';
}