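# Home Manager module for GPT4All: fetches a set of GGUF models from
# Hugging Face, builds a patched GPT4All from a development snapshot with
# Vulkan/Kompute and MKL-backed BLAS enabled, and generates a matching
# GPT4All.ini.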
{ pkgs, lib, ... }:
let
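  # GGUF chat models to install; commented-out entries are alternatives that
  # can be swapped in. The first entry becomes the default model below.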
  models = [
    # {
    #   name = "Qwen2.5-14B-Instruct-Q4_K_S.gguf";
    #   context_length = "32768";
    #   gen_length = "8192";
    #   source = pkgs.fetchurl {
    #     url = "https://huggingface.co/bartowski/Qwen2.5-14B-Instruct-GGUF/resolve/main/Qwen2.5-14B-Instruct-Q4_K_S.gguf?download=true";
    #     sha256 = "E1CmWUhMMbTXEjIRczzA3rSrVuR8qOL8BLagw7LiyZk=";
    #   };
    # }
    {
      name = "Replete-LLM-V2.5-Qwen-14b-Q4_K_S.gguf";
      context_length = "32768";
      gen_length = "8192";
      source = pkgs.fetchurl {
        url = "https://huggingface.co/bartowski/Replete-LLM-V2.5-Qwen-14b-GGUF/resolve/main/Replete-LLM-V2.5-Qwen-14b-Q4_K_S.gguf?download=true";
        sha256 = "/Oa1y4WVRGQkLEt5Sxxyt5plN5+tDFblLShPhMtzs7k=";
      };
    }
    # {
    #   name = "Qwen2.5-7B-Instruct-Q6_K_L.gguf";
    #   context_length = "32768";
    #   gen_length = "8192";
    #   source = pkgs.fetchurl {
    #     url = "https://huggingface.co/bartowski/Qwen2.5-7B-Instruct-GGUF/resolve/main/Qwen2.5-7B-Instruct-Q6_K_L.gguf?download=true";
    #     sha256 = "thEXN06T/UVGfzdB83jlgpG7kuTzZtz1ZUAdupAnErM=";
    #   };
    # }
    # {
    #   name = "Replete-LLM-V2.5-Qwen-7b-Q6_K_L.gguf";
    #   context_length = "32768";
    #   gen_length = "8192";
    #   source = pkgs.fetchurl {
    #     url = "https://huggingface.co/bartowski/Replete-LLM-V2.5-Qwen-7b-GGUF/resolve/main/Replete-LLM-V2.5-Qwen-7b-Q6_K_L.gguf?download=true";
    #     sha256 = "dR7M5GKfGdiPI9mqBSH6naVr8XzuCjLLv514VYXSikg=";
    #   };
    # }
  ];
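  # To add a new entry, one way to obtain a sha256 that fetchurl accepts is
  # nix-prefetch-url (the URL below is a placeholder, not a real repo):
  #   nix-prefetch-url "https://huggingface.co/<repo>/resolve/main/<file>.gguf?download=true"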
  # Rebuild a package with extra compiler flags appended to its
  # NIX_CFLAGS_COMPILE, using the clang stdenv.
  # stolen from: https://stackoverflow.com/a/42398526
  optimizeWithFlags =
    pkg: flags:
    pkgs.lib.overrideDerivation pkg (
      old:
      let
        newflags = pkgs.lib.foldl' (acc: x: "${acc} ${x}") "" flags;
        oldflags = if (pkgs.lib.hasAttr "NIX_CFLAGS_COMPILE" old) then "${old.NIX_CFLAGS_COMPILE}" else "";
      in
      {
        NIX_CFLAGS_COMPILE = "${oldflags} ${newflags}";
        stdenv = pkgs.clangStdenv;
      }
    );
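  # Map each model to a home.file entry so the GGUF lands in GPT4All's
  # default model directory.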
  model_files = builtins.listToAttrs (
    map (f: {
      name = ".local/share/nomic.ai/GPT4All/${f.name}";
      value.source = f.source;
    }) models
  );
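  # GPT4All built from a pinned development snapshot: Vulkan, Kompute, and
  # MKL-backed BLAS enabled, a local patch applied, and aggressive native-CPU
  # optimization flags appended via optimizeWithFlags.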
  gpt4all_package = (
    optimizeWithFlags
      (pkgs.gpt4all.overrideAttrs (old: {
        version = "3.4.0-dev0";
        src = pkgs.fetchFromGitHub {
          fetchSubmodules = true;
          owner = "nomic-ai";
          repo = "gpt4all";
          # "HEAD" is effectively pinned only by the sha256 below; a concrete
          # commit hash would make this fetch more reproducible.
          rev = "HEAD";
          sha256 = "vzAfWLyljR0Cg6pCeeyQJZNeOJh5xOILlbJnwDWu62o=";
        };
        cmakeFlags = old.cmakeFlags ++ [
          "-DGGML_VULKAN=ON"
          "-DGGML_KOMPUTE=ON"
          "-DGGML_BLAS=ON"
          # ggml's BLAS vendor option, forwarded to CMake's FindBLAS
          "-DGGML_BLAS_VENDOR=Intel"
        ];
        nativeBuildInputs = old.nativeBuildInputs ++ [
          pkgs.pkg-config
        ];
        buildInputs = old.buildInputs ++ [
          pkgs.mkl
          pkgs.blas
        ];
        patches = (old.patches or [ ]) ++ [
          ./gpt4all-HEAD-disable-settings-err.patch
        ];
      }))
      # compile flags
      [
        "-Ofast"
        "-march=native"
        "-mtune=native"
        "-fno-protect-parens"
        "-fno-finite-math-only" # https://github.com/ggerganov/llama.cpp/pull/7154#issuecomment-2143844461
      ]
  );
in
{
  # MKL is unfree, so the build needs unfree packages allowed.
  nixpkgs.config.allowUnfree = true;

  home.packages = [
    gpt4all_package
  ];
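  # Merge the generated GPT4All.ini with the model files declared above;
  # lib.recursiveUpdate combines both attribute sets into one home.file set.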
  home.file = lib.recursiveUpdate {
    ".config/nomic.ai/GPT4All.ini".text =
      let
        system_prompt = "You are an expert LLM who is thoughtful and works step-by-step from first principles to derive an answer to the user's prompt. For each step, title the step and begin showing your work, then decide if your work is comprehensive and if you're ready to provide your final answer. Make sure to exhaust ALL POSSIBILITIES before answering. While your reasoning is not shown to the user, it is under high levels of scrutiny to ensure high-quality reasoning. INTERNAL REASONING STEPS ARE NOT SHOWN TO THE USER, ONLY A \"Final Answer\" SECTION WILL BE SHOWN TO THE USER.
USE AS MANY REASONING STEPS AS POSSIBLE. AT LEAST 3. EXPLORE ALTERNATE ANSWERS AND CONSIDER THAT YOUR ANSWER MAY BE WRONG. IDENTIFY POSSIBLE ERRORS IN YOUR REASONING AND WHERE SUCH ERRORS MAY BE. FULLY TEST ALL OTHER POSSIBILITIES. YOU CAN BE WRONG. WHEN YOU SAY YOU ARE RE-EXAMINING, ACTUALLY RE-EXAMINE, AND USE ANOTHER APPROACH TO DO SO. BE VERBOSE IN YOUR REASONING AND RE-EXAMINING.";
      in
      ''
        [General]
        chatTheme=Dark
        height=940
        suggestionMode=Off
        threadCount=8
        uniqueId=7096f2d2-448d-4272-a132-d37e77f8a781
        userDefaultModel=${
          # select the first element of `models` to be the default model
          (builtins.elemAt models 0).name
        }
        width=1472
        x=0
        y=0
        [download]
        lastVersionStarted=${gpt4all_package.version}
      ''
      + (lib.concatStrings (
        map (model: ''
          [model-${model.name}]
          contextLength=${model.context_length}
          filename=${model.name}
          maxLength=${model.gen_length}
          promptBatchSize=256
          promptTemplate=<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n
          systemPrompt="<|im_start|>system\n${
            # replace newlines with the string "\n" for gpt4all to properly parse
            builtins.replaceStrings [ "\n" ] [ "\\n" ] system_prompt
          }<|im_end|>
          \n"
        '') models
      ))
      + ''
        [network]
        isActive=true
        usageStatsActive=true
      '';
  } model_files;
}
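# Usage sketch: import this module from a Home Manager configuration, e.g.
# `imports = [ ./gpt4all.nix ];` in home.nix (the filename here is assumed).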