llama.cpp things
This commit is contained in:
@@ -24,7 +24,7 @@
|
||||
## Behavior
|
||||
Do not be sycophantic in your responses.
|
||||
Do not use emojis unless explicitly asked to. This includes in code.
|
||||
ALWAYS test code before returning something you think works to the user. Always verify your work.
|
||||
Use Test Driven Development methodology.
|
||||
|
||||
## Nix
|
||||
When running `nix build`, append `-L` to get better visibility into the logs.
|
||||
@@ -39,8 +39,7 @@
|
||||
|
||||
autoshare = false;
|
||||
autoupdate = true;
|
||||
agent = {
|
||||
};
|
||||
agent = { };
|
||||
provider = {
|
||||
openrouter = {
|
||||
models = {
|
||||
@@ -52,6 +51,17 @@
|
||||
apiKey = "{file:${../secrets/openrouter_api_key}}";
|
||||
};
|
||||
};
|
||||
# Local llama-cpp server with OpenAI-compatible API
|
||||
"llama.cpp" = {
|
||||
npm = "@ai-sdk/openai-compatible";
|
||||
options = {
|
||||
baseURL = "http://127.0.0.1:8012/v1";
|
||||
};
|
||||
|
||||
models = {
|
||||
"local" = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user