opencode: don't use glm-4.7-flash
This commit is contained in:
@@ -8,7 +8,7 @@
 let
   # what model should be used in place of haiku?
   # glm 4.7-flash is an example
-  haiku-model = "openrouter/z-ai/glm-4.7-flash";
+  haiku-model = "openrouter/z-ai/glm-4.7";

   opus-model = "openrouter/moonshotai/kimi-k2.5";

@@ -147,7 +147,6 @@ in
   models = {
     "openai/gpt-oss-20b:free" = { };
     "minimax/minimax-m2.1" = { };
-    "z-ai/glm-4.7-flash" = { };
   };
   options = {
     # TODO! use agenix here instead
Reference in New Issue
Block a user