From 006cb53c1eb5547283ab699c5f603f4288248ca1 Mon Sep 17 00:00:00 2001 From: Boqian Date: Wed, 18 Feb 2026 16:17:51 -0500 Subject: [PATCH 1/5] Add Meganova as a provider with 19 models Adds Meganova AI (https://api.meganova.ai/v1) as an OpenAI-compatible provider with a curated set of models from the DeepSeek, GLM, Qwen, Kimi, MiniMax, MiMo, Llama, and Mistral families; all but Qwen3.5 Plus and GLM-5 are open-weight. Co-Authored-By: Claude Opus 4.6 --- providers/meganova/logo.svg | 7 ++++++ .../models/MiniMaxAI/MiniMax-M2.1.toml | 21 ++++++++++++++++ .../models/MiniMaxAI/MiniMax-M2.5.toml | 21 ++++++++++++++++ .../models/Qwen/Qwen2.5-VL-32B-Instruct.toml | 22 ++++++++++++++++ .../Qwen/Qwen3-235B-A22B-Instruct-2507.toml | 22 ++++++++++++++++ .../meganova/models/Qwen/Qwen3.5-Plus.toml | 23 +++++++++++++++++ .../models/XiaomiMiMo/MiMo-V2-Flash.toml | 25 +++++++++++++++++++ .../models/deepseek-ai/DeepSeek-R1-0528.toml | 25 +++++++++++++++++++ .../models/deepseek-ai/DeepSeek-V3-0324.toml | 21 ++++++++++++++++ .../models/deepseek-ai/DeepSeek-V3.1.toml | 22 ++++++++++++++++ .../models/deepseek-ai/DeepSeek-V3.2-Exp.toml | 22 ++++++++++++++++ .../models/deepseek-ai/DeepSeek-V3.2.toml | 22 ++++++++++++++++ .../meta-llama/Llama-3.3-70B-Instruct.toml | 21 ++++++++++++++++ .../mistralai/Mistral-Nemo-Instruct-2407.toml | 22 ++++++++++++++++ .../Mistral-Small-3.2-24B-Instruct-2506.toml | 23 +++++++++++++++++ .../models/moonshotai/Kimi-K2-Thinking.toml | 25 +++++++++++++++++++ .../meganova/models/moonshotai/Kimi-K2.5.toml | 22 ++++++++++++++++ .../meganova/models/zai-org/GLM-4.6.toml | 22 ++++++++++++++++ .../meganova/models/zai-org/GLM-4.7.toml | 25 +++++++++++++++++++ providers/meganova/models/zai-org/GLM-5.toml | 24 ++++++++++++++++++ providers/meganova/provider.toml | 5 ++++ 21 files changed, 442 insertions(+) create mode 100644 providers/meganova/logo.svg create mode 100644 providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml create mode 100644 providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml create mode 100644 providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml create mode 100644 providers/meganova/models/Qwen/Qwen3-235B-A22B-Instruct-2507.toml create mode 100644 providers/meganova/models/Qwen/Qwen3.5-Plus.toml create mode 100644 providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml create mode 100644 providers/meganova/models/deepseek-ai/DeepSeek-R1-0528.toml create mode 100644 providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml create mode 100644 providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml create mode 100644 providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml create mode 100644 providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml create mode 100644 providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml create mode 100644 providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml create mode 100644 providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml create mode 100644 providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml create mode 100644 providers/meganova/models/moonshotai/Kimi-K2.5.toml create mode 100644 providers/meganova/models/zai-org/GLM-4.6.toml create mode 100644 providers/meganova/models/zai-org/GLM-4.7.toml create mode 100644 providers/meganova/models/zai-org/GLM-5.toml create mode 100644 providers/meganova/provider.toml diff --git a/providers/meganova/logo.svg b/providers/meganova/logo.svg new file mode 100644 index 000000000..ab294f1e1 --- /dev/null +++ b/providers/meganova/logo.svg @@ -0,0 +1,7 @@ + + + + + + + diff
--git a/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml b/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml new file mode 100644 index 000000000..671a11863 --- /dev/null +++ b/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml @@ -0,0 +1,21 @@ +name = "MiniMax M2.1" +family = "minimax" +release_date = "2025-12-23" +last_updated = "2025-12-23" +attachment = false +reasoning = true +temperature = true +tool_call = true +open_weights = true + +[cost] +input = 0.30 +output = 1.20 + +[limit] +context = 204_800 +output = 131_072 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml b/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml new file mode 100644 index 000000000..5605eeca2 --- /dev/null +++ b/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml @@ -0,0 +1,21 @@ +name = "MiniMax M2.5" +family = "minimax" +release_date = "2026-02-12" +last_updated = "2026-02-12" +attachment = false +reasoning = true +temperature = true +tool_call = true +open_weights = true + +[cost] +input = 0.30 +output = 1.20 + +[limit] +context = 204_800 +output = 131_072 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml b/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml new file mode 100644 index 000000000..e84fac0d5 --- /dev/null +++ b/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml @@ -0,0 +1,22 @@ +name = "Qwen2.5 VL 32B Instruct" +family = "qwen" +release_date = "2025-03-24" +last_updated = "2025-03-24" +attachment = true +reasoning = false +temperature = true +tool_call = true +structured_output = true +open_weights = true + +[cost] +input = 0.27 +output = 0.27 + +[limit] +context = 131_000 +output = 131_000 + +[modalities] +input = ["text", "image"] +output = ["text"] diff --git a/providers/meganova/models/Qwen/Qwen3-235B-A22B-Instruct-2507.toml b/providers/meganova/models/Qwen/Qwen3-235B-A22B-Instruct-2507.toml new file mode 100644 index 000000000..157c43749 --- /dev/null +++ b/providers/meganova/models/Qwen/Qwen3-235B-A22B-Instruct-2507.toml @@ -0,0 +1,22 @@ +name = "Qwen3 235B A22B Instruct 2507" +family = "qwen" +release_date = "2025-07-23" +last_updated = "2025-07-23" +attachment = false +reasoning = false +temperature = true +tool_call = true +structured_output = true +open_weights = true + +[cost] +input = 0.09 +output = 0.60 + +[limit] +context = 262_000 +output = 262_000 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/Qwen/Qwen3.5-Plus.toml b/providers/meganova/models/Qwen/Qwen3.5-Plus.toml new file mode 100644 index 000000000..dbd174a61 --- /dev/null +++ b/providers/meganova/models/Qwen/Qwen3.5-Plus.toml @@ -0,0 +1,23 @@ +name = "Qwen3.5 Plus" +family = "qwen" +release_date = "2026-02" +last_updated = "2026-02" +attachment = false +reasoning = true +temperature = true +knowledge = "2025-04" +tool_call = true +open_weights = false + +[cost] +input = 0.40 +output = 2.40 +reasoning = 2.40 + +[limit] +context = 1_000_000 +output = 65_536 + +[modalities] +input = ["text", "image", "video"] +output = ["text"] diff --git a/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml b/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml new file mode 100644 index 000000000..bc2a45cb5 --- /dev/null +++ b/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml @@ -0,0 +1,25 @@ +name = "MiMo V2 Flash" +family = "mimo" +release_date = "2025-12-17" +last_updated = "2025-12-17" +attachment = 
false +reasoning = true +temperature = true +knowledge = "2024-12-01" +tool_call = true +open_weights = true + +[interleaved] +field = "reasoning_content" + +[cost] +input = 0.07 +output = 0.21 + +[limit] +context = 256_000 +output = 32_000 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-R1-0528.toml b/providers/meganova/models/deepseek-ai/DeepSeek-R1-0528.toml new file mode 100644 index 000000000..edc542d88 --- /dev/null +++ b/providers/meganova/models/deepseek-ai/DeepSeek-R1-0528.toml @@ -0,0 +1,25 @@ +name = "DeepSeek R1 0528" +family = "deepseek-thinking" +release_date = "2025-05-28" +last_updated = "2025-05-28" +attachment = false +reasoning = true +temperature = true +knowledge = "2024-07" +tool_call = false +open_weights = true + +[interleaved] +field = "reasoning_content" + +[cost] +input = 0.50 +output = 2.15 + +[limit] +context = 163_840 +output = 64_000 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml new file mode 100644 index 000000000..6ddd83bc6 --- /dev/null +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml @@ -0,0 +1,21 @@ +name = "DeepSeek V3 0324" +family = "deepseek" +release_date = "2025-03-24" +last_updated = "2025-03-24" +attachment = false +reasoning = false +temperature = true +tool_call = true +open_weights = true + +[cost] +input = 0.20 +output = 0.80 + +[limit] +context = 163_840 +output = 163_840 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml new file mode 100644 index 000000000..e49acfd00 --- /dev/null +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml @@ -0,0 +1,22 @@ +name = "DeepSeek V3.1" +family = "deepseek" +release_date = "2025-08-25" +last_updated = "2025-08-25" +attachment = false +reasoning = true +temperature = true +tool_call = true +structured_output = true +open_weights = true + +[cost] +input = 0.27 +output = 1.00 + +[limit] +context = 164_000 +output = 164_000 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml new file mode 100644 index 000000000..9bfa73b56 --- /dev/null +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml @@ -0,0 +1,22 @@ +name = "DeepSeek V3.2 Exp" +family = "deepseek" +release_date = "2025-10-10" +last_updated = "2025-10-10" +attachment = false +reasoning = true +temperature = true +tool_call = true +structured_output = true +open_weights = true + +[cost] +input = 0.27 +output = 0.41 + +[limit] +context = 164_000 +output = 164_000 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml new file mode 100644 index 000000000..0dd71e0e2 --- /dev/null +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml @@ -0,0 +1,22 @@ +name = "DeepSeek V3.2" +family = "deepseek" +release_date = "2025-12-03" +last_updated = "2025-12-03" +attachment = false +reasoning = true +temperature = true +tool_call = true +structured_output = true +open_weights = true + +[cost] +input = 0.27 +output = 0.42 + +[limit] +context = 164_000 +output = 164_000 + +[modalities] +input = ["text"] +output = ["text"] diff 
--git a/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml b/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml new file mode 100644 index 000000000..33e44472b --- /dev/null +++ b/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml @@ -0,0 +1,21 @@ +name = "Llama 3.3 70B Instruct" +family = "llama" +release_date = "2024-12-06" +last_updated = "2024-12-06" +attachment = false +reasoning = false +temperature = true +tool_call = true +open_weights = true + +[cost] +input = 0.10 +output = 0.32 + +[limit] +context = 131_072 +output = 16_384 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml b/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml new file mode 100644 index 000000000..a95580260 --- /dev/null +++ b/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml @@ -0,0 +1,22 @@ +name = "Mistral Nemo Instruct 2407" +family = "mistral" +release_date = "2024-07-18" +last_updated = "2024-07-18" +attachment = false +reasoning = false +tool_call = true +structured_output = true +temperature = true +open_weights = true + +[cost] +input = 0.14 +output = 0.14 + +[limit] +context = 65_536 +output = 65_536 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml b/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml new file mode 100644 index 000000000..231a67251 --- /dev/null +++ b/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml @@ -0,0 +1,23 @@ +name = "Mistral Small 3.2 24B Instruct" +family = "mistral-small" +release_date = "2025-06-20" +last_updated = "2025-06-20" +attachment = true +reasoning = false +temperature = true +knowledge = "2024-10" +tool_call = true +structured_output = true +open_weights = true + +[cost] +input = 0.10 +output = 0.30 + +[limit] +context = 96_000 +output = 8_192 + +[modalities] +input = ["text", "image"] +output = ["text"] diff --git a/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml b/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml new file mode 100644 index 000000000..dcad9b1c2 --- /dev/null +++ b/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml @@ -0,0 +1,25 @@ +name = "Kimi K2 Thinking" +family = "kimi-thinking" +release_date = "2025-11-06" +last_updated = "2025-11-06" +attachment = false +reasoning = true +temperature = true +tool_call = true +knowledge = "2024-08" +open_weights = true + +[interleaved] +field = "reasoning_content" + +[cost] +input = 0.60 +output = 2.50 + +[limit] +context = 262_144 +output = 262_144 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/moonshotai/Kimi-K2.5.toml b/providers/meganova/models/moonshotai/Kimi-K2.5.toml new file mode 100644 index 000000000..d2114f7c3 --- /dev/null +++ b/providers/meganova/models/moonshotai/Kimi-K2.5.toml @@ -0,0 +1,22 @@ +name = "Kimi K2.5" +family = "kimi" +release_date = "2026-01-27" +last_updated = "2026-01-27" +attachment = false +reasoning = true +temperature = true +tool_call = true +knowledge = "2026-01" +open_weights = true + +[cost] +input = 0.50 +output = 2.80 + +[limit] +context = 262_144 +output = 262_144 + +[modalities] +input = ["text", "image"] +output = ["text"] diff --git a/providers/meganova/models/zai-org/GLM-4.6.toml b/providers/meganova/models/zai-org/GLM-4.6.toml new file mode 100644 index 000000000..b68c9ed48 --- /dev/null +++ 
b/providers/meganova/models/zai-org/GLM-4.6.toml @@ -0,0 +1,22 @@ +name = "GLM-4.6" +family = "glm" +release_date = "2025-09-30" +last_updated = "2025-09-30" +attachment = false +reasoning = true +temperature = true +tool_call = true +knowledge = "2025-04" +open_weights = true + +[cost] +input = 0.60 +output = 2.20 + +[limit] +context = 204_800 +output = 131_072 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/zai-org/GLM-4.7.toml b/providers/meganova/models/zai-org/GLM-4.7.toml new file mode 100644 index 000000000..4f977b4ee --- /dev/null +++ b/providers/meganova/models/zai-org/GLM-4.7.toml @@ -0,0 +1,25 @@ +name = "GLM-4.7" +family = "glm" +release_date = "2025-12-22" +last_updated = "2025-12-22" +attachment = false +reasoning = true +temperature = true +tool_call = true +knowledge = "2025-04" +open_weights = true + +[interleaved] +field = "reasoning_content" + +[cost] +input = 0.60 +output = 2.20 + +[limit] +context = 204_800 +output = 131_072 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/models/zai-org/GLM-5.toml b/providers/meganova/models/zai-org/GLM-5.toml new file mode 100644 index 000000000..9a6c6dd73 --- /dev/null +++ b/providers/meganova/models/zai-org/GLM-5.toml @@ -0,0 +1,24 @@ +name = "GLM-5" +family = "glm" +release_date = "2026-02-11" +last_updated = "2026-02-11" +attachment = false +reasoning = true +temperature = true +tool_call = true +open_weights = false + +[interleaved] +field = "reasoning_content" + +[cost] +input = 1.00 +output = 3.20 + +[limit] +context = 204_800 +output = 131_072 + +[modalities] +input = ["text"] +output = ["text"] diff --git a/providers/meganova/provider.toml b/providers/meganova/provider.toml new file mode 100644 index 000000000..1afd1ae49 --- /dev/null +++ b/providers/meganova/provider.toml @@ -0,0 +1,5 @@ +name = "Meganova" +env = ["MEGANOVA_API_KEY"] +npm = "@ai-sdk/openai-compatible" +api = "https://api.meganova.ai/v1" +doc = "https://docs.meganova.ai" From a2f8234c8ee32bc30e49b4bc8b5f14b5a2ae8dc9 Mon Sep 17 00:00:00 2001 From: Boqian Date: Wed, 18 Feb 2026 16:25:43 -0500 Subject: [PATCH 2/5] Update pricing and context limits from Meganova API Use actual pricing from https://api.meganova.ai/v1/models instead of reference data from other providers. 
Co-Authored-By: Claude Opus 4.6 --- providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml | 4 ++-- .../meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml | 8 ++++---- providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml | 6 +++--- .../meganova/models/deepseek-ai/DeepSeek-V3-0324.toml | 4 ++-- .../meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml | 2 +- providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml | 4 ++-- .../models/meta-llama/Llama-3.3-70B-Instruct.toml | 2 +- .../models/mistralai/Mistral-Nemo-Instruct-2407.toml | 6 +++--- .../mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml | 6 +++--- .../meganova/models/moonshotai/Kimi-K2-Thinking.toml | 2 +- providers/meganova/models/moonshotai/Kimi-K2.5.toml | 2 +- providers/meganova/models/zai-org/GLM-4.6.toml | 6 +++--- providers/meganova/models/zai-org/GLM-4.7.toml | 6 +++--- providers/meganova/models/zai-org/GLM-5.toml | 6 +++--- 14 files changed, 32 insertions(+), 32 deletions(-) diff --git a/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml b/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml index 671a11863..30eec950b 100644 --- a/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml +++ b/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml @@ -9,11 +9,11 @@ tool_call = true open_weights = true [cost] -input = 0.30 +input = 0.28 output = 1.20 [limit] -context = 204_800 +context = 196_608 output = 131_072 [modalities] diff --git a/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml b/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml index e84fac0d5..94560e560 100644 --- a/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml +++ b/providers/meganova/models/Qwen/Qwen2.5-VL-32B-Instruct.toml @@ -10,12 +10,12 @@ structured_output = true open_weights = true [cost] -input = 0.27 -output = 0.27 +input = 0.20 +output = 0.60 [limit] -context = 131_000 -output = 131_000 +context = 16_384 +output = 16_384 [modalities] input = ["text", "image"] diff --git a/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml b/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml index bc2a45cb5..2a8f22b62 100644 --- a/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml +++ b/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml @@ -13,11 +13,11 @@ open_weights = true field = "reasoning_content" [cost] -input = 0.07 -output = 0.21 +input = 0.10 +output = 0.30 [limit] -context = 256_000 +context = 262_144 output = 32_000 [modalities] diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml index 6ddd83bc6..546ee80df 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3-0324.toml @@ -9,8 +9,8 @@ tool_call = true open_weights = true [cost] -input = 0.20 -output = 0.80 +input = 0.25 +output = 0.88 [limit] context = 163_840 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml index 9bfa73b56..fb8b26602 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml @@ -11,7 +11,7 @@ open_weights = true [cost] input = 0.27 -output = 0.41 +output = 0.40 [limit] context = 164_000 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml index 0dd71e0e2..875b90cff 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml +++ 
b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml @@ -10,8 +10,8 @@ structured_output = true open_weights = true [cost] -input = 0.27 -output = 0.42 +input = 0.26 +output = 0.38 [limit] context = 164_000 diff --git a/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml b/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml index 33e44472b..de3b811a1 100644 --- a/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml +++ b/providers/meganova/models/meta-llama/Llama-3.3-70B-Instruct.toml @@ -10,7 +10,7 @@ open_weights = true [cost] input = 0.10 -output = 0.32 +output = 0.30 [limit] context = 131_072 diff --git a/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml b/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml index a95580260..6d2c00309 100644 --- a/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml +++ b/providers/meganova/models/mistralai/Mistral-Nemo-Instruct-2407.toml @@ -10,11 +10,11 @@ temperature = true open_weights = true [cost] -input = 0.14 -output = 0.14 +input = 0.02 +output = 0.04 [limit] -context = 65_536 +context = 131_072 output = 65_536 [modalities] diff --git a/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml b/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml index 231a67251..57a5d2b44 100644 --- a/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml +++ b/providers/meganova/models/mistralai/Mistral-Small-3.2-24B-Instruct-2506.toml @@ -11,11 +11,11 @@ structured_output = true open_weights = true [cost] -input = 0.10 -output = 0.30 +input = 0 +output = 0 [limit] -context = 96_000 +context = 32_768 output = 8_192 [modalities] diff --git a/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml b/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml index dcad9b1c2..4c28538dd 100644 --- a/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml +++ b/providers/meganova/models/moonshotai/Kimi-K2-Thinking.toml @@ -14,7 +14,7 @@ field = "reasoning_content" [cost] input = 0.60 -output = 2.50 +output = 2.60 [limit] context = 262_144 diff --git a/providers/meganova/models/moonshotai/Kimi-K2.5.toml b/providers/meganova/models/moonshotai/Kimi-K2.5.toml index d2114f7c3..956fe0794 100644 --- a/providers/meganova/models/moonshotai/Kimi-K2.5.toml +++ b/providers/meganova/models/moonshotai/Kimi-K2.5.toml @@ -10,7 +10,7 @@ knowledge = "2026-01" open_weights = true [cost] -input = 0.50 +input = 0.45 output = 2.80 [limit] diff --git a/providers/meganova/models/zai-org/GLM-4.6.toml b/providers/meganova/models/zai-org/GLM-4.6.toml index b68c9ed48..b6ffe49a2 100644 --- a/providers/meganova/models/zai-org/GLM-4.6.toml +++ b/providers/meganova/models/zai-org/GLM-4.6.toml @@ -10,11 +10,11 @@ knowledge = "2025-04" open_weights = true [cost] -input = 0.60 -output = 2.20 +input = 0.45 +output = 1.90 [limit] -context = 204_800 +context = 202_752 output = 131_072 [modalities] diff --git a/providers/meganova/models/zai-org/GLM-4.7.toml b/providers/meganova/models/zai-org/GLM-4.7.toml index 4f977b4ee..caf9fb02e 100644 --- a/providers/meganova/models/zai-org/GLM-4.7.toml +++ b/providers/meganova/models/zai-org/GLM-4.7.toml @@ -13,11 +13,11 @@ open_weights = true field = "reasoning_content" [cost] -input = 0.60 -output = 2.20 +input = 0.20 +output = 0.80 [limit] -context = 204_800 +context = 202_752 output = 131_072 [modalities] diff --git a/providers/meganova/models/zai-org/GLM-5.toml 
b/providers/meganova/models/zai-org/GLM-5.toml index 9a6c6dd73..df55f613d 100644 --- a/providers/meganova/models/zai-org/GLM-5.toml +++ b/providers/meganova/models/zai-org/GLM-5.toml @@ -12,11 +12,11 @@ open_weights = false field = "reasoning_content" [cost] -input = 1.00 -output = 3.20 +input = 0.80 +output = 2.56 [limit] -context = 204_800 +context = 202_752 output = 131_072 [modalities] From 6e02385a6ba2942c1e431c0011486df2d3ac6b24 Mon Sep 17 00:00:00 2001 From: Boqian Date: Wed, 18 Feb 2026 16:29:41 -0500 Subject: [PATCH 3/5] Add interleaved reasoning_content to DeepSeek V3.1, V3.2, V3.2-Exp These models support interleaved reasoning output, matching how other providers (deepinfra, baseten, chutes) configure them. Co-Authored-By: Claude Opus 4.6 --- providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml | 3 +++ providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml | 3 +++ providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml | 3 +++ 3 files changed, 9 insertions(+) diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml index e49acfd00..eba3f308c 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml @@ -9,6 +9,9 @@ tool_call = true structured_output = true open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.27 output = 1.00 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml index fb8b26602..aca000c2e 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml @@ -9,6 +9,9 @@ tool_call = true structured_output = true open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.27 output = 0.40 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml index 875b90cff..39b926fbc 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml @@ -9,6 +9,9 @@ tool_call = true structured_output = true open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.26 output = 0.38 From 2d83b96bb18a9f6e5e0cd6b66e693a69014cef86 Mon Sep 17 00:00:00 2001 From: Boqian Date: Wed, 18 Feb 2026 16:35:51 -0500 Subject: [PATCH 4/5] Fix interleaved reasoning_content based on Meganova API testing Tested each model with include_reasoning=true against the live API. 
Added [interleaved] to: GLM-4.6, MiniMax-M2.1, MiniMax-M2.5, Kimi-K2.5 Removed [interleaved] from: DeepSeek-V3.1, V3.2, V3.2-Exp, MiMo-V2-Flash Co-Authored-By: Claude Opus 4.6 --- providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml | 3 +++ providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml | 3 +++ providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml | 3 --- providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml | 3 --- providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml | 3 --- providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml | 3 --- providers/meganova/models/moonshotai/Kimi-K2.5.toml | 3 +++ providers/meganova/models/zai-org/GLM-4.6.toml | 3 +++ 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml b/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml index 30eec950b..c41250ad5 100644 --- a/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml +++ b/providers/meganova/models/MiniMaxAI/MiniMax-M2.1.toml @@ -8,6 +8,9 @@ temperature = true tool_call = true open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.28 output = 1.20 diff --git a/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml b/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml index 5605eeca2..7242bab2d 100644 --- a/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml +++ b/providers/meganova/models/MiniMaxAI/MiniMax-M2.5.toml @@ -8,6 +8,9 @@ temperature = true tool_call = true open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.30 output = 1.20 diff --git a/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml b/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml index 2a8f22b62..ba5a51086 100644 --- a/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml +++ b/providers/meganova/models/XiaomiMiMo/MiMo-V2-Flash.toml @@ -9,9 +9,6 @@ knowledge = "2024-12-01" tool_call = true open_weights = true -[interleaved] -field = "reasoning_content" - [cost] input = 0.10 output = 0.30 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml index eba3f308c..e49acfd00 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml @@ -9,9 +9,6 @@ tool_call = true structured_output = true open_weights = true -[interleaved] -field = "reasoning_content" - [cost] input = 0.27 output = 1.00 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml index aca000c2e..fb8b26602 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml @@ -9,9 +9,6 @@ tool_call = true structured_output = true open_weights = true -[interleaved] -field = "reasoning_content" - [cost] input = 0.27 output = 0.40 diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml index 39b926fbc..875b90cff 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml @@ -9,9 +9,6 @@ tool_call = true structured_output = true open_weights = true -[interleaved] -field = "reasoning_content" - [cost] input = 0.26 output = 0.38 diff --git a/providers/meganova/models/moonshotai/Kimi-K2.5.toml b/providers/meganova/models/moonshotai/Kimi-K2.5.toml index 956fe0794..d58969896 100644 --- 
a/providers/meganova/models/moonshotai/Kimi-K2.5.toml +++ b/providers/meganova/models/moonshotai/Kimi-K2.5.toml @@ -9,6 +9,9 @@ tool_call = true knowledge = "2026-01" open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.45 output = 2.80 diff --git a/providers/meganova/models/zai-org/GLM-4.6.toml b/providers/meganova/models/zai-org/GLM-4.6.toml index b6ffe49a2..7605a33a9 100644 --- a/providers/meganova/models/zai-org/GLM-4.6.toml +++ b/providers/meganova/models/zai-org/GLM-4.6.toml @@ -9,6 +9,9 @@ tool_call = true knowledge = "2025-04" open_weights = true +[interleaved] +field = "reasoning_content" + [cost] input = 0.45 output = 1.90 From 92d9e89690413fb635472e13b38f978c1edc7cda Mon Sep 17 00:00:00 2001 From: Boqian Date: Wed, 18 Feb 2026 16:41:07 -0500 Subject: [PATCH 5/5] Set reasoning=false for DeepSeek V3 series V3-0324, V3.1, V3.2, V3.2-Exp are chat models, not reasoning models. Only DeepSeek-R1 is a reasoning model. Co-Authored-By: Claude Opus 4.6 --- providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml | 2 +- providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml | 2 +- providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml index e49acfd00..d4b20edb1 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.1.toml @@ -3,7 +3,7 @@ family = "deepseek" release_date = "2025-08-25" last_updated = "2025-08-25" attachment = false -reasoning = true +reasoning = false temperature = true tool_call = true structured_output = true diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml index fb8b26602..c2e22c13e 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2-Exp.toml @@ -3,7 +3,7 @@ family = "deepseek" release_date = "2025-10-10" last_updated = "2025-10-10" attachment = false -reasoning = true +reasoning = false temperature = true tool_call = true structured_output = true diff --git a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml index 875b90cff..99faceba9 100644 --- a/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml +++ b/providers/meganova/models/deepseek-ai/DeepSeek-V3.2.toml @@ -3,7 +3,7 @@ family = "deepseek" release_date = "2025-12-03" last_updated = "2025-12-03" attachment = false -reasoning = true +reasoning = false temperature = true tool_call = true structured_output = true
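
For context on how the new entries are consumed: provider.toml wires Meganova through @ai-sdk/openai-compatible, reading MEGANOVA_API_KEY and pointing at https://api.meganova.ai/v1. Below is a minimal sketch of that wiring plus the include_reasoning check used for patch 4. The base URL, env var, include_reasoning flag, and reasoning_content field come from this series; treating the repo path (e.g. "zai-org/GLM-4.6") as the API model id is an assumption, and the snippet has not been run against the live API.

  // Minimal sketch (TypeScript, Node 18+): map provider.toml onto the AI SDK's
  // OpenAI-compatible client. Assumes MEGANOVA_API_KEY is set, per provider.toml "env".
  import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
  import { generateText } from 'ai';

  const meganova = createOpenAICompatible({
    name: 'meganova',
    baseURL: 'https://api.meganova.ai/v1',        // provider.toml "api"
    apiKey: process.env.MEGANOVA_API_KEY!,        // provider.toml "env"
  });

  const { text } = await generateText({
    // Assumption: the model id matches the TOML file path under providers/meganova/models/.
    model: meganova('deepseek-ai/DeepSeek-V3.2'),
    prompt: 'Say hello in one sentence.',
  });
  console.log(text);

  // Approximation of the patch 4 test: a raw chat/completions call with
  // include_reasoning=true, checking whether the response carries the
  // "reasoning_content" field declared under [interleaved] in the model TOMLs.
  const res = await fetch('https://api.meganova.ai/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.MEGANOVA_API_KEY}`,
    },
    body: JSON.stringify({
      model: 'zai-org/GLM-4.6',
      messages: [{ role: 'user', content: 'What is 17 * 23?' }],
      include_reasoning: true,
    }),
  });
  const data = await res.json();
  console.log(data.choices?.[0]?.message?.reasoning_content ?? '(no reasoning_content returned)');

Whether a given model actually returns reasoning_content on Meganova is exactly what patch 4 verified before toggling [interleaved] per model.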