Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ Want to know more about its architecture and how it works? You can read it [here

## ✨ Features

🤖 **Support for all major AI providers** - Use local LLMs through Ollama or connect to OpenAI, Anthropic Claude, Google Gemini, Groq, and more. Mix and match models based on your needs.
🤖 **Support for all major AI providers** - Use local LLMs through Ollama or connect to OpenAI, Anthropic Claude, Google Gemini, Groq, MiniMax, and more. Mix and match models based on your needs.

⚡ **Smart search modes** - Choose Speed Mode when you need quick answers, Balanced Mode for everyday searches, or Quality Mode for deep research.

Expand Down
2 changes: 2 additions & 0 deletions src/lib/models/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import GroqProvider from './groq';
import LemonadeProvider from './lemonade';
import AnthropicProvider from './anthropic';
import LMStudioProvider from './lmstudio';
import MiniMaxProvider from './minimax';

export const providers: Record<string, ProviderConstructor<any>> = {
openai: OpenAIProvider,
Expand All @@ -18,6 +19,7 @@ export const providers: Record<string, ProviderConstructor<any>> = {
lemonade: LemonadeProvider,
anthropic: AnthropicProvider,
lmstudio: LMStudioProvider,
minimax: MiniMaxProvider,
};

export const getModelProvidersUIConfigSection =
Expand Down
111 changes: 111 additions & 0 deletions src/lib/models/providers/minimax/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
import { UIConfigField } from '@/lib/config/types';
import { getConfiguredModelProviderById } from '@/lib/config/serverRegistry';
import { Model, ModelList, ProviderMetadata } from '../../types';
import BaseEmbedding from '../../base/embedding';
import BaseModelProvider from '../../base/provider';
import BaseLLM from '../../base/llm';
import MiniMaxLLM from './miniMaxLLM';

/** Server-side configuration persisted for a MiniMax provider instance. */
interface MiniMaxConfig {
  apiKey: string;
  // Optional API endpoint override; when omitted the provider falls back to
  // https://api.minimax.io/v1 (see loadChatModel).
  baseURL?: string;
}

// Chat models offered out of the box; additional models can be added through
// the configured provider registry and are merged in by getModelList().
const DEFAULT_CHAT_MODELS: Model[] = [
  { key: 'MiniMax-M2.7', name: 'MiniMax M2.7' },
  { key: 'MiniMax-M2.5', name: 'MiniMax M2.5' },
  { key: 'MiniMax-M2.5-highspeed', name: 'MiniMax M2.5 High Speed' },
];

// UI form fields rendered when configuring this provider. Both fields are
// server-scoped; values may also be supplied via the listed env variables.
const providerConfigFields: UIConfigField[] = [
  {
    type: 'password',
    name: 'API Key',
    key: 'apiKey',
    description: 'Your MiniMax API key',
    required: true,
    placeholder: 'MiniMax API Key',
    env: 'MINIMAX_API_KEY',
    scope: 'server',
  },
  {
    type: 'string',
    name: 'Base URL',
    key: 'baseURL',
    description: 'MiniMax API base URL (default: https://api.minimax.io/v1)',
    required: false,
    placeholder: 'https://api.minimax.io/v1',
    env: 'MINIMAX_BASE_URL',
    scope: 'server',
  },
];

/**
 * Model provider for the MiniMax OpenAI-compatible chat API.
 *
 * Only chat models are supported; MiniMax exposes no embedding endpoint
 * through this integration, so embedding lists are always empty and
 * loadEmbeddingModel always throws.
 */
class MiniMaxProvider extends BaseModelProvider<MiniMaxConfig> {
  constructor(id: string, name: string, config: MiniMaxConfig) {
    super(id, name, config);
  }

  /** Returns the built-in model catalog (chat only, no embeddings). */
  async getDefaultModels(): Promise<ModelList> {
    return {
      embedding: [],
      chat: DEFAULT_CHAT_MODELS,
    };
  }

  /**
   * Returns the built-in chat models merged with any user-configured ones
   * from the server registry.
   *
   * @throws Error if this provider id is no longer present in the registry
   *   (fail loudly instead of crashing with a TypeError on `.chatModels`,
   *   which is what the previous non-null assertion would have produced).
   */
  async getModelList(): Promise<ModelList> {
    const defaultModels = await this.getDefaultModels();
    const configProvider = getConfiguredModelProviderById(this.id);

    if (!configProvider) {
      throw new Error(
        `MiniMax provider with id "${this.id}" is not configured.`,
      );
    }

    return {
      embedding: [],
      chat: [...defaultModels.chat, ...configProvider.chatModels],
    };
  }

  /**
   * Instantiates the chat model identified by `key`.
   *
   * @throws Error when `key` is not in the merged model list.
   */
  async loadChatModel(key: string): Promise<BaseLLM<any>> {
    const modelList = await this.getModelList();

    const exists = modelList.chat.find((m) => m.key === key);

    if (!exists) {
      throw new Error(
        'Error Loading MiniMax Chat Model. Invalid Model Selected',
      );
    }

    return new MiniMaxLLM({
      apiKey: this.config.apiKey,
      model: key,
      // Default to the public MiniMax endpoint when no override is set.
      baseURL: this.config.baseURL || 'https://api.minimax.io/v1',
    });
  }

  /** Unsupported: MiniMax has no embedding models in this integration. */
  async loadEmbeddingModel(key: string): Promise<BaseEmbedding<any>> {
    throw new Error('MiniMax provider does not support embedding models.');
  }

  /**
   * Validates raw (untrusted) config into a MiniMaxConfig.
   *
   * @throws Error when `raw` is not an object or lacks an apiKey.
   */
  static parseAndValidate(raw: any): MiniMaxConfig {
    if (!raw || typeof raw !== 'object')
      throw new Error('Invalid config provided. Expected object');
    if (!raw.apiKey)
      throw new Error('Invalid config provided. API key must be provided');

    return {
      apiKey: String(raw.apiKey),
      // Only include baseURL when a truthy value was supplied.
      ...(raw.baseURL && { baseURL: String(raw.baseURL) }),
    };
  }

  /** UI form fields for configuring this provider. */
  static getProviderConfigFields(): UIConfigField[] {
    return providerConfigFields;
  }

  /** Stable key + display name used by the provider registry. */
  static getProviderMetadata(): ProviderMetadata {
    return {
      key: 'minimax',
      name: 'MiniMax',
    };
  }
}

export default MiniMaxProvider;
131 changes: 131 additions & 0 deletions src/lib/models/providers/minimax/miniMaxLLM.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
import OpenAILLM from '../openai/openaiLLM';
import {
GenerateObjectInput,
GenerateOptions,
GenerateTextInput,
GenerateTextOutput,
StreamTextOutput,
} from '../../types';
import z from 'zod';
import { parse } from 'partial-json';
import { repairJson } from '@toolsycc/json-repair';

class MiniMaxLLM extends OpenAILLM {
async generateText(input: GenerateTextInput): Promise<GenerateTextOutput> {
const clampedInput = {
...input,
options: this.clampTemperature(input.options),
};
return super.generateText(clampedInput);
}

async *streamText(
input: GenerateTextInput,
): AsyncGenerator<StreamTextOutput> {
const clampedInput = {
...input,
options: this.clampTemperature(input.options),
};
yield* super.streamText(clampedInput);
}

async generateObject<T>(input: GenerateObjectInput): Promise<T> {
const jsonSchema = z.toJSONSchema(input.schema);
const jsonPrompt = `You must respond with valid JSON only, no other text. The JSON must conform to this schema:\n${JSON.stringify(jsonSchema, null, 2)}`;

const systemMessage = { role: 'system' as const, content: jsonPrompt };
const messages = [systemMessage, ...input.messages];

const response = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: this.convertToOpenAIMessages(messages),
temperature:
this.clampTemperature(input.options)?.temperature ??
this.clampTemperature(this.config.options)?.temperature ??
1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
});

if (response.choices && response.choices.length > 0) {
try {
return input.schema.parse(
JSON.parse(
repairJson(response.choices[0].message.content!, {
extractJson: true,
}) as string,
),
) as T;
} catch (err) {
throw new Error(`Error parsing response from MiniMax: ${err}`);
}
}

throw new Error('No response from MiniMax');
}

async *streamObject<T>(input: GenerateObjectInput): AsyncGenerator<T> {
const jsonSchema = z.toJSONSchema(input.schema);
const jsonPrompt = `You must respond with valid JSON only, no other text. The JSON must conform to this schema:\n${JSON.stringify(jsonSchema, null, 2)}`;

const systemMessage = { role: 'system' as const, content: jsonPrompt };
const messages = [systemMessage, ...input.messages];

let receivedObj = '';

const stream = await this.openAIClient.chat.completions.create({
model: this.config.model,
messages: this.convertToOpenAIMessages(messages),
temperature:
this.clampTemperature(input.options)?.temperature ??
this.clampTemperature(this.config.options)?.temperature ??
1.0,
top_p: input.options?.topP ?? this.config.options?.topP,
max_completion_tokens:
input.options?.maxTokens ?? this.config.options?.maxTokens,
stop: input.options?.stopSequences ?? this.config.options?.stopSequences,
frequency_penalty:
input.options?.frequencyPenalty ??
this.config.options?.frequencyPenalty,
presence_penalty:
input.options?.presencePenalty ?? this.config.options?.presencePenalty,
stream: true,
});

for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0) {
const content = chunk.choices[0].delta.content || '';
receivedObj += content;

try {
yield parse(receivedObj) as T;
} catch {
yield {} as T;
}
}
}
}

private clampTemperature(
options?: GenerateOptions,
): GenerateOptions | undefined {
if (!options) return options;
if (
options.temperature !== undefined &&
options.temperature !== null &&
options.temperature <= 0
Copy link
Copy Markdown
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 14, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2: Temperature normalization is incomplete for MiniMax: values above 1 are not clamped, and inherited text paths can still send invalid config-level temperatures.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At src/lib/models/providers/minimax/miniMaxLLM.ts, line 123:

<comment>Temperature normalization is incomplete for MiniMax: values above 1 are not clamped, and inherited text paths can still send invalid config-level temperatures.</comment>

<file context>
@@ -0,0 +1,131 @@
+    if (
+      options.temperature !== undefined &&
+      options.temperature !== null &&
+      options.temperature <= 0
+    ) {
+      return { ...options, temperature: 0.01 };
</file context>
Fix with Cubic

) {
return { ...options, temperature: 0.01 };
}
return options;
}
}

export default MiniMaxLLM;