# SPDX-FileCopyrightText: 2025 MiromindAI
#
# SPDX-License-Identifier: Apache-2.0

"""
MiniMax LLM client - OpenAI-compatible provider for MiniMax M2.7 models.

Supported models:
    - MiniMax-M2.7: Peak performance, ultimate value (default)
    - MiniMax-M2.7-highspeed: Same performance, faster and more agile

API docs: https://platform.minimax.io/docs/api-reference/text-openai-api
"""
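
# A minimal sketch of calling the same OpenAI-compatible endpoint directly with the
# openai SDK, for orientation only; MiniMaxClient below wraps this. It assumes
# MINIMAX_API_KEY is set in the environment; model name and base_url mirror the
# defaults used in this module.
#
#   import os
#   from openai import OpenAI
#
#   client = OpenAI(
#       api_key=os.environ["MINIMAX_API_KEY"],
#       base_url="https://api.minimax.io/v1",
#   )
#   resp = client.chat.completions.create(
#       model="MiniMax-M2.7",
#       messages=[{"role": "user", "content": "Hello"}],
#   )
#   print(resp.choices[0].message.content)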

from typing import Any, Dict, List

from omegaconf import DictConfig
from openai import AsyncOpenAI, OpenAI
from tenacity import retry, stop_after_attempt, wait_fixed

from miroflow.llm.base import LLMClientBase
from miroflow.logging.task_tracer import get_tracer

logger = get_tracer()

# MiniMax models supported
MINIMAX_MODELS = {"MiniMax-M2.7", "MiniMax-M2.7-highspeed"}

# MiniMax temperature range: (0.0, 1.0]
MINIMAX_TEMP_MIN = 0.01
MINIMAX_TEMP_MAX = 1.0


def _clamp_temperature(temperature: float) -> float:
    """Clamp temperature to MiniMax's valid range (0.0, 1.0]."""
    if temperature <= 0.0:
        return MINIMAX_TEMP_MIN
    if temperature > MINIMAX_TEMP_MAX:
        return MINIMAX_TEMP_MAX
    return temperature

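# Illustrative behavior of the clamp above (shown as comments only; values follow
# directly from MINIMAX_TEMP_MIN / MINIMAX_TEMP_MAX):
#   _clamp_temperature(0.0) -> 0.01  (raised to MINIMAX_TEMP_MIN)
#   _clamp_temperature(0.7) -> 0.7   (already in range)
#   _clamp_temperature(1.5) -> 1.0   (capped at MINIMAX_TEMP_MAX)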

class MiniMaxClient(LLMClientBase):
    """
    MiniMax LLM client using an OpenAI-compatible API.

    MiniMax provides high-performance language models accessible via
    an OpenAI-compatible endpoint at https://api.minimax.io/v1.

    Configuration example (YAML):
        provider_class: "MiniMaxClient"
        model_name: "MiniMax-M2.7"
        api_key: ${oc.env:MINIMAX_API_KEY,???}
        base_url: ${oc.env:MINIMAX_BASE_URL,https://api.minimax.io/v1}
    """

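    # A minimal sketch, assuming OmegaConf is used directly, of how the ${oc.env:...}
    # interpolations in the docstring above resolve; in practice the surrounding
    # framework builds and resolves this config before constructing the client.
    #
    #   from omegaconf import OmegaConf
    #
    #   cfg = OmegaConf.create(
    #       {
    #           "model_name": "MiniMax-M2.7",
    #           "base_url": "${oc.env:MINIMAX_BASE_URL,https://api.minimax.io/v1}",
    #       }
    #   )
    #   print(cfg.base_url)  # -> https://api.minimax.io/v1 when MINIMAX_BASE_URL is unset
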
    def _create_client(self, config: DictConfig):
        """Create configured OpenAI-compatible client for MiniMax."""
        if self.async_client:
            return AsyncOpenAI(
                api_key=self.cfg.api_key,
                base_url=self.cfg.base_url,
                timeout=1800,
            )
        else:
            return OpenAI(
                api_key=self.cfg.api_key,
                base_url=self.cfg.base_url,
                timeout=1800,
            )

    @retry(wait=wait_fixed(10), stop=stop_after_attempt(10))
    async def _create_message(
        self,
        system_prompt: str,
        messages: List[Dict[str, Any]],
        tools_definitions,
        keep_tool_result: int = -1,
    ):
        """Send message to MiniMax API via OpenAI-compatible endpoint."""
        logger.debug(
            f"Calling MiniMax LLM ({'async' if self.async_client else 'sync'})"
        )

        # Inject system prompt
        if system_prompt:
            if messages and messages[0]["role"] in ["system", "developer"]:
                messages[0] = {
                    "role": "system",
                    "content": [dict(type="text", text=system_prompt)],
                }
            else:
                messages.insert(
                    0,
                    {
                        "role": "system",
                        "content": [dict(type="text", text=system_prompt)],
                    },
                )

        messages_copy = self._remove_tool_result_from_messages(
            messages, keep_tool_result
        )

        if tools_definitions:
            tool_list = await self.convert_tool_definition_to_tool_call(
                tools_definitions
            )
        else:
            tool_list = None

        # Clamp temperature to MiniMax's valid range
        temperature = _clamp_temperature(self.temperature)

        params = {
            "model": self.model_name,
            "temperature": temperature,
            "max_completion_tokens": self.max_tokens,
            "messages": messages_copy,
            "tools": tool_list,
            "stream": False,
        }

        if self.top_p != 1.0:
            params["top_p"] = self.top_p

        try:
            if self.async_client:
                response = await self.client.chat.completions.create(**params)
            else:
                response = self.client.chat.completions.create(**params)

            logger.debug(
                f"MiniMax LLM call status: {getattr(response.choices[0], 'finish_reason', 'N/A')}"
            )
            return response
        except Exception as e:
            logger.exception(f"MiniMax LLM call failed: {str(e)}")
            raise

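    # For reference, a sketch of the payload assembled by _create_message above.
    # Values are illustrative only; the real ones come from the runtime config.
    #
    #   {
    #       "model": "MiniMax-M2.7",
    #       "temperature": 0.7,
    #       "max_completion_tokens": 4096,
    #       "messages": [
    #           {"role": "system", "content": [{"type": "text", "text": "..."}]},
    #           {"role": "user", "content": "..."},
    #       ],
    #       "tools": None,  # or the converted tool definitions
    #       "stream": False,
    #   }
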
    def process_llm_response(self, llm_response) -> tuple[str, bool, dict]:
        """Process MiniMax LLM response (OpenAI-compatible format)."""
        if not llm_response or not llm_response.choices:
            logger.debug("Error: MiniMax LLM did not return a valid response.")
            return "", True, {}

        finish_reason = llm_response.choices[0].finish_reason

        if finish_reason == "stop":
            text = llm_response.choices[0].message.content or ""
            return text, False, {"role": "assistant", "content": text}

        if finish_reason == "tool_calls":
            tool_calls = llm_response.choices[0].message.tool_calls
            text = llm_response.choices[0].message.content or ""

            if not text:
                descriptions = []
                for tc in tool_calls:
                    descriptions.append(
                        f"Using tool {tc.function.name} with arguments: {tc.function.arguments}"
                    )
                text = "\n".join(descriptions)

            assistant_message = {
                "role": "assistant",
                "content": text,
                "tool_calls": [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments,
                        },
                    }
                    for tc in tool_calls
                ],
            }
            return text, False, assistant_message

        if finish_reason == "length":
            text = llm_response.choices[0].message.content or ""
            if text == "":
                text = "LLM response is empty. This is likely because the thinking block used up all available tokens."
            return text, False, {"role": "assistant", "content": text}

        raise ValueError(f"Unsupported finish reason: {finish_reason}")

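    # Return shape of process_llm_response, for quick reference:
    #   no valid response -> ("", True, {})
    #   "stop"            -> (content, False, {"role": "assistant", "content": content})
    #   "tool_calls"      -> (text or a tool-call summary, False, assistant message with tool_calls)
    #   "length"          -> (content or a placeholder note, False, {"role": "assistant", "content": ...})
    # Any other finish_reason raises ValueError.
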
    def extract_tool_calls_info(self, llm_response, assistant_response_text):
        """Extract tool call information from MiniMax response."""
        from miroflow.utils.parsing_utils import parse_llm_response_for_tool_calls

        if llm_response.choices[0].finish_reason == "tool_calls":
            return parse_llm_response_for_tool_calls(
                llm_response.choices[0].message.tool_calls
            )
        return [], []

    def update_message_history(
        self, message_history, tool_call_info, tool_calls_exceeded: bool = False
    ):
        """Update message history with tool call results."""
        for cur_call_id, tool_result in tool_call_info:
            message_history.append(
                {
                    "role": "tool",
                    "tool_call_id": cur_call_id,
                    "content": tool_result["text"],
                }
            )
        return message_history

    def handle_max_turns_reached_summary_prompt(self, message_history, summary_prompt):
        """Handle max turns reached summary prompt."""
        return summary_prompt