diff --git a/locales/ar/chat.json b/locales/ar/chat.json
index c5c0a7c369be1..c18e67e12583c 100644
--- a/locales/ar/chat.json
+++ b/locales/ar/chat.json
@@ -82,6 +82,10 @@
       }
     }
   },
+  "reasoning": {
+    "thinking": "في عمق التفكير",
+    "thought": "تم التفكير بعمق (استغرق {{duration}} ثانية)"
+  },
   "regenerate": "إعادة الإنشاء",
   "roleAndArchive": "الدور والأرشيف",
   "searchAgentPlaceholder": "مساعد البحث...",
diff --git a/locales/ar/components.json b/locales/ar/components.json
index 5ca5b783a446a..1943c18540501 100644
--- a/locales/ar/components.json
+++ b/locales/ar/components.json
@@ -76,6 +76,7 @@
     "custom": "نموذج مخصص، الإعداد الافتراضي يدعم الاستدعاء الوظيفي والتعرف البصري، يرجى التحقق من قدرة النموذج على القيام بذلك بناءً على الحالة الفعلية",
     "file": "يدعم هذا النموذج قراءة وتعرف الملفات المرفوعة",
     "functionCall": "يدعم هذا النموذج استدعاء الوظائف",
+    "reasoning": "يدعم هذا النموذج التفكير العميق",
     "tokens": "يدعم هذا النموذج حتى {{tokens}} رمزًا في جلسة واحدة",
     "vision": "يدعم هذا النموذج التعرف البصري"
   },
diff --git a/locales/ar/models.json b/locales/ar/models.json
index 3b228392a3e43..773289ab24faf 100644
--- a/locales/ar/models.json
+++ b/locales/ar/models.json
@@ -1328,6 +1328,9 @@
   "step-1.5v-mini": {
     "description": "يمتلك هذا النموذج قدرة قوية على فهم الفيديو."
   },
+  "step-1o-vision-32k": {
+    "description": "يمتلك هذا النموذج قدرة قوية على فهم الصور. مقارنةً بسلسلة نماذج step-1v، فإنه يتمتع بأداء بصري أقوى."
+  },
   "step-1v-32k": {
     "description": "يدعم المدخلات البصرية، يعزز تجربة التفاعل متعدد الوسائط."
   },
@@ -1337,6 +1340,9 @@
   "step-2-16k": {
     "description": "يدعم تفاعلات سياق كبيرة، مناسب لمشاهد الحوار المعقدة."
   },
+  "step-2-mini": {
+    "description": "نموذج كبير سريع يعتمد على بنية الانتباه الجديدة MFA، يحقق نتائج مشابهة لـ step1 بتكلفة منخفضة جداً، مع الحفاظ على قدرة أعلى على المعالجة وزمن استجابة أسرع. يمكنه التعامل مع المهام العامة، ويتميز بقدرات قوية في البرمجة."
+  },
   "taichu2_mm": {
     "description": "يجمع بين فهم الصور، ونقل المعرفة، والاستدلال المنطقي، ويظهر أداءً بارزًا في مجال الأسئلة والأجوبة النصية والصورية."
   },
diff --git a/locales/bg-BG/chat.json b/locales/bg-BG/chat.json
index 4e58a43c70d1b..99635c1ddd4ae 100644
--- a/locales/bg-BG/chat.json
+++ b/locales/bg-BG/chat.json
@@ -82,6 +82,10 @@
       }
     }
   },
+  "reasoning": {
+    "thinking": "В дълбок размисъл",
+    "thought": "Дълбоко помислих (отне ми {{duration}} секунди)"
+  },
   "regenerate": "Прегенерирай",
   "roleAndArchive": "Роля и архив",
   "searchAgentPlaceholder": "Търсач на помощ...",
diff --git a/locales/bg-BG/components.json b/locales/bg-BG/components.json
index 43f732c61c3d6..23e2882118388 100644
--- a/locales/bg-BG/components.json
+++ b/locales/bg-BG/components.json
@@ -76,6 +76,7 @@
     "custom": "Потребителски модел, по подразбиране поддържа функционалност за функционални обаждания и визуално разпознаване, моля, потвърдете наличието на тези възможности спрямо реалните условия",
     "file": "Този модел поддържа качване на файлове и разпознаване",
     "functionCall": "Този модел поддържа функционални обаждания (Function Call)",
+    "reasoning": "Този модел поддържа дълбоко мислене",
     "tokens": "Този модел поддържа до {{tokens}} токена за една сесия",
     "vision": "Този модел поддържа визуално разпознаване"
   },
diff --git a/locales/bg-BG/models.json b/locales/bg-BG/models.json
index 85b3ccef29c6b..86eef8ca2733d 100644
--- a/locales/bg-BG/models.json
+++ b/locales/bg-BG/models.json
@@ -1328,6 +1328,9 @@
   "step-1.5v-mini": {
     "description": "Този модел разполага с мощни способности за разбиране на видео."
   },
+  "step-1o-vision-32k": {
+    "description": "Този модел разполага с мощни способности за разбиране на изображения. В сравнение с моделите от серията step-1v, предлага по-силна визуална производителност."
+  },
   "step-1v-32k": {
     "description": "Поддържа визуални входове, подобряваща мултимодалното взаимодействие."
   },
@@ -1337,6 +1340,9 @@
   "step-2-16k": {
     "description": "Поддържа взаимодействия с голям мащаб на контекста, подходящи за сложни диалогови сценарии."
   },
+  "step-2-mini": {
+    "description": "Модел с бърза производителност, базиран на новото поколение собствена архитектура Attention MFA, който постига резултати, подобни на step1, с много ниски разходи, като същевременно поддържа по-висока производителност и по-бързо време за отговор. Може да обработва общи задачи и притежава специализирани умения в кодирането."
+  },
   "taichu2_mm": {
     "description": "Сливайки способности за разбиране на изображения, прехвърляне на знания и логическо обяснение, моделът показва отлични резултати в областта на въпросите и отговорите на текст и изображения."
   },
diff --git a/locales/de-DE/chat.json b/locales/de-DE/chat.json
index ec75cbce915e4..f68dbfb255e8e 100644
--- a/locales/de-DE/chat.json
+++ b/locales/de-DE/chat.json
@@ -82,6 +82,10 @@
       }
     }
   },
+  "reasoning": {
+    "thinking": "Tiefes Nachdenken",
+    "thought": "Tiefgründig nachgedacht (Dauer: {{duration}} Sekunden)"
+  },
   "regenerate": "Neu generieren",
   "roleAndArchive": "Rolle und Archiv",
   "searchAgentPlaceholder": "Suchassistent...",
diff --git a/locales/de-DE/components.json b/locales/de-DE/components.json
index e7dd0cbca3ffb..09129f545c938 100644
--- a/locales/de-DE/components.json
+++ b/locales/de-DE/components.json
@@ -76,6 +76,7 @@
     "custom": "Benutzerdefiniertes Modell, standardmäßig unterstützt es sowohl Funktionsaufrufe als auch visuelle Erkennung. Bitte überprüfen Sie die Verfügbarkeit dieser Fähigkeiten basierend auf den tatsächlichen Gegebenheiten.",
     "file": "Dieses Modell unterstützt das Hochladen von Dateien und deren Erkennung.",
     "functionCall": "Dieses Modell unterstützt Funktionsaufrufe.",
+    "reasoning": "Dieses Modell unterstützt tiefes Denken",
     "tokens": "Dieses Modell unterstützt maximal {{tokens}} Tokens pro Sitzung.",
     "vision": "Dieses Modell unterstützt die visuelle Erkennung."
   },
diff --git a/locales/de-DE/models.json b/locales/de-DE/models.json
index ecc2c8b12e553..aa6d2c6185813 100644
--- a/locales/de-DE/models.json
+++ b/locales/de-DE/models.json
@@ -1328,6 +1328,9 @@
   "step-1.5v-mini": {
     "description": "Dieses Modell verfügt über starke Fähigkeiten zur Videoanalyse."
   },
+  "step-1o-vision-32k": {
+    "description": "Dieses Modell verfügt über starke Fähigkeiten zum Bildverständnis. Im Vergleich zu den Modellen der step-1v-Serie bietet es eine verbesserte visuelle Leistung."
+  },
   "step-1v-32k": {
     "description": "Unterstützt visuelle Eingaben und verbessert die multimodale Interaktionserfahrung."
   },
@@ -1337,6 +1340,9 @@
   "step-2-16k": {
     "description": "Unterstützt groß angelegte Kontextinteraktionen und eignet sich für komplexe Dialogszenarien."
   },
+  "step-2-mini": {
+    "description": "Ein ultraschnelles Großmodell, das auf der neuen, selbstentwickelten Attention-Architektur MFA basiert. Es erreicht mit extrem niedrigen Kosten ähnliche Ergebnisse wie step1 und bietet gleichzeitig eine höhere Durchsatzrate und schnellere Reaktionszeiten. Es kann allgemeine Aufgaben bearbeiten und hat besondere Fähigkeiten im Bereich der Codierung."
+ }, "taichu2_mm": { "description": "Integriert Fähigkeiten zur Bildverstehung, Wissensübertragung und logischen Attribution und zeigt herausragende Leistungen im Bereich der Bild-Text-Fragen." }, diff --git a/locales/en-US/chat.json b/locales/en-US/chat.json index d2c1a73bd6882..ed33c4b3be9d2 100644 --- a/locales/en-US/chat.json +++ b/locales/en-US/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Deep in thought", + "thought": "Deeply thought (took {{duration}} seconds)" + }, "regenerate": "Regenerate", "roleAndArchive": "Role and Archive", "searchAgentPlaceholder": "Search assistants...", diff --git a/locales/en-US/components.json b/locales/en-US/components.json index 9b7334f94b541..4d35c2194ddf5 100644 --- a/locales/en-US/components.json +++ b/locales/en-US/components.json @@ -76,6 +76,7 @@ "custom": "Custom model, by default, supports both function call and visual recognition. Please verify the availability of the above capabilities based on actual situations.", "file": "This model supports file upload for reading and recognition.", "functionCall": "This model supports function call.", + "reasoning": "This model supports deep thinking", "tokens": "This model supports up to {{tokens}} tokens in a single session.", "vision": "This model supports visual recognition." }, diff --git a/locales/en-US/models.json b/locales/en-US/models.json index 0d1302c43ab99..31359aa5779ee 100644 --- a/locales/en-US/models.json +++ b/locales/en-US/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "This model has powerful video understanding capabilities." }, + "step-1o-vision-32k": { + "description": "This model possesses powerful image understanding capabilities. Compared to the step-1v series models, it offers enhanced visual performance." + }, "step-1v-32k": { "description": "Supports visual input, enhancing multimodal interaction experiences." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Supports large-scale context interactions, suitable for complex dialogue scenarios." }, + "step-2-mini": { + "description": "A high-speed large model based on the next-generation self-developed Attention architecture MFA, achieving results similar to step-1 at a very low cost, while maintaining higher throughput and faster response times. It is capable of handling general tasks and has specialized skills in coding." + }, "taichu2_mm": { "description": "Integrating capabilities in image understanding, knowledge transfer, and logical attribution, it excels in the field of image-text question answering." }, diff --git a/locales/es-ES/chat.json b/locales/es-ES/chat.json index 5b4ade1727e59..3cc691a698861 100644 --- a/locales/es-ES/chat.json +++ b/locales/es-ES/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Pensando profundamente", + "thought": "He reflexionado profundamente (durante {{duration}} segundos)" + }, "regenerate": "Regenerar", "roleAndArchive": "Rol y archivo", "searchAgentPlaceholder": "Asistente de búsqueda...", diff --git a/locales/es-ES/components.json b/locales/es-ES/components.json index ebcc3d74a924c..4be4c0f14d07f 100644 --- a/locales/es-ES/components.json +++ b/locales/es-ES/components.json @@ -76,6 +76,7 @@ "custom": "Modelo personalizado: admite llamadas de función y reconocimiento visual. 
Verifique la disponibilidad de estas capacidades según sea necesario.", "file": "Este modelo admite la carga y reconocimiento de archivos.", "functionCall": "Este modelo admite llamadas de función.", + "reasoning": "Este modelo admite un pensamiento profundo", "tokens": "Este modelo admite un máximo de {{tokens}} tokens por sesión.", "vision": "Este modelo admite el reconocimiento visual." }, diff --git a/locales/es-ES/models.json b/locales/es-ES/models.json index c1d84a1ddfdcd..7c02d6627379e 100644 --- a/locales/es-ES/models.json +++ b/locales/es-ES/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Este modelo tiene una potente capacidad de comprensión de video." }, + "step-1o-vision-32k": { + "description": "Este modelo posee una poderosa capacidad de comprensión de imágenes. En comparación con la serie de modelos step-1v, ofrece un rendimiento visual superior." + }, "step-1v-32k": { "description": "Soporta entradas visuales, mejorando la experiencia de interacción multimodal." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Soporta interacciones de contexto a gran escala, adecuado para escenarios de diálogo complejos." }, + "step-2-mini": { + "description": "Un modelo de gran velocidad basado en la nueva arquitectura de atención autogestionada MFA, que logra efectos similares a los de step1 a un costo muy bajo, manteniendo al mismo tiempo un mayor rendimiento y tiempos de respuesta más rápidos. Capaz de manejar tareas generales, con habilidades destacadas en programación." + }, "taichu2_mm": { "description": "Integra capacidades de comprensión de imágenes, transferencia de conocimiento, atribución lógica, destacándose en el campo de preguntas y respuestas basadas en texto e imagen." }, diff --git a/locales/fa-IR/chat.json b/locales/fa-IR/chat.json index 13f0337069ced..9e1e4c20ee086 100644 --- a/locales/fa-IR/chat.json +++ b/locales/fa-IR/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "در حال تفکر عمیق", + "thought": "به طور عمیق فکر شده است (زمان صرف شده: {{duration}} ثانیه)" + }, "regenerate": "بازتولید", "roleAndArchive": "نقش‌ها و بایگانی", "searchAgentPlaceholder": "جستجوی دستیار...", diff --git a/locales/fa-IR/components.json b/locales/fa-IR/components.json index 5f363085fdaee..789729ba708af 100644 --- a/locales/fa-IR/components.json +++ b/locales/fa-IR/components.json @@ -76,6 +76,7 @@ "custom": "مدل سفارشی، تنظیمات پیش‌فرض از فراخوانی توابع و تشخیص بصری پشتیبانی می‌کند، لطفاً قابلیت‌های فوق را بر اساس شرایط واقعی بررسی کنید", "file": "این مدل از بارگذاری و شناسایی فایل‌ها پشتیبانی می‌کند", "functionCall": "این مدل از فراخوانی توابع (Function Call) پشتیبانی می‌کند", + "reasoning": "این مدل از تفکر عمیق پشتیبانی می‌کند", "tokens": "این مدل در هر جلسه حداکثر از {{tokens}} توکن پشتیبانی می‌کند", "vision": "این مدل از تشخیص بصری پشتیبانی می‌کند" }, diff --git a/locales/fa-IR/models.json b/locales/fa-IR/models.json index b181642bf30ba..79456ea7b1f85 100644 --- a/locales/fa-IR/models.json +++ b/locales/fa-IR/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "این مدل دارای توانایی‌های قوی در درک ویدیو است." }, + "step-1o-vision-32k": { + "description": "این مدل دارای توانایی‌های قوی در درک تصویر است. در مقایسه با مدل‌های سری step-1v، عملکرد بصری بهتری دارد." + }, "step-1v-32k": { "description": "پشتیبانی از ورودی بصری، تقویت تجربه تعامل چندحالته." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "پشتیبانی از تعاملات متنی گسترده، مناسب برای سناریوهای مکالمه پیچیده." 
   },
+  "step-2-mini": {
+    "description": "مدل بزرگ فوق‌العاده سریع مبتنی بر معماری توجه MFA که به‌صورت اختصاصی توسعه یافته است، با هزینه بسیار کم به نتایجی مشابه با step1 دست می‌یابد و در عین حال توانایی پردازش بالاتر و زمان پاسخ سریع‌تری را حفظ می‌کند. این مدل قادر به انجام وظایف عمومی است و در توانایی‌های کدنویسی تخصص دارد."
+  },
   "taichu2_mm": {
     "description": "ترکیبی از درک تصویر، انتقال دانش، استدلال منطقی و غیره، در زمینه پرسش و پاسخ تصویری و متنی عملکرد برجسته‌ای دارد."
   },
diff --git a/locales/fr-FR/chat.json b/locales/fr-FR/chat.json
index 960f2b32b4b33..d66cbc0475325 100644
--- a/locales/fr-FR/chat.json
+++ b/locales/fr-FR/chat.json
@@ -82,6 +82,10 @@
       }
     }
   },
+  "reasoning": {
+    "thinking": "En pleine réflexion profonde",
+    "thought": "J'ai réfléchi en profondeur (durée : {{duration}} secondes)"
+  },
   "regenerate": "Regénérer",
   "roleAndArchive": "Rôle et archivage",
   "searchAgentPlaceholder": "Assistant de recherche...",
diff --git a/locales/fr-FR/components.json b/locales/fr-FR/components.json
index 668dae2116ecd..9e61b5276dd69 100644
--- a/locales/fr-FR/components.json
+++ b/locales/fr-FR/components.json
@@ -76,6 +76,7 @@
     "custom": "Modèle personnalisé par défaut prenant en charge à la fois les appels de fonction et la reconnaissance visuelle. Veuillez vérifier la disponibilité de ces capacités en fonction de vos besoins réels.",
     "file": "Ce modèle prend en charge la lecture et la reconnaissance de fichiers téléchargés.",
     "functionCall": "Ce modèle prend en charge les appels de fonction.",
+    "reasoning": "Ce modèle prend en charge une réflexion approfondie",
     "tokens": "Ce modèle prend en charge jusqu'à {{tokens}} jetons par session.",
     "vision": "Ce modèle prend en charge la reconnaissance visuelle."
   },
diff --git a/locales/fr-FR/models.json b/locales/fr-FR/models.json
index b3091c4593dc2..08cebbd1889c5 100644
--- a/locales/fr-FR/models.json
+++ b/locales/fr-FR/models.json
@@ -1328,6 +1328,9 @@
   "step-1.5v-mini": {
     "description": "Ce modèle possède de puissantes capacités de compréhension vidéo."
   },
+  "step-1o-vision-32k": {
+    "description": "Ce modèle possède de puissantes capacités de compréhension d'image. Par rapport à la série de modèles step-1v, il offre des performances visuelles supérieures."
+  },
   "step-1v-32k": {
     "description": "Prend en charge les entrées visuelles, améliorant l'expérience d'interaction multimodale."
   },
@@ -1337,6 +1340,9 @@
   "step-2-16k": {
     "description": "Prend en charge des interactions contextuelles à grande échelle, adapté aux scénarios de dialogue complexes."
   },
+  "step-2-mini": {
+    "description": "Un modèle de grande taille ultra-rapide basé sur la nouvelle architecture d'attention auto-développée MFA, atteignant des résultats similaires à ceux de step1 à un coût très bas, tout en maintenant un débit plus élevé et un temps de réponse plus rapide. Capable de traiter des tâches générales, avec des compétences particulières en matière de codage."
+  },
   "taichu2_mm": {
     "description": "Intègre des capacités de compréhension d'images, de transfert de connaissances et d'attribution logique, se distinguant dans le domaine des questions-réponses textuelles et visuelles."
}, diff --git a/locales/it-IT/chat.json b/locales/it-IT/chat.json index 4bc341a353696..57e0cabc667ea 100644 --- a/locales/it-IT/chat.json +++ b/locales/it-IT/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "In profonda riflessione", + "thought": "Pensato in profondità (tempo impiegato {{duration}} secondi)" + }, "regenerate": "Rigenera", "roleAndArchive": "Ruolo e archivio", "searchAgentPlaceholder": "Assistente di ricerca...", diff --git a/locales/it-IT/components.json b/locales/it-IT/components.json index 2276ea5f47f44..048bbe8a4f121 100644 --- a/locales/it-IT/components.json +++ b/locales/it-IT/components.json @@ -76,6 +76,7 @@ "custom": "Modello personalizzato: di default supporta sia la chiamata di funzioni che il riconoscimento visivo. Verifica l'effettiva disponibilità di tali funzionalità.", "file": "Questo modello supporta il caricamento e il riconoscimento di file.", "functionCall": "Questo modello supporta la chiamata di funzioni.", + "reasoning": "Questo modello supporta un pensiero profondo", "tokens": "Questo modello supporta un massimo di {{tokens}} token per sessione.", "vision": "Questo modello supporta il riconoscimento visivo." }, diff --git a/locales/it-IT/models.json b/locales/it-IT/models.json index 61c3284701cea..239c65c1fb8d8 100644 --- a/locales/it-IT/models.json +++ b/locales/it-IT/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Questo modello possiede potenti capacità di comprensione video." }, + "step-1o-vision-32k": { + "description": "Questo modello possiede una potente capacità di comprensione delle immagini. Rispetto ai modelli della serie step-1v, offre prestazioni visive superiori." + }, "step-1v-32k": { "description": "Supporta input visivi, migliorando l'esperienza di interazione multimodale." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Supporta interazioni di contesto su larga scala, adatto per scenari di dialogo complessi." }, + "step-2-mini": { + "description": "Un modello di grandi dimensioni ad alta velocità basato sulla nuova architettura di attenzione auto-sviluppata MFA, in grado di raggiungere risultati simili a quelli di step1 a un costo molto basso, mantenendo al contempo una maggiore capacità di elaborazione e tempi di risposta più rapidi. È in grado di gestire compiti generali, con competenze particolari nella programmazione." + }, "taichu2_mm": { "description": "Integra capacità di comprensione delle immagini, trasferimento di conoscenze, attribuzione logica, ecc., e si distingue nel campo delle domande e risposte basate su testo e immagini." 
}, diff --git a/locales/ja-JP/chat.json b/locales/ja-JP/chat.json index 50897c0ba4898..ba68d97cbd234 100644 --- a/locales/ja-JP/chat.json +++ b/locales/ja-JP/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "深く考えています", + "thought": "深く考えました(所要時間 {{duration}} 秒)" + }, "regenerate": "再生成", "roleAndArchive": "役割とアーカイブ", "searchAgentPlaceholder": "検索アシスタント...", diff --git a/locales/ja-JP/components.json b/locales/ja-JP/components.json index 87d1dbe7b76ba..3164eac2967e8 100644 --- a/locales/ja-JP/components.json +++ b/locales/ja-JP/components.json @@ -76,6 +76,7 @@ "custom": "カスタムモデル、デフォルトでは関数呼び出しとビジョン認識の両方をサポートしています。上記機能の有効性を確認してください。", "file": "このモデルはファイルのアップロードと認識をサポートしています。", "functionCall": "このモデルは関数呼び出し(Function Call)をサポートしています。", + "reasoning": "このモデルは深い思考をサポートしています", "tokens": "このモデルは1つのセッションあたり最大{{tokens}}トークンをサポートしています。", "vision": "このモデルはビジョン認識をサポートしています。" }, diff --git a/locales/ja-JP/models.json b/locales/ja-JP/models.json index 58969313753a1..408b7b2f931eb 100644 --- a/locales/ja-JP/models.json +++ b/locales/ja-JP/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "このモデルは、強力なビデオ理解能力を備えています。" }, + "step-1o-vision-32k": { + "description": "このモデルは強力な画像理解能力を持っています。step-1vシリーズモデルと比較して、より優れた視覚性能を発揮します。" + }, "step-1v-32k": { "description": "視覚入力をサポートし、多モーダルインタラクション体験を強化します。" }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "大規模なコンテキストインタラクションをサポートし、複雑な対話シナリオに適しています。" }, + "step-2-mini": { + "description": "新世代の自社開発のAttentionアーキテクチャMFAに基づく超高速大モデルで、非常に低コストでstep1と同様の効果を達成しつつ、より高いスループットと迅速な応答遅延を維持しています。一般的なタスクを処理でき、コード能力において特長を持っています。" + }, "taichu2_mm": { "description": "画像理解、知識移転、論理帰納などの能力を融合し、画像とテキストの質問応答分野で優れたパフォーマンスを発揮します。" }, diff --git a/locales/ko-KR/chat.json b/locales/ko-KR/chat.json index 439af05c28b7d..71a2c123ac4c9 100644 --- a/locales/ko-KR/chat.json +++ b/locales/ko-KR/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "심층 사고 중", + "thought": "심층적으로 생각했습니다 (소요 시간: {{duration}} 초)" + }, "regenerate": "재생성", "roleAndArchive": "역할 및 아카이브", "searchAgentPlaceholder": "검색 도우미...", diff --git a/locales/ko-KR/components.json b/locales/ko-KR/components.json index 42c28e5bc1771..a6897be88a049 100644 --- a/locales/ko-KR/components.json +++ b/locales/ko-KR/components.json @@ -76,6 +76,7 @@ "custom": "사용자 정의 모델, 기본적으로 함수 호출 및 시각 인식을 모두 지원하며, 실제 기능을 확인하세요", "file": "이 모델은 파일 업로드 및 인식을 지원합니다", "functionCall": "이 모델은 함수 호출을 지원합니다", + "reasoning": "이 모델은 깊이 있는 사고를 지원합니다.", "tokens": "이 모델은 단일 세션당 최대 {{tokens}} 토큰을 지원합니다", "vision": "이 모델은 시각 인식을 지원합니다" }, diff --git a/locales/ko-KR/models.json b/locales/ko-KR/models.json index ffd9078359b82..7e42cc837af22 100644 --- a/locales/ko-KR/models.json +++ b/locales/ko-KR/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "이 모델은 강력한 비디오 이해 능력을 가지고 있습니다." }, + "step-1o-vision-32k": { + "description": "이 모델은 강력한 이미지 이해 능력을 가지고 있습니다. step-1v 시리즈 모델에 비해 더 강력한 시각 성능을 자랑합니다." + }, "step-1v-32k": { "description": "시각 입력을 지원하여 다중 모달 상호작용 경험을 강화합니다." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "대규모 컨텍스트 상호작용을 지원하며, 복잡한 대화 시나리오에 적합합니다." }, + "step-2-mini": { + "description": "신세대 자체 개발 Attention 아키텍처인 MFA를 기반으로 한 초고속 대형 모델로, 매우 낮은 비용으로 step1과 유사한 효과를 달성하면서도 더 높은 처리량과 더 빠른 응답 지연을 유지합니다. 일반적인 작업을 처리할 수 있으며, 코드 능력에 있어 특장점을 가지고 있습니다." + }, "taichu2_mm": { "description": "이미지 이해, 지식 이전, 논리 귀속 등의 능력을 통합하여, 이미지-텍스트 질문 응답 분야에서 뛰어난 성능을 보입니다." 
}, diff --git a/locales/nl-NL/chat.json b/locales/nl-NL/chat.json index a6116797935eb..ea575b32de1c1 100644 --- a/locales/nl-NL/chat.json +++ b/locales/nl-NL/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Diep aan het denken", + "thought": "Diep nagedacht (tijd: {{duration}} seconden)" + }, "regenerate": "Opnieuw genereren", "roleAndArchive": "Rol en archief", "searchAgentPlaceholder": "Zoekassistent...", diff --git a/locales/nl-NL/components.json b/locales/nl-NL/components.json index cffd157f3fde6..bc6a707b4cb3b 100644 --- a/locales/nl-NL/components.json +++ b/locales/nl-NL/components.json @@ -76,6 +76,7 @@ "custom": "Custom model, by default, supports both function call and visual recognition. Please verify the availability of the above capabilities based on actual needs.", "file": "This model supports file upload for reading and recognition.", "functionCall": "This model supports function call.", + "reasoning": "Dit model ondersteunt diepgaand denken", "tokens": "This model supports up to {{tokens}} tokens in a single session.", "vision": "This model supports visual recognition." }, diff --git a/locales/nl-NL/models.json b/locales/nl-NL/models.json index 40c29a01e90af..abc7d63fdfff6 100644 --- a/locales/nl-NL/models.json +++ b/locales/nl-NL/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Dit model heeft krachtige video begrip capaciteiten." }, + "step-1o-vision-32k": { + "description": "Dit model heeft krachtige beeldbegripcapaciteiten. In vergelijking met de step-1v serie modellen heeft het een sterkere visuele prestatie." + }, "step-1v-32k": { "description": "Ondersteunt visuele invoer, verbetert de multimodale interactie-ervaring." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Ondersteunt grootschalige contextinteracties, geschikt voor complexe gespreksscenario's." }, + "step-2-mini": { + "description": "Een razendsnel groot model gebaseerd op de nieuwe generatie zelfontwikkelde Attention-architectuur MFA, dat met zeer lage kosten vergelijkbare resultaten als step1 behaalt, terwijl het een hogere doorvoer en snellere responstijd behoudt. Het kan algemene taken verwerken en heeft speciale vaardigheden op het gebied van codering." + }, "taichu2_mm": { "description": "Gecombineerd met beeldbegrip, kennisoverdracht en logische toerekening, excelleert het in het domein van vraag-en-antwoord met tekst en afbeeldingen." }, diff --git a/locales/pl-PL/chat.json b/locales/pl-PL/chat.json index bf18d25f4d4e1..cf04e57bf1c09 100644 --- a/locales/pl-PL/chat.json +++ b/locales/pl-PL/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Głęboko myślę", + "thought": "Głęboko myślałem (czas: {{duration}} sekund)" + }, "regenerate": "Wygeneruj ponownie", "roleAndArchive": "Rola i archiwum", "searchAgentPlaceholder": "Wyszukaj pomocnika...", diff --git a/locales/pl-PL/components.json b/locales/pl-PL/components.json index 700366da029b3..339c96a500405 100644 --- a/locales/pl-PL/components.json +++ b/locales/pl-PL/components.json @@ -76,6 +76,7 @@ "custom": "Niestandardowy model, domyślnie obsługujący zarówno wywołania funkcji, jak i rozpoznawanie wizualne. 
Proszę zweryfikować możliwość użycia tych funkcji w praktyce.", "file": "Ten model obsługuje wczytywanie plików i rozpoznawanie", "functionCall": "Ten model obsługuje wywołania funkcji (Function Call).", + "reasoning": "Ten model wspiera głębokie myślenie", "tokens": "Ten model obsługuje maksymalnie {{tokens}} tokenów w pojedynczej sesji.", "vision": "Ten model obsługuje rozpoznawanie wizualne." }, diff --git a/locales/pl-PL/models.json b/locales/pl-PL/models.json index 1074f753585a2..554aa7b2c171d 100644 --- a/locales/pl-PL/models.json +++ b/locales/pl-PL/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Ten model ma potężne zdolności rozumienia wideo." }, + "step-1o-vision-32k": { + "description": "Ten model ma potężne zdolności rozumienia obrazów. W porównaniu do modeli z serii step-1v, oferuje lepsze osiągi wizualne." + }, "step-1v-32k": { "description": "Obsługuje wejścia wizualne, wzmacniając doświadczenie interakcji multimodalnych." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Obsługuje interakcje z dużą ilością kontekstu, idealny do złożonych scenariuszy dialogowych." }, + "step-2-mini": { + "description": "Model oparty na nowej generacji własnej architektury Attention MFA, osiągający podobne wyniki jak step1 przy bardzo niskich kosztach, jednocześnie zapewniając wyższą przepustowość i szybszy czas reakcji. Potrafi obsługiwać ogólne zadania, a w zakresie umiejętności kodowania ma szczególne zdolności." + }, "taichu2_mm": { "description": "Łączy zdolności rozumienia obrazów, transferu wiedzy, logicznego wnioskowania i wyróżnia się w dziedzinie pytań i odpowiedzi związanych z obrazem i tekstem." }, diff --git a/locales/pt-BR/chat.json b/locales/pt-BR/chat.json index e5e29442a4a19..9f3e70e1d1eda 100644 --- a/locales/pt-BR/chat.json +++ b/locales/pt-BR/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Pensando profundamente", + "thought": "Pensou profundamente (tempo gasto: {{duration}} segundos)" + }, "regenerate": "Regenerar", "roleAndArchive": "Função e Arquivo", "searchAgentPlaceholder": "Assistente de busca...", diff --git a/locales/pt-BR/components.json b/locales/pt-BR/components.json index 53f87d52c3ab9..3fbba5864f4b4 100644 --- a/locales/pt-BR/components.json +++ b/locales/pt-BR/components.json @@ -76,6 +76,7 @@ "custom": "Modelo personalizado, por padrão, suporta chamadas de função e reconhecimento visual. Por favor, verifique a disponibilidade dessas capacidades de acordo com a situação real.", "file": "Este modelo suporta leitura e reconhecimento de arquivos enviados.", "functionCall": "Este modelo suporta chamadas de função.", + "reasoning": "Este modelo suporta pensamento profundo", "tokens": "Este modelo suporta no máximo {{tokens}} tokens por sessão.", "vision": "Este modelo suporta reconhecimento visual." }, diff --git a/locales/pt-BR/models.json b/locales/pt-BR/models.json index 38937c06d88b7..341572d9f2585 100644 --- a/locales/pt-BR/models.json +++ b/locales/pt-BR/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Este modelo possui uma poderosa capacidade de compreensão de vídeo." }, + "step-1o-vision-32k": { + "description": "Este modelo possui uma poderosa capacidade de compreensão de imagens. Em comparação com a série de modelos step-1v, apresenta um desempenho visual superior." + }, "step-1v-32k": { "description": "Suporta entradas visuais, aprimorando a experiência de interação multimodal." 
   },
@@ -1337,6 +1340,9 @@
   "step-2-16k": {
     "description": "Suporta interações de contexto em larga escala, adequado para cenários de diálogo complexos."
   },
+  "step-2-mini": {
+    "description": "Um modelo de grande escala de alta velocidade baseado na nova arquitetura de atenção auto-desenvolvida MFA, alcançando resultados semelhantes ao step1 com um custo muito baixo, enquanto mantém uma maior taxa de transferência e um tempo de resposta mais rápido. Capaz de lidar com tarefas gerais, possui especialização em habilidades de codificação."
+  },
   "taichu2_mm": {
     "description": "Integra capacidades de compreensão de imagem, transferência de conhecimento e atribuição lógica, destacando-se no campo de perguntas e respostas baseadas em texto e imagem."
   },
diff --git a/locales/ru-RU/chat.json b/locales/ru-RU/chat.json
index 12d326aeda5c2..f5cab84a10990 100644
--- a/locales/ru-RU/chat.json
+++ b/locales/ru-RU/chat.json
@@ -82,6 +82,10 @@
       }
     }
   },
+  "reasoning": {
+    "thinking": "Глубокое размышление",
+    "thought": "Глубоко размышлял (время: {{duration}} секунд)"
+  },
   "regenerate": "Сгенерировать заново",
   "roleAndArchive": "Роль и архив",
   "searchAgentPlaceholder": "Поиск помощника...",
diff --git a/locales/ru-RU/components.json b/locales/ru-RU/components.json
index fc522a24a2fb0..2cd27de67b243 100644
--- a/locales/ru-RU/components.json
+++ b/locales/ru-RU/components.json
@@ -76,6 +76,7 @@
     "custom": "Пользовательская модель по умолчанию поддерживает как вызов функций, так и распознавание изображений. Пожалуйста, проверьте доступность указанных возможностей в вашем случае",
     "file": "Эта модель поддерживает загрузку и распознавание файлов",
     "functionCall": "Эта модель поддерживает вызов функций",
+    "reasoning": "Эта модель поддерживает глубокое мышление",
     "tokens": "Эта модель поддерживает до {{tokens}} токенов в одной сессии",
     "vision": "Эта модель поддерживает распознавание изображений"
   },
diff --git a/locales/ru-RU/models.json b/locales/ru-RU/models.json
index 89c3c489a151e..3a65560eedb36 100644
--- a/locales/ru-RU/models.json
+++ b/locales/ru-RU/models.json
@@ -1328,6 +1328,9 @@
   "step-1.5v-mini": {
     "description": "Эта модель обладает мощными возможностями понимания видео."
   },
+  "step-1o-vision-32k": {
+    "description": "Эта модель обладает мощными способностями к пониманию изображений. По сравнению с серией моделей step-1v, она имеет более высокую визуальную производительность."
+  },
   "step-1v-32k": {
     "description": "Поддерживает визуальный ввод, улучшая мультимодальный опыт взаимодействия."
   },
@@ -1337,6 +1340,9 @@
   "step-2-16k": {
     "description": "Поддерживает масштабные взаимодействия контекста, подходит для сложных диалоговых сценариев."
   },
+  "step-2-mini": {
+    "description": "Сверхбыстрая большая модель на основе архитектуры внимания MFA собственной разработки нового поколения, достигающая результатов, сопоставимых со step1, при очень низких затратах, одновременно обеспечивая более высокую пропускную способность и более быстрое время отклика. Способна обрабатывать общие задачи и обладает особыми навыками в кодировании."
+  },
   "taichu2_mm": {
     "description": "Объединяет способности понимания изображений, переноса знаний, логической атрибуции и демонстрирует выдающиеся результаты в области вопросов и ответов на основе текста и изображений."
}, diff --git a/locales/tr-TR/chat.json b/locales/tr-TR/chat.json index 9d44c8314b042..83fcebb2f1782 100644 --- a/locales/tr-TR/chat.json +++ b/locales/tr-TR/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Derin düşünme aşamasında", + "thought": "Derinlemesine düşündü (geçen süre {{duration}} saniye)" + }, "regenerate": "Tekrarla", "roleAndArchive": "Rol ve Arşiv", "searchAgentPlaceholder": "Arama Asistanı...", diff --git a/locales/tr-TR/components.json b/locales/tr-TR/components.json index 0ad79d37cc9a0..9275dbd001f10 100644 --- a/locales/tr-TR/components.json +++ b/locales/tr-TR/components.json @@ -76,6 +76,7 @@ "custom": "Özel model, varsayılan olarak hem fonksiyon çağrısını hem de görüntü tanımayı destekler, yukarıdaki yeteneklerin kullanılabilirliğini doğrulamak için lütfen gerçek durumu kontrol edin", "file": "Bu model dosya yükleme ve tanımayı destekler", "functionCall": "Bu model fonksiyon çağrısını destekler", + "reasoning": "Bu model derin düşünmeyi destekler", "tokens": "Bu model tek bir oturumda en fazla {{tokens}} Token destekler", "vision": "Bu model görüntü tanımıyı destekler" }, diff --git a/locales/tr-TR/models.json b/locales/tr-TR/models.json index fe19681a6a107..198b31482b63f 100644 --- a/locales/tr-TR/models.json +++ b/locales/tr-TR/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Bu model, güçlü bir video anlama yeteneğine sahiptir." }, + "step-1o-vision-32k": { + "description": "Bu model, güçlü bir görüntü anlama yeteneğine sahiptir. Step-1v serisi modellere kıyasla daha güçlü bir görsel performansa sahiptir." + }, "step-1v-32k": { "description": "Görsel girdi desteği sunar, çok modlu etkileşim deneyimini artırır." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Büyük ölçekli bağlam etkileşimlerini destekler, karmaşık diyalog senaryoları için uygundur." }, + "step-2-mini": { + "description": "Yeni nesil kendi geliştirdiğimiz MFA Attention mimarisine dayanan hızlı büyük model, çok düşük maliyetle step1 ile benzer sonuçlar elde ederken, daha yüksek bir throughput ve daha hızlı yanıt süresi sağlıyor. Genel görevleri işleyebilme yeteneğine sahip olup, kodlama yeteneklerinde uzmanlık gösteriyor." + }, "taichu2_mm": { "description": "Görüntü anlama, bilgi transferi, mantıksal atıf gibi yetenekleri birleştirerek, metin ve görüntü ile soru-cevap alanında öne çıkmaktadır." 
}, diff --git a/locales/vi-VN/chat.json b/locales/vi-VN/chat.json index 0e349fbafd5a2..9b0209e649d0b 100644 --- a/locales/vi-VN/chat.json +++ b/locales/vi-VN/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "Đang suy nghĩ sâu sắc", + "thought": "Đã suy nghĩ sâu sắc (thời gian: {{duration}} giây)" + }, "regenerate": "Tạo lại", "roleAndArchive": "Vai trò và lưu trữ", "searchAgentPlaceholder": "Trợ lý tìm kiếm...", diff --git a/locales/vi-VN/components.json b/locales/vi-VN/components.json index 221493394473a..2257c2cc29dd0 100644 --- a/locales/vi-VN/components.json +++ b/locales/vi-VN/components.json @@ -76,6 +76,7 @@ "custom": "Mô hình tùy chỉnh, mặc định hỗ trợ cả cuộc gọi hàm và nhận diện hình ảnh, vui lòng xác minh khả năng sử dụng của chúng theo tình hình cụ thể", "file": "Mô hình này hỗ trợ tải lên và nhận diện tệp", "functionCall": "Mô hình này hỗ trợ cuộc gọi hàm (Function Call)", + "reasoning": "Mô hình này hỗ trợ tư duy sâu sắc", "tokens": "Mỗi phiên của mô hình này hỗ trợ tối đa {{tokens}} Tokens", "vision": "Mô hình này hỗ trợ nhận diện hình ảnh" }, diff --git a/locales/vi-VN/models.json b/locales/vi-VN/models.json index be81611eb3bbc..faa16fb629269 100644 --- a/locales/vi-VN/models.json +++ b/locales/vi-VN/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "Mô hình này có khả năng hiểu video mạnh mẽ." }, + "step-1o-vision-32k": { + "description": "Mô hình này có khả năng hiểu hình ảnh mạnh mẽ. So với các mô hình trong series step-1v, nó có hiệu suất thị giác vượt trội hơn." + }, "step-1v-32k": { "description": "Hỗ trợ đầu vào hình ảnh, tăng cường trải nghiệm tương tác đa mô hình." }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "Hỗ trợ tương tác ngữ cảnh quy mô lớn, phù hợp cho các tình huống đối thoại phức tạp." }, + "step-2-mini": { + "description": "Mô hình lớn siêu tốc dựa trên kiến trúc Attention tự nghiên cứu thế hệ mới MFA, đạt được hiệu quả tương tự như step1 với chi phí rất thấp, đồng thời duy trì thông lượng cao hơn và độ trễ phản hồi nhanh hơn. Có khả năng xử lý các nhiệm vụ chung, đặc biệt có năng lực trong lập trình." + }, "taichu2_mm": { "description": "Kết hợp khả năng hiểu hình ảnh, chuyển giao kiến thức, suy luận logic, nổi bật trong lĩnh vực hỏi đáp hình ảnh và văn bản." 
}, diff --git a/locales/zh-CN/chat.json b/locales/zh-CN/chat.json index 9bd8da7119fee..78f4d68870cea 100644 --- a/locales/zh-CN/chat.json +++ b/locales/zh-CN/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "深度思考中", + "thought": "已深度思考(用时 {{duration}} 秒)" + }, "regenerate": "重新生成", "roleAndArchive": "角色与记录", "searchAgentPlaceholder": "搜索助手...", diff --git a/locales/zh-CN/components.json b/locales/zh-CN/components.json index 2728ca27cd340..afed759cbbc60 100644 --- a/locales/zh-CN/components.json +++ b/locales/zh-CN/components.json @@ -76,6 +76,7 @@ "custom": "自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性", "file": "该模型支持上传文件读取与识别", "functionCall": "该模型支持函数调用(Function Call)", + "reasoning": "该模型支持深度思考", "tokens": "该模型单个会话最多支持 {{tokens}} Tokens", "vision": "该模型支持视觉识别" }, diff --git a/locales/zh-CN/modelProvider.json b/locales/zh-CN/modelProvider.json index 4adb3b2a4b454..387c1082ef131 100644 --- a/locales/zh-CN/modelProvider.json +++ b/locales/zh-CN/modelProvider.json @@ -198,9 +198,9 @@ }, "baseURL": { "desc": "必须包含 http(s)://", + "invalid": "请输入合法的 URL", "placeholder": "https://your-proxy-url.com/v1", - "title": "API 代理地址", - "invalid": "请输入合法的 URL" + "title": "API 代理地址" }, "checker": { "button": "检查", diff --git a/locales/zh-CN/models.json b/locales/zh-CN/models.json index a1201ddfb3a7b..5ae858ff2f988 100644 --- a/locales/zh-CN/models.json +++ b/locales/zh-CN/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "该模型拥有强大的视频理解能力。" }, + "step-1o-vision-32k": { + "description": "该模型拥有强大的图像理解能力。相比于 step-1v 系列模型,拥有更强的视觉性能。" + }, "step-1v-32k": { "description": "支持视觉输入,增强多模态交互体验。" }, @@ -1335,7 +1338,10 @@ "description": "小型视觉模型,适合基本的图文任务。" }, "step-2-16k": { - "description": "支持大规模上下文交互,适合复杂对话场景。" + "description": "step-2模型的实验版本,包含最新的特性,滚动更新中。不推荐在正式生产环境使用。" + }, + "step-2-mini": { + "description": "基于新一代自研Attention架构MFA的极速大模型,用极低成本达到和step1类似的效果,同时保持了更高的吞吐和更快响应时延。能够处理通用任务,在代码能力上具备特长。" }, "taichu2_mm": { "description": "融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出" diff --git a/locales/zh-TW/chat.json b/locales/zh-TW/chat.json index 6328062adc38c..9532541c5c26e 100644 --- a/locales/zh-TW/chat.json +++ b/locales/zh-TW/chat.json @@ -82,6 +82,10 @@ } } }, + "reasoning": { + "thinking": "深入思考中", + "thought": "已深度思考(用時 {{duration}} 秒)" + }, "regenerate": "重新生成", "roleAndArchive": "角色與記錄", "searchAgentPlaceholder": "搜尋助手...", diff --git a/locales/zh-TW/components.json b/locales/zh-TW/components.json index ef7a0d63c1ba0..18fcf7d3beda2 100644 --- a/locales/zh-TW/components.json +++ b/locales/zh-TW/components.json @@ -76,6 +76,7 @@ "custom": "自訂模型,預設支援函式呼叫與視覺辨識,請根據實際情況驗證上述能力的可用性", "file": "該模型支援上傳檔案讀取與辨識", "functionCall": "該模型支援函式呼叫(Function Call)", + "reasoning": "該模型支持深度思考", "tokens": "該模型單一會話最多支援 {{tokens}} Tokens", "vision": "該模型支援視覺辨識" }, diff --git a/locales/zh-TW/models.json b/locales/zh-TW/models.json index 2234cc524b0de..e0e7147542c3e 100644 --- a/locales/zh-TW/models.json +++ b/locales/zh-TW/models.json @@ -1328,6 +1328,9 @@ "step-1.5v-mini": { "description": "該模型擁有強大的視頻理解能力。" }, + "step-1o-vision-32k": { + "description": "該模型擁有強大的圖像理解能力。相比於 step-1v 系列模型,擁有更強的視覺性能。" + }, "step-1v-32k": { "description": "支持視覺輸入,增強多模態交互體驗。" }, @@ -1337,6 +1340,9 @@ "step-2-16k": { "description": "支持大規模上下文交互,適合複雜對話場景。" }, + "step-2-mini": { + "description": "基於新一代自研Attention架構MFA的極速大模型,用極低成本達到和step1類似的效果,同時保持了更高的吞吐和更快響應時延。能夠處理通用任務,在程式碼能力上具備特長。" + }, "taichu2_mm": { "description": "融合了圖像理解、知識遷移、邏輯歸因等能力,在圖文問答領域表現突出" }, diff --git a/src/components/ModelSelect/index.tsx 
b/src/components/ModelSelect/index.tsx
index ccf26d754336d..f83a0a35ff349 100644
--- a/src/components/ModelSelect/index.tsx
+++ b/src/components/ModelSelect/index.tsx
@@ -2,7 +2,7 @@ import { IconAvatarProps, ModelIcon, ProviderIcon } from '@lobehub/icons';
 import { Avatar, Icon, Tooltip } from '@lobehub/ui';
 import { Typography } from 'antd';
 import { createStyles } from 'antd-style';
-import { Infinity, LucideEye, LucidePaperclip, ToyBrick } from 'lucide-react';
+import { Infinity, AtomIcon, LucideEye, LucidePaperclip, ToyBrick } from 'lucide-react';
 import numeral from 'numeral';
 import { rgba } from 'polished';
 import { FC, memo } from 'react';
@@ -45,6 +45,10 @@ const useStyles = createStyles(({ css, token }) => ({
     color: ${token.green};
     background: ${token.green1};
   `,
+  tagPurple: css`
+    color: ${token.purple};
+    background: ${token.purple1};
+  `,
   token: css`
     width: 36px;
     height: 20px;
@@ -107,6 +111,17 @@ export const ModelInfoTags = memo(
         )}
+        {model.reasoning && (
+          <Tooltip
+            placement={placement}
+            styles={{ root: { pointerEvents: 'none' } }}
+            title={t('ModelSelect.featureTag.reasoning')}
+          >
+            <div className={cx(styles.tag, styles.tagPurple)} style={{ cursor: 'pointer' }}>
+              <Icon icon={AtomIcon} />
+            </div>
+          </Tooltip>
+        )}
         {typeof model.contextWindowTokens === 'number' && (
diff --git a/src/database/schemas/message.ts b/src/database/schemas/message.ts
@@ ... @@
+import { ModelReasoning } from '@/types/message';
@@ ... @@
+    reasoning: jsonb('reasoning').$type<ModelReasoning>(),
     model: text('model'),
     provider: text('provider'),
@@ -71,9 +73,6 @@
   }),
 );
 
-export type NewMessage = typeof messages.$inferInsert;
-export type MessageItem = typeof messages.$inferSelect;
-
 // if the message container a plugin
 export const messagePlugins = pgTable('message_plugins', {
   id: text('id')
diff --git a/src/database/server/models/__tests__/message.test.ts b/src/database/server/models/__tests__/message.test.ts
index ee18eaaaefa0b..98367dcf749a3 100644
--- a/src/database/server/models/__tests__/message.test.ts
+++ b/src/database/server/models/__tests__/message.test.ts
@@ -3,6 +3,7 @@ import { eq } from 'drizzle-orm/expressions';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { getTestDBInstance } from '@/database/server/core/dbForTest';
+import { MessageItem } from '@/types/message';
 import { uuid } from '@/utils/uuid';
 
 import {
@@ -253,8 +254,8 @@ describe('MessageModel', () => {
       const result = await messageModel.query();
 
       // assert the results
-      expect(result[0].extra.translate).toEqual({ content: 'translated', from: 'en', to: 'zh' });
-      expect(result[0].extra.tts).toEqual({
+      expect(result[0].extra!.translate).toEqual({ content: 'translated', from: 'en', to: 'zh' });
+      expect(result[0].extra!.tts).toEqual({
         contentMd5: 'md5',
         file: 'f1',
         voice: 'voice1',
@@ -345,7 +346,7 @@
       expect(result).toHaveLength(1);
       expect(result[0].chunksList).toHaveLength(1);
-      expect(result[0].chunksList[0]).toMatchObject({
+      expect(result[0].chunksList![0]).toMatchObject({
         text: 'chunk content',
         similarity: 0.95,
       });
@@ -655,7 +656,7 @@
       const newMessages = [
         { id: '1', role: 'user', content: 'message 1' },
         { id: '2', role: 'assistant', content: 'message 2' },
-      ];
+      ] as MessageItem[];
 
       // call batchCreateMessages
       await messageModel.batchCreate(newMessages);
diff --git a/src/database/server/models/message.ts b/src/database/server/models/message.ts
index a5114ea47a6b0..0ea7a3bf57637 100644
--- a/src/database/server/models/message.ts
+++ b/src/database/server/models/message.ts
@@ -14,16 +14,18 @@ import { idGenerator } from '@/database/utils/idGenerator';
 import {
   ChatFileItem,
   ChatImageItem,
+  ChatMessage,
   ChatTTS,
   ChatToolPayload,
+  ChatTranslate,
   CreateMessageParams,
+  MessageItem,
   ModelRankItem,
 } from '@/types/message';
 import { merge } from '@/utils/merge';
 import { today } from '@/utils/time';
 
 import {
-  MessageItem,
   MessagePluginItem,
   NewMessageQuery,
   chunks,
@@ -61,7 +63,7 @@
     options: {
       postProcessUrl?: (path: string | null, file: { fileType: string }) => Promise<string>;
     } = {},
-  ): Promise<MessageItem[]> => {
+  ) => {
     const offset = current * pageSize;
 
     // 1. get basic messages
@@ -71,6 +73,7 @@
         id: messages.id,
         role: messages.role,
         content: messages.content,
+        reasoning: messages.reasoning,
         error: messages.error,
 
         model: messages.model,
@@ -220,10 +223,11 @@
             // eslint-disable-next-line @typescript-eslint/no-unused-vars
             .map(({ id, url, name }) => ({ alt: name!, id, url })),
 
+          meta: {},
           ragQuery: messageQuery?.rewriteQuery,
           ragQueryId: messageQuery?.id,
           ragRawQuery: messageQuery?.userQuery,
-        };
+        } as unknown as ChatMessage;
       },
     );
   };
@@ -252,27 +256,33 @@
     return result[0];
   };
 
-  queryAll = async (): Promise<MessageItem[]> => {
-    return this.db
+  queryAll = async () => {
+    const result = await this.db
       .select()
      .from(messages)
      .orderBy(messages.createdAt)
      .where(eq(messages.userId, this.userId));
+
+    return result as MessageItem[];
   };
 
-  queryBySessionId = async (sessionId?: string | null): Promise<MessageItem[]> => {
-    return this.db.query.messages.findMany({
+  queryBySessionId = async (sessionId?: string | null) => {
+    const result = await this.db.query.messages.findMany({
       orderBy: [asc(messages.createdAt)],
       where: and(eq(messages.userId, this.userId), this.matchSession(sessionId)),
     });
+
+    return result as MessageItem[];
   };
 
-  queryByKeyword = async (keyword: string): Promise<MessageItem[]> => {
+  queryByKeyword = async (keyword: string) => {
     if (!keyword) return [];
-    return this.db.query.messages.findMany({
+    const result = await this.db.query.messages.findMany({
       orderBy: [desc(messages.createdAt)],
       where: and(eq(messages.userId, this.userId), like(messages.content, `%${keyword}%`)),
     });
+
+    return result as MessageItem[];
   };
 
   count = async (params?: {
@@ -414,6 +424,8 @@
       pluginState,
       fileChunks,
       ragQueryId,
+      updatedAt,
+      createdAt,
       ...message
     }: CreateMessageParams,
     id: string = this.genId(),
@@ -423,9 +435,12 @@
       .insert(messages)
       .values({
         ...message,
+        // TODO: remove this when the client is updated
+        createdAt: createdAt ? new Date(createdAt) : undefined,
         id,
         model: fromModel,
         provider: fromProvider,
+        updatedAt: updatedAt ? new Date(updatedAt) : undefined,
         userId: this.userId,
       })
       .returning()) as MessageItem[];
@@ -466,7 +481,8 @@
   batchCreate = async (newMessages: MessageItem[]) => {
     const messagesToInsert = newMessages.map((m) => {
-      return { ...m, userId: this.userId };
+      // TODO: need a better way to handle this
+      return { ...m, role: m.role as any, userId: this.userId };
     });
 
     return this.db.insert(messages).values(messagesToInsert);
@@ -482,7 +498,11 @@
   update = async (id: string, message: Partial<MessageItem>) => {
     return this.db
       .update(messages)
-      .set(message)
+      .set({
+        ...message,
+        // TODO: need a better way to handle this
+        role: message.role as any,
+      })
       .where(and(eq(messages.id, id), eq(messages.userId, this.userId)));
   };
 
@@ -507,7 +527,7 @@
     return this.db.update(messagePlugins).set(value).where(eq(messagePlugins.id, id));
   };
 
-  updateTranslate = async (id: string, translate: Partial) => {
     const result = await this.db.query.messageTranslates.findFirst({
       where: and(eq(messageTranslates.id, id)),
     });
@@ -555,7 +575,9 @@
     if (message.length === 0) return;
 
     // 2. check whether the message contains tools
-    const toolCallIds = message[0].tools?.map((tool: ChatToolPayload) => tool.id).filter(Boolean);
+    const toolCallIds = (message[0].tools as ChatToolPayload[])
+      ?.map((tool) => tool.id)
+      .filter(Boolean);
 
     let relatedMessageIds: string[] = [];
 
diff --git a/src/database/server/models/topic.ts b/src/database/server/models/topic.ts
index 7c7f757219cf8..f022da4b17754 100644
--- a/src/database/server/models/topic.ts
+++ b/src/database/server/models/topic.ts
@@ -9,9 +9,10 @@ import {
   genWhere,
 } from '@/database/utils/genWhere';
 import { idGenerator } from '@/database/utils/idGenerator';
+import { MessageItem } from '@/types/message';
 import { TopicRankItem } from '@/types/topic';
 
-import { NewMessage, TopicItem, messages, topics } from '../../schemas';
+import { TopicItem, messages, topics } from '../../schemas';
 
 export interface CreateTopicParams {
   favorite?: boolean;
@@ -244,7 +245,7 @@
               id: idGenerator('messages'),
               topicId: duplicatedTopic.id,
             })
-            .returning()) as NewMessage[];
+            .returning()) as MessageItem[];
 
           return result[0];
         }),
diff --git a/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx b/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx
new file mode 100644
index 0000000000000..eccab5e4b8351
--- /dev/null
+++ b/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx
@@ -0,0 +1,124 @@
+import { Icon, Markdown } from '@lobehub/ui';
+import { createStyles } from 'antd-style';
+import { AtomIcon, ChevronDown, ChevronRight } from 'lucide-react';
+import { rgba } from 'polished';
+import { memo, useEffect, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+import { Flexbox } from 'react-layout-kit';
+
+import { useChatStore } from '@/store/chat';
+import { aiChatSelectors } from '@/store/chat/selectors';
+
+const useStyles = createStyles(({ css, token, isDarkMode }) => ({
+  container: css`
+    cursor: pointer;
+
+    width: fit-content;
+    padding-block: 4px;
+    padding-inline: 8px;
+    border-radius: 6px;
+
+    color: ${token.colorTextTertiary};
+
+    &:hover {
+      background: ${isDarkMode ? token.colorFillQuaternary : token.colorFillTertiary};
+    }
+  `,
+  expand: css`
+    background: ${isDarkMode ? token.colorFillQuaternary : token.colorFillTertiary} !important;
+  `,
+  shinyText: css`
+    color: ${rgba(token.colorText, 0.45)};
+
+    background: linear-gradient(
+      120deg,
+      ${rgba(token.colorTextBase, 0)} 40%,
+      ${token.colorTextSecondary} 50%,
+      ${rgba(token.colorTextBase, 0)} 60%
+    );
+    background-clip: text;
+    background-size: 200% 100%;
+
+    animation: shine 1.5s linear infinite;
+
+    @keyframes shine {
+      0% {
+        background-position: 100%;
+      }
+
+      100% {
+        background-position: -100%;
+      }
+    }
+  `,
+  title: css`
+    overflow: hidden;
+    display: -webkit-box;
+    -webkit-box-orient: vertical;
+    -webkit-line-clamp: 1;
+
+    font-size: 12px;
+    text-overflow: ellipsis;
+  `,
+}));
+
+interface ThinkingProps {
+  content?: string;
+  duration?: number;
+  id: string;
+}
+
+const Thinking = memo<ThinkingProps>(({ content = '', duration, id }) => {
+  const { t } = useTranslation('chat');
+  const { styles, cx } = useStyles();
+
+  const [showDetail, setShowDetail] = useState(false);
+
+  const isReasoning = useChatStore(aiChatSelectors.isMessageInReasoning(id));
+
+  useEffect(() => {
+    if (isReasoning && !content) {
+      setShowDetail(true);
+    }
+
+    if (!isReasoning) {
+      setShowDetail(false);
+    }
+  }, [isReasoning, content]);
+
+  return (
+    <Flexbox
+      className={cx(styles.container, showDetail && styles.expand)}
+      gap={16}
+      onClick={() => {
+        setShowDetail(!showDetail);
+      }}
+    >
+      <Flexbox distribution={'space-between'} flex={1} horizontal>
+        {isReasoning ? (
+          <Flexbox className={styles.shinyText} gap={8} horizontal>
+            <Icon icon={AtomIcon} />
+            {t('reasoning.thinking')}
+          </Flexbox>
+        ) : (
+          <Flexbox className={styles.title} gap={8} horizontal>
+            <Icon icon={AtomIcon} />
+            {t('reasoning.thought', { duration: ((duration || 0) / 1000).toFixed(1) })}
+          </Flexbox>
+        )}
+        <Icon icon={showDetail ? ChevronDown : ChevronRight} />
+      </Flexbox>
+
+      {showDetail && (
+        <Markdown variant={'chat'}>
+          {content}
+        </Markdown>
+      )}
+    </Flexbox>
+  );
+});
+
+export default Thinking;
diff --git a/src/features/Conversation/Messages/Assistant/index.tsx b/src/features/Conversation/Messages/Assistant/index.tsx
index e06d850cfba39..f000b79d84d3d 100644
--- a/src/features/Conversation/Messages/Assistant/index.tsx
+++ b/src/features/Conversation/Messages/Assistant/index.tsx
@@ -3,13 +3,15 @@ import { ReactNode, Suspense, memo, useContext } from 'react';
 import { Flexbox } from 'react-layout-kit';
 
 import { LOADING_FLAT } from '@/const/message';
-import { InPortalThreadContext } from '@/features/Conversation/components/ChatItem/InPortalThreadContext';
 import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
+import { aiChatSelectors } from '@/store/chat/slices/aiChat/selectors';
 import { ChatMessage } from '@/types/message';
 
+import { InPortalThreadContext } from '../../components/ChatItem/InPortalThreadContext';
 import { DefaultMessage } from '../Default';
 import FileChunks from './FileChunks';
+import Thinking from './Reasoning';
 import ToolCall from './ToolCallItem';
 
 export const AssistantMessage = memo<
@@ -23,6 +25,10 @@
   const inThread = useContext(InPortalThreadContext);
   const isToolCallGenerating = generating && (content === LOADING_FLAT || !content) && !!tools;
 
+  const isReasoning = useChatStore(aiChatSelectors.isMessageInReasoning(id));
+
+  const showReasoning = !!props.reasoning || (!props.reasoning && isReasoning);
+
   return editing ? (
       {!!chunksList && chunksList.length > 0 && <FileChunks data={chunksList} />}
+      {showReasoning && <Thinking {...props.reasoning} id={id} />}
       {content && (
diff --git a/src/libs/agent-runtime/google/index.ts b/src/libs/agent-runtime/google/index.ts
@@ ... @@
         enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => modelName.endsWith(m.id))?.enabled || false,
         functionCall: modelName.toLowerCase().includes('gemini'),
         id: modelName,
-        vision: modelName.toLowerCase().includes('vision') || modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('gemini-1.0'),
+        vision:
+          modelName.toLowerCase().includes('vision') ||
+          (modelName.toLowerCase().includes('gemini') &&
+            !modelName.toLowerCase().includes('gemini-1.0')),
       };
     })
     .filter(Boolean) as ChatModelCard[];
diff --git a/src/libs/agent-runtime/utils/streams/openai.test.ts b/src/libs/agent-runtime/utils/streams/openai.test.ts
index a393ee10f3dd5..a7625ce18764d 100644
--- a/src/libs/agent-runtime/utils/streams/openai.test.ts
+++ b/src/libs/agent-runtime/utils/streams/openai.test.ts
@@ -552,4 +552,207 @@ describe('OpenAIStream', () => {
       expect(onToolCallMock).toHaveBeenCalledTimes(2);
     });
   });
+
+  describe('Reasoning', () => {
+    it('should handle reasoning event', async () => {
+      const data = [
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { role: 'assistant', content: null, reasoning_content: '' },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: null, reasoning_content: '您好' },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: null, reasoning_content: '!'
}, + logprobs: null, + finish_reason: null, + }, + ], + }, + { + id: '1', + object: 'chat.completion.chunk', + created: 1737563070, + model: 'deepseek-reasoner', + system_fingerprint: 'fp_1c5d8833bc', + choices: [ + { + index: 0, + delta: { content: '你好', reasoning_content: null }, + logprobs: null, + finish_reason: null, + }, + ], + }, + { + id: '1', + object: 'chat.completion.chunk', + created: 1737563070, + model: 'deepseek-reasoner', + system_fingerprint: 'fp_1c5d8833bc', + choices: [ + { + index: 0, + delta: { content: '很高兴', reasoning_cont: null }, + logprobs: null, + finish_reason: null, + }, + ], + }, + { + id: '1', + object: 'chat.completion.chunk', + created: 1737563070, + model: 'deepseek-reasoner', + system_fingerprint: 'fp_1c5d8833bc', + choices: [ + { + index: 0, + delta: { content: '为您', reasoning_content: null }, + logprobs: null, + finish_reason: null, + }, + ], + }, + { + id: '1', + object: 'chat.completion.chunk', + created: 1737563070, + model: 'deepseek-reasoner', + system_fingerprint: 'fp_1c5d8833bc', + choices: [ + { + index: 0, + delta: { content: '提供', reasoning_content: null }, + logprobs: null, + finish_reason: null, + }, + ], + }, + { + id: '1', + object: 'chat.completion.chunk', + created: 1737563070, + model: 'deepseek-reasoner', + system_fingerprint: 'fp_1c5d8833bc', + choices: [ + { + index: 0, + delta: { content: '帮助。', reasoning_content: null }, + logprobs: null, + finish_reason: null, + }, + ], + }, + { + id: '1', + object: 'chat.completion.chunk', + created: 1737563070, + model: 'deepseek-reasoner', + system_fingerprint: 'fp_1c5d8833bc', + choices: [ + { + index: 0, + delta: { content: '', reasoning_content: null }, + logprobs: null, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 6, + completion_tokens: 104, + total_tokens: 110, + prompt_tokens_details: { cached_tokens: 0 }, + completion_tokens_details: { reasoning_tokens: 70 }, + prompt_cache_hit_tokens: 0, + prompt_cache_miss_tokens: 6, + }, + }, + ]; + + const mockOpenAIStream = new ReadableStream({ + start(controller) { + data.forEach((chunk) => { + controller.enqueue(chunk); + }); + + controller.close(); + }, + }); + + const protocolStream = OpenAIStream(mockOpenAIStream); + + const decoder = new TextDecoder(); + const chunks = []; + + // @ts-ignore + for await (const chunk of protocolStream) { + chunks.push(decoder.decode(chunk, { stream: true })); + } + + expect(chunks).toEqual( + [ + 'id: 1', + 'event: reasoning', + `data: ""\n`, + 'id: 1', + 'event: reasoning', + `data: "您好"\n`, + 'id: 1', + 'event: reasoning', + `data: "!"\n`, + 'id: 1', + 'event: text', + `data: "你好"\n`, + 'id: 1', + 'event: text', + `data: "很高兴"\n`, + 'id: 1', + 'event: text', + `data: "为您"\n`, + 'id: 1', + 'event: text', + `data: "提供"\n`, + 'id: 1', + 'event: text', + `data: "帮助。"\n`, + 'id: 1', + 'event: stop', + `data: "stop"\n`, + ].map((i) => `${i}\n`), + ); + }); + }); }); diff --git a/src/libs/agent-runtime/utils/streams/openai.ts b/src/libs/agent-runtime/utils/streams/openai.ts index 24670146db417..07d06ce653edc 100644 --- a/src/libs/agent-runtime/utils/streams/openai.ts +++ b/src/libs/agent-runtime/utils/streams/openai.ts @@ -45,6 +45,7 @@ export const transformOpenAIStream = ( return { data: chunk, id: chunk.id, type: 'data' }; } + // tools calling if (typeof item.delta?.tool_calls === 'object' && item.delta.tool_calls?.length > 0) { return { data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => { @@ -91,7 +92,13 @@ export const transformOpenAIStream = ( return { data: 
item.delta.content, id: chunk.id, type: 'text' }; } - if (item.delta?.content === null) { + // no-content case + if (item.delta && item.delta.content === null) { + // the deepseek reasoner streams its thinking in the reasoning_content field + if ('reasoning_content' in item.delta && typeof item.delta.reasoning_content === 'string') { + return { data: item.delta.reasoning_content, id: chunk.id, type: 'reasoning' }; + } + return { data: item.delta, id: chunk.id, type: 'data' }; } diff --git a/src/libs/agent-runtime/utils/streams/protocol.ts b/src/libs/agent-runtime/utils/streams/protocol.ts index dade76f2dc4fc..1cb16e5cdfbe0 100644 --- a/src/libs/agent-runtime/utils/streams/protocol.ts +++ b/src/libs/agent-runtime/utils/streams/protocol.ts @@ -15,7 +15,7 @@ export interface StreamStack { export interface StreamProtocolChunk { data: any; id?: string; - type: 'text' | 'tool_calls' | 'data' | 'stop' | 'error'; + type: 'text' | 'tool_calls' | 'data' | 'stop' | 'error' | 'reasoning'; } export interface StreamToolCallChunkData { diff --git a/src/locales/default/chat.ts b/src/locales/default/chat.ts index 29087c9260e30..fb7d764b7c6f3 100644 --- a/src/locales/default/chat.ts +++ b/src/locales/default/chat.ts @@ -84,6 +84,10 @@ export default { }, }, }, + reasoning: { + thinking: '深度思考中', + thought: '已深度思考(用时 {{duration}} 秒)', + }, regenerate: '重新生成', roleAndArchive: '角色与记录', searchAgentPlaceholder: '搜索助手...', diff --git a/src/locales/default/components.ts b/src/locales/default/components.ts index 9afc63d92c229..4726dda59178b 100644 --- a/src/locales/default/components.ts +++ b/src/locales/default/components.ts @@ -78,6 +78,7 @@ export default { custom: '自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性', file: '该模型支持上传文件读取与识别', functionCall: '该模型支持函数调用(Function Call)', + reasoning: '该模型支持深度思考', tokens: '该模型单个会话最多支持 {{tokens}} Tokens', vision: '该模型支持视觉识别', }, diff --git a/src/server/routers/lambda/message.ts b/src/server/routers/lambda/message.ts index 7ad269b8fed79..f165f95dd7db9 100644 --- a/src/server/routers/lambda/message.ts +++ b/src/server/routers/lambda/message.ts @@ -63,10 +63,12 @@ export const messageRouter = router({ return data.id; }), + // TODO: it will be removed in V2 getAllMessages: messageProcedure.query(async ({ ctx }): Promise<MessageItem[]> => { - return ctx.messageModel.queryAll(); + return ctx.messageModel.queryAll() as any; }), + // TODO: it will be removed in V2 getAllMessagesInSession: messageProcedure .input( z.object({ }), ) .query(async ({ ctx, input }): Promise<MessageItem[]> => { - return ctx.messageModel.queryBySessionId(input.sessionId); + return ctx.messageModel.queryBySessionId(input.sessionId) as any; }), getHeatmaps: messageProcedure.query(async ({ ctx }) => { diff --git a/src/services/message/client.test.ts b/src/services/message/client.test.ts index b07e1e70a011c..21a66dcdfce18 100644 --- a/src/services/message/client.test.ts +++ b/src/services/message/client.test.ts @@ -5,7 +5,6 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { MessageModel } from '@/database/_deprecated/models/message'; import { clientDB, initializeDB } from '@/database/client/db'; import { - MessageItem, files, messagePlugins, messageTTS, @@ -21,6 +20,7 @@ import { ChatTTS, ChatTranslate, CreateMessageParams, + MessageItem, } from '@/types/message'; import { ClientService } from './client'; diff --git a/src/services/message/type.ts b/src/services/message/type.ts index 292adde55f806..25d259bf4827d 100644 --- a/src/services/message/type.ts +++
b/src/services/message/type.ts @@ -1,12 +1,12 @@ import type { HeatmapsProps } from '@lobehub/charts'; -import { MessageItem } from '@/database/schemas'; import { ChatMessage, ChatMessageError, ChatTTS, ChatTranslate, CreateMessageParams, + MessageItem, ModelRankItem, } from '@/types/message'; diff --git a/src/store/chat/selectors.ts b/src/store/chat/selectors.ts index 742459e06780a..f68f24aaf8d62 100644 --- a/src/store/chat/selectors.ts +++ b/src/store/chat/selectors.ts @@ -1,3 +1,4 @@ +export { aiChatSelectors } from './slices/aiChat/selectors'; export { chatToolSelectors } from './slices/builtinTool/selectors'; export { chatSelectors } from './slices/message/selectors'; export * from './slices/portal/selectors'; diff --git a/src/store/chat/slices/aiChat/actions/generateAIChat.ts b/src/store/chat/slices/aiChat/actions/generateAIChat.ts index d1d355c8d5561..77cd04fca49a7 100644 --- a/src/store/chat/slices/aiChat/actions/generateAIChat.ts +++ b/src/store/chat/slices/aiChat/actions/generateAIChat.ts @@ -72,7 +72,7 @@ export interface AIGenerateAction { */ internal_fetchAIChatMessage: ( messages: ChatMessage[], - assistantMessageId: string, + messageId: string, params?: ProcessMessageParams, ) => Promise<{ isFunctionCall: boolean; @@ -102,6 +102,14 @@ export interface AIGenerateAction { * Controls the streaming state of tool calling processes, updating the UI accordingly */ internal_toggleToolCallingStreaming: (id: string, streaming: boolean[] | undefined) => void; + /** + * Toggles the loading state for AI message reasoning, managing the UI feedback + */ + internal_toggleChatReasoning: ( + loading: boolean, + id?: string, + action?: string, + ) => AbortController | undefined; } export const generateAIChat: StateCreator< @@ -357,19 +365,20 @@ export const generateAIChat: StateCreator< await get().internal_summaryHistory(historyMessages); } }, - internal_fetchAIChatMessage: async (messages, assistantId, params) => { + internal_fetchAIChatMessage: async (messages, messageId, params) => { const { internal_toggleChatLoading, refreshMessages, internal_updateMessageContent, internal_dispatchMessage, internal_toggleToolCallingStreaming, + internal_toggleChatReasoning, } = get(); const abortController = internal_toggleChatLoading( true, - assistantId, - n('generateMessage(start)', { assistantId, messages }) as string, + messageId, + n('generateMessage(start)', { messageId, messages }) as string, ); const agentConfig = getAgentConfig(); @@ -414,6 +423,9 @@ export const generateAIChat: StateCreator< let isFunctionCall = false; let msgTraceId: string | undefined; let output = ''; + let thinking = ''; + let thinkingStartAt: number; + let duration: number; const historySummary = topicSelectors.currentActiveTopicSummary(get()); await chatService.createAssistantMessageStream({ @@ -434,43 +446,74 @@ export const generateAIChat: StateCreator< }, isWelcomeQuestion: params?.isWelcomeQuestion, onErrorHandle: async (error) => { - await messageService.updateMessageError(assistantId, error); + await messageService.updateMessageError(messageId, error); await refreshMessages(); }, - onFinish: async (content, { traceId, observationId, toolCalls }) => { + onFinish: async (content, { traceId, observationId, toolCalls, reasoning }) => { // if there is traceId, update it if (traceId) { msgTraceId = traceId; - await messageService.updateMessage(assistantId, { + await messageService.updateMessage(messageId, { traceId, observationId: observationId ?? 
undefined, }); } if (toolCalls && toolCalls.length > 0) { - internal_toggleToolCallingStreaming(assistantId, undefined); + internal_toggleToolCallingStreaming(messageId, undefined); } // update the content after fetch result - await internal_updateMessageContent(assistantId, content, toolCalls); + await internal_updateMessageContent( + messageId, + content, + toolCalls, + !!reasoning ? { content: reasoning, duration } : undefined, + ); }, onMessageHandle: async (chunk) => { switch (chunk.type) { case 'text': { output += chunk.text; + + // a missing duration while thinkingStartAt is set means reasoning has just ended + if (thinkingStartAt && !duration) { + duration = Date.now() - thinkingStartAt; + internal_toggleChatReasoning(false, messageId, n('generateMessage(end)') as string); + } + internal_dispatchMessage({ - id: assistantId, + id: messageId, type: 'updateMessage', - value: { content: output }, + value: { + content: output, + reasoning: !!thinking ? { content: thinking, duration } : undefined, + }, + }); + break; + } + case 'reasoning': { + // if there is no thinkingStartAt, it means the start of reasoning + if (!thinkingStartAt) { + thinkingStartAt = Date.now(); + internal_toggleChatReasoning(true, messageId, n('generateMessage(start)') as string); + } + + thinking += chunk.text; + + internal_dispatchMessage({ + id: messageId, + type: 'updateMessage', + value: { reasoning: { content: thinking } }, + }); break; } // if this message is just a tool call case 'tool_calls': { - internal_toggleToolCallingStreaming(assistantId, chunk.isAnimationActives); + internal_toggleToolCallingStreaming(messageId, chunk.isAnimationActives); internal_dispatchMessage({ - id: assistantId, + id: messageId, type: 'updateMessage', value: { tools: get().internal_transformToolCalls(chunk.tool_calls) }, }); @@ -480,7 +523,7 @@ }, }); - internal_toggleChatLoading(false, assistantId, n('generateMessage(end)') as string); + internal_toggleChatLoading(false, messageId, n('generateMessage(end)') as string); return { isFunctionCall, @@ -540,6 +583,9 @@ internal_toggleChatLoading: (loading, id, action) => { return get().internal_toggleLoadingArrays('chatLoadingIds', loading, id, action); }, + internal_toggleChatReasoning: (loading, id, action) => { + return get().internal_toggleLoadingArrays('reasoningLoadingIds', loading, id, action); + }, internal_toggleToolCallingStreaming: (id, streaming) => { set( { diff --git a/src/store/chat/slices/aiChat/initialState.ts b/src/store/chat/slices/aiChat/initialState.ts index 81849b6118e83..59ec88a82719e 100644 --- a/src/store/chat/slices/aiChat/initialState.ts +++ b/src/store/chat/slices/aiChat/initialState.ts @@ -11,6 +11,10 @@ export interface ChatAIChatState { */ messageRAGLoadingIds: string[]; pluginApiLoadingIds: string[]; + /** + * whether the AI message is reasoning + */ + reasoningLoadingIds: string[]; /** * the tool calling stream ids */ @@ -23,5 +27,6 @@ export const initialAiChatState: ChatAIChatState = { inputMessage: '', messageRAGLoadingIds: [], pluginApiLoadingIds: [], + reasoningLoadingIds: [], toolCallingStreamIds: {}, }; diff --git a/src/store/chat/slices/aiChat/selectors.ts b/src/store/chat/slices/aiChat/selectors.ts new file mode 100644 index 0000000000000..0c4f4cf7ca7e0 --- /dev/null +++ b/src/store/chat/slices/aiChat/selectors.ts @@ -0,0 +1,9 @@ + +import type { ChatStoreState } from '../../initialState'; + +const isMessageInReasoning = (id: string) => (s: ChatStoreState) => + s.reasoningLoadingIds.includes(id); + +export
const aiChatSelectors = { + isMessageInReasoning, +}; diff --git a/src/store/chat/slices/message/action.ts b/src/store/chat/slices/message/action.ts index b840ccf24cc43..9411c78f83ceb 100644 --- a/src/store/chat/slices/message/action.ts +++ b/src/store/chat/slices/message/action.ts @@ -17,6 +17,7 @@ import { ChatMessageError, CreateMessageParams, MessageToolCall, + ModelReasoning, } from '@/types/message'; import { TraceEventPayloads } from '@/types/trace'; import { setNamespace } from '@/utils/storeDebug'; @@ -73,6 +74,7 @@ export interface ChatMessageAction { id: string, content: string, toolCalls?: MessageToolCall[], + reasoning?: ModelReasoning, ) => Promise<void>; /** * update the message error with optimistic update */ @@ -270,7 +272,7 @@ await messageService.updateMessage(id, { error }); await get().refreshMessages(); }, - internal_updateMessageContent: async (id, content, toolCalls) => { + internal_updateMessageContent: async (id, content, toolCalls, reasoning) => { const { internal_dispatchMessage, refreshMessages, internal_transformToolCalls } = get(); // Due to the async update method and refresh need about 100ms @@ -289,6 +291,7 @@ await messageService.updateMessage(id, { content, tools: toolCalls ? internal_transformToolCalls(toolCalls) : undefined, + reasoning, }); await refreshMessages(); }, diff --git a/src/types/aiModel.ts b/src/types/aiModel.ts index 1813b1557baa3..8e2f0c3076add 100644 --- a/src/types/aiModel.ts +++ b/src/types/aiModel.ts @@ -30,6 +30,10 @@ export interface ModelAbilities { * whether model supports function call */ functionCall?: boolean; + /** + * whether model supports reasoning + */ + reasoning?: boolean; /** * whether model supports vision */ @@ -126,20 +130,7 @@ export interface AiModelConfig { } export interface AIChatModelCard extends AIBaseModelCard { - abilities?: { - /** - * whether model supports file upload - */ - files?: boolean; - /** - * whether model supports function call - */ - functionCall?: boolean; - /** - * whether model supports vision - */ - vision?: boolean; - }; + abilities?: ModelAbilities; config?: AiModelConfig; maxOutput?: number; pricing?: ChatModelPricing; diff --git a/src/types/message/base.ts b/src/types/message/base.ts new file mode 100644 index 0000000000000..788c417593dfd --- /dev/null +++ b/src/types/message/base.ts @@ -0,0 +1,59 @@ +export interface ModelReasoning { + content?: string; + duration?: number; +} + +export type MessageRoleType = 'user' | 'system' | 'assistant' | 'tool'; + +export interface MessageItem { + agentId: string | null; + clientId: string | null; + content: string | null; + createdAt: Date; + error: any | null; + favorite: boolean | null; + id: string; + model: string | null; + observationId: string | null; + parentId: string | null; + provider: string | null; + quotaId: string | null; + reasoning: ModelReasoning | null; + role: string; + sessionId: string | null; + threadId: string | null; + // jsonb type + tools: any | null; + topicId: string | null; + traceId: string | null; + updatedAt: Date; + userId: string; +} + +export interface NewMessage { + agentId?: string | null; + clientId?: string | null; + content?: string | null; + // optional because it has a default value + createdAt?: Date; + error?: any | null; + favorite?: boolean; + // optional because it has a default function + id?: string; + model?: string | null; + observationId?: string | null; + parentId?: string | null; + provider?: string | null; + quotaId?: string | null; +
// required because it's notNull + role: 'user' | 'system' | 'assistant' | 'tool'; + sessionId?: string | null; + threadId?: string | null; + tools?: any | null; + topicId?: string | null; + traceId?: string | null; + // optional because it's generated + updatedAt?: Date; + userId: string; +} diff --git a/src/types/message/chat.ts b/src/types/message/chat.ts new file mode 100644 index 0000000000000..8a99730c5c662 --- /dev/null +++ b/src/types/message/chat.ts @@ -0,0 +1,136 @@ +import { IPluginErrorType } from '@lobehub/chat-plugin-sdk'; + +import { ILobeAgentRuntimeErrorType } from '@/libs/agent-runtime'; +import { ErrorType } from '@/types/fetch'; +import { MessageRoleType, ModelReasoning } from '@/types/message/base'; +import { ChatPluginPayload, ChatToolPayload } from '@/types/message/tools'; +import { Translate } from '@/types/message/translate'; +import { MetaData } from '@/types/meta'; +import { MessageSemanticSearchChunk } from '@/types/rag'; + +/** + * chat message error object + */ +export interface ChatMessageError { + body?: any; + message: string; + type: ErrorType | IPluginErrorType | ILobeAgentRuntimeErrorType; +} + +export interface ChatTranslate extends Translate { + content?: string; +} + +export interface ChatTTS { + contentMd5?: string; + file?: string; + voice?: string; +} + +export interface ChatFileItem { + fileType: string; + id: string; + name: string; + size: number; + url: string; +} + +export interface ChatImageItem { + alt: string; + id: string; + url: string; +} + +export interface ChatFileChunk { + fileId: string; + fileType: string; + fileUrl: string; + filename: string; + id: string; + similarity?: number; + text: string; +} + +export interface ChatMessageExtra { + fromModel?: string; + fromProvider?: string; + // translation + translate?: ChatTranslate | false | null; + // TTS + tts?: ChatTTS; +} + +export interface ChatMessage { + chunksList?: ChatFileChunk[]; + content: string; + createdAt: number; + error?: ChatMessageError | null; + // extra fields + extra?: ChatMessageExtra; + + fileList?: ChatFileItem[]; + /** + * this is a deprecated field, only used in client db, + * and should be removed after migrating to pglite; + * this field is replaced by fileList and imageList + * @deprecated + */ + files?: string[]; + id: string; + imageList?: ChatImageItem[]; + meta: MetaData; + + /** + * observation id + */ + observationId?: string; + /** + * parent message id + */ + parentId?: string; + + plugin?: ChatPluginPayload; + pluginState?: any; + /** + * quoted other message's id + */ + quotaId?: string; + ragQuery?: string | null; + ragQueryId?: string | null; + ragRawQuery?: string | null; + + reasoning?: ModelReasoning | null; + + /** + * message role type + */ + role: MessageRoleType; + sessionId?: string; + threadId?: string | null; + tool_call_id?: string; + tools?: ChatToolPayload[]; + /** + * message saved to the topic + */ + topicId?: string; + /** + * observability trace id + */ + traceId?: string; + updatedAt: number; +} + +export interface CreateMessageParams + extends Partial<Omit<ChatMessage, …>> { + content: string; + error?: ChatMessageError | null; + fileChunks?: MessageSemanticSearchChunk[]; + files?: string[]; + fromModel?: string; + fromProvider?: string; + role: MessageRoleType; + sessionId: string; + threadId?: string | null; + topicId?: string; + traceId?: string; +} diff --git a/src/types/message/index.ts b/src/types/message/index.ts index 43b213d607de9..87d19d9754920 100644 --- a/src/types/message/index.ts +++ b/src/types/message/index.ts @@ -1,142 +1,9 @@
-import { IPluginErrorType } from '@lobehub/chat-plugin-sdk'; - -import { ILobeAgentRuntimeErrorType } from '@/libs/agent-runtime'; -import { ErrorType } from '@/types/fetch'; import { UploadFileItem } from '@/types/files'; -import { MessageSemanticSearchChunk } from '@/types/rag'; - -import { BaseDataModel } from '../meta'; -import { ChatPluginPayload, ChatToolPayload } from './tools'; -import { Translate } from './translate'; - -export type MessageRoleType = 'user' | 'system' | 'assistant' | 'tool'; - -/** - * chat message error object - */ -export interface ChatMessageError { - body?: any; - message: string; - type: ErrorType | IPluginErrorType | ILobeAgentRuntimeErrorType; -} - -export interface ChatTranslate extends Translate { - content?: string; -} - -export interface ChatTTS { - contentMd5?: string; - file?: string; - voice?: string; -} +export * from './base'; +export * from './chat'; export * from './tools'; -export interface ChatFileItem { - fileType: string; - id: string; - name: string; - size: number; - url: string; -} - -export interface ChatImageItem { - alt: string; - id: string; - url: string; -} - -export interface ChatFileChunk { - fileId: string; - fileType: string; - fileUrl: string; - filename: string; - id: string; - similarity?: number; - text: string; -} - -export interface ChatMessageExtra { - fromModel?: string; - fromProvider?: string; - // translation - translate?: ChatTranslate | false | null; - // TTS - tts?: ChatTTS; -} - -export interface ChatMessage extends BaseDataModel { - chunksList?: ChatFileChunk[]; - content: string; - error?: ChatMessageError | null; - - // extra fields - extra?: ChatMessageExtra; - fileList?: ChatFileItem[]; - /** - * this is a deprecated field, only used in client db, - * and should be removed after migrating to pglite; - * this field is replaced by fileList and imageList - * @deprecated - */ - files?: string[]; - imageList?: ChatImageItem[]; - /** - * observation id - */ - observationId?: string; - - /** - * parent message id - */ - parentId?: string; - plugin?: ChatPluginPayload; - - pluginState?: any; - /** - * quoted other message's id - */ - quotaId?: string; - ragQuery?: string | null; - ragQueryId?: string | null; - ragRawQuery?: string | null; - /** - * message role type - */ - role: MessageRoleType; - - sessionId?: string; - threadId?: string | null; - - tool_call_id?: string; - tools?: ChatToolPayload[]; - /** - * message saved to the topic - */ - topicId?: string; - /** - * observability trace id - */ - traceId?: string; -} - -export type ChatMessageMap = Record<string, ChatMessage>; - -export interface CreateMessageParams - extends Partial<Omit<ChatMessage, …>> { - content: string; - error?: ChatMessageError | null; - fileChunks?: MessageSemanticSearchChunk[]; - files?: string[]; - fromModel?: string; - fromProvider?: string; - role: MessageRoleType; - sessionId: string; - threadId?: string | null; - topicId?: string; - traceId?: string; -} - export interface SendMessageParams { /** * create a thread diff --git a/src/utils/fetch/__tests__/fetchSSE.test.ts b/src/utils/fetch/__tests__/fetchSSE.test.ts index 5020c8166844f..f6dc47ab0e10d 100644 --- a/src/utils/fetch/__tests__/fetchSSE.test.ts +++ b/src/utils/fetch/__tests__/fetchSSE.test.ts @@ -154,6 +154,40 @@ describe('fetchSSE', () => { }); }); + + it('should handle reasoning event with smoothing correctly', async () => { + const mockOnMessageHandle = vi.fn(); + const mockOnFinish = vi.fn(); + + (fetchEventSource as any).mockImplementationOnce( + async (url: string, options: FetchEventSourceInit) => { + options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
+ options.onmessage!({ event: 'reasoning', data: JSON.stringify('Hello') } as any); + await sleep(100); + options.onmessage!({ event: 'reasoning', data: JSON.stringify(' World') } as any); + await sleep(100); + options.onmessage!({ event: 'text', data: JSON.stringify('hi') } as any); + }, + ); + + await fetchSSE('/', { + onMessageHandle: mockOnMessageHandle, + onFinish: mockOnFinish, + smoothing: true, + }); + + expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hell', type: 'reasoning' }); + expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'o', type: 'reasoning' }); + expect(mockOnMessageHandle).toHaveBeenNthCalledWith(3, { text: ' Wor', type: 'reasoning' }); + // more assertions for each character... + expect(mockOnFinish).toHaveBeenCalledWith('hi', { + observationId: null, + toolCalls: undefined, + reasoning: 'Hello World', + traceId: null, + type: 'done', + }); + }); + it('should handle tool_calls event with smoothing correctly', async () => { const mockOnMessageHandle = vi.fn(); const mockOnFinish = vi.fn(); diff --git a/src/utils/fetch/fetchSSE.ts b/src/utils/fetch/fetchSSE.ts index e12e91fecdd08..f37d22d40bd6f 100644 --- a/src/utils/fetch/fetchSSE.ts +++ b/src/utils/fetch/fetchSSE.ts @@ -21,6 +21,7 @@ export type OnFinishHandler = ( text: string, context: { observationId?: string | null; + reasoning?: string; toolCalls?: MessageToolCall[]; traceId?: string | null; type?: SSEFinishType; @@ -32,6 +33,11 @@ export interface MessageTextChunk { type: 'text'; } +export interface MessageReasoningChunk { + text: string; + type: 'reasoning'; +} + interface MessageToolCallsChunk { isAnimationActives?: boolean[]; tool_calls: MessageToolCall[]; @@ -43,7 +49,9 @@ export interface FetchSSEOptions { onAbort?: (text: string) => Promise; onErrorHandle?: (error: ChatMessageError) => void; onFinish?: OnFinishHandler; - onMessageHandle?: (chunk: MessageTextChunk | MessageToolCallsChunk) => void; + onMessageHandle?: ( + chunk: MessageTextChunk | MessageToolCallsChunk | MessageReasoningChunk, + ) => void; smoothing?: SmoothingParams | boolean; } @@ -233,7 +241,6 @@ const createSmoothToolCalls = (params: { */ // eslint-disable-next-line no-undef export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptions = {}) => { - let output = ''; let toolCalls: undefined | MessageToolCall[]; let triggerOnMessageHandler = false; @@ -247,6 +254,7 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio typeof smoothing === 'boolean' ? smoothing : (smoothing?.toolsCalling ?? true); const smoothingSpeed = isObject(smoothing) ? 
smoothing.speed : undefined; + let output = ''; const textController = createSmoothMessage({ onTextUpdate: (delta, text) => { output = text; @@ -255,6 +263,15 @@ startSpeed: smoothingSpeed, }); + let thinking = ''; + const thinkingController = createSmoothMessage({ + onTextUpdate: (delta, text) => { + thinking = text; + options.onMessageHandle?.({ text: delta, type: 'reasoning' }); + }, + startSpeed: smoothingSpeed, + }); + const toolCallsController = createSmoothToolCalls({ onToolCallsUpdate: (toolCalls, isAnimationActives) => { options.onMessageHandle?.({ isAnimationActives, tool_calls: toolCalls, type: 'tool_calls' }); @@ -333,6 +350,18 @@ break; } + case 'reasoning': { + if (textSmoothing) { + thinkingController.pushToQueue(data); + + if (!thinkingController.isAnimationActive) thinkingController.startAnimation(); + } else { + thinking += data; + options.onMessageHandle?.({ text: data, type: 'reasoning' }); + } + + break; + } case 'tool_calls': { // get final @@ -389,7 +418,13 @@ await toolCallsController.startAnimations(END_ANIMATION_SPEED); } - await options?.onFinish?.(output, { observationId, toolCalls, traceId, type: finishedType }); + await options?.onFinish?.(output, { + observationId, + reasoning: !!thinking ? thinking : undefined, + toolCalls, + traceId, + type: finishedType, + }); } }
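Reviewer note: a minimal, self-contained sketch of what the new `reasoning` branch in `transformOpenAIStream` does (the delta shape is simplified here; the real code works on the OpenAI SDK chunk types):

```ts
// Simplified model of an OpenAI-compatible stream delta. DeepSeek's
// reasoner adds a non-standard `reasoning_content` field alongside `content`.
interface Delta {
  content: string | null;
  reasoning_content?: string | null;
}

type ProtocolChunk = { data: unknown; id: string; type: 'text' | 'reasoning' | 'data' };

const transformDelta = (id: string, delta: Delta): ProtocolChunk => {
  // answer tokens stream through `content`
  if (typeof delta.content === 'string') return { data: delta.content, id, type: 'text' };

  // while the model is thinking, `content` stays null and the chain of
  // thought streams through `reasoning_content` instead
  if (delta.content === null && typeof delta.reasoning_content === 'string') {
    return { data: delta.reasoning_content, id, type: 'reasoning' };
  }

  // anything else passes through as raw data
  return { data: delta, id, type: 'data' };
};
```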
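The `duration` surfaced in the UI is measured on the client rather than reported by the API. A sketch of the bookkeeping `internal_fetchAIChatMessage` performs per message (store wiring omitted; names simplified):

```ts
type StreamChunk = { text: string; type: 'reasoning' | 'text' };

let thinking = '';
let output = '';
let thinkingStartAt: number | undefined;
let duration: number | undefined;

const onChunk = (chunk: StreamChunk) => {
  if (chunk.type === 'reasoning') {
    // the first reasoning chunk marks the start of thinking
    if (!thinkingStartAt) thinkingStartAt = Date.now();
    thinking += chunk.text;
  } else {
    // the first text chunk after thinking marks the end of reasoning
    if (thinkingStartAt && duration === undefined) duration = Date.now() - thinkingStartAt;
    output += chunk.text;
  }
};
```

Guarding on `thinkingStartAt` is what keeps non-reasoning models from ever computing a `NaN` duration, which is why the text branch in the generateAIChat hunk above checks it before closing the reasoning state.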
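Callers receive the new chunk type through `fetchSSE`'s `onMessageHandle` and the aggregated text through `onFinish`. A sketch of a consumer (the endpoint URL and handler bodies are illustrative only):

```ts
import { fetchSSE } from '@/utils/fetch/fetchSSE';

await fetchSSE('/webapi/chat', {
  method: 'POST',
  smoothing: true,
  onMessageHandle: (chunk) => {
    // MessageReasoningChunk and MessageTextChunk are discriminated by `type`
    if (chunk.type === 'reasoning') console.log('thinking:', chunk.text);
    if (chunk.type === 'text') console.log('answer:', chunk.text);
  },
  onFinish: async (text, { reasoning }) => {
    // `reasoning` is the full accumulated chain of thought, or undefined
    console.log('final answer:', text, '| reasoning:', reasoning);
  },
});
```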
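On the UI side, `AssistantMessage` combines the live store state with the persisted field to decide whether to render the Thinking panel; reduced to a hook (props are illustrative):

```ts
import { useChatStore } from '@/store/chat';
import { aiChatSelectors } from '@/store/chat/selectors';

// true while the message id sits in `reasoningLoadingIds` (still streaming),
// or once a persisted `reasoning` object exists on the message
const useShowReasoning = (id: string, reasoning?: { content?: string } | null) => {
  const isReasoning = useChatStore(aiChatSelectors.isMessageInReasoning(id));
  return !!reasoning || isReasoning;
};
```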
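`internal_toggleChatReasoning` delegates to the store's generic `internal_toggleLoadingArrays` helper, the same mechanism already used for `chatLoadingIds`. Under the assumption that the helper simply adds or removes an id (the real one also manages abort controllers), the pattern reduces to:

```ts
// add the id when a phase starts, remove it when the phase ends;
// an absent id with loading=false clears the whole array
const toggleLoadingId = (ids: string[], loading: boolean, id?: string): string[] => {
  if (!id) return loading ? ids : [];
  return loading ? [...new Set([...ids, id])] : ids.filter((i) => i !== id);
};

// usage: reasoningLoadingIds = toggleLoadingId(reasoningLoadingIds, true, messageId);
```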
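What finally lands on the message record is the `ModelReasoning` object from `src/types/message/base.ts`: the concatenated reasoning text plus the measured duration in milliseconds, which the Thinking header divides by 1000 for display. For example:

```ts
import type { ModelReasoning } from '@/types/message';

const reasoning: ModelReasoning = {
  content: '您好!', // all reasoning_content deltas joined together
  duration: 6500,   // ms between the first reasoning chunk and the first text chunk
};

// rendered via t('reasoning.thought', { duration: (6500 / 1000).toFixed(1) }) -> "6.5"
```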