diff --git a/AiServer/wwwroot/lib/data/ai-models.json b/AiServer/wwwroot/lib/data/ai-models.json index 925bc60..3b55ac4 100644 --- a/AiServer/wwwroot/lib/data/ai-models.json +++ b/AiServer/wwwroot/lib/data/ai-models.json @@ -826,6 +826,36 @@ "description": "QwQ is an experimental research model focused on advancing AI reasoning capabilities.", "icon": "/img/models/qwen.svg" }, + { + "id": "qwen-turbo", + "tags": [ + "14b" + ], + "latest": "14b", + "website": "https://openrouter.ai/qwen/qwen-turbo", + "description": "Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.", + "icon": "/img/models/qwen.svg" + }, + { + "id": "qwen-plus", + "tags": [ + "72b" + ], + "latest": "72b", + "website": "https://openrouter.ai/qwen/qwen-plus", + "description": "Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.", + "icon": "/img/models/qwen.svg" + }, + { + "id": "qwen-max", + "tags": [ + "100b" + ], + "latest": "100b", + "website": "https://openrouter.ai/qwen/qwen-max", + "description": "Qwen-Max, based on Qwen2.5, provides the best inference performance among Qwen models, especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion tokens and further post-trained with curated Supervised Fine-Tuning (SFT) and Reinforcement Learning from Human Feedback (RLHF) methodologies.", + "icon": "/img/models/qwen.svg" + }, { "id": "samantha-mistral", "tags": [ diff --git a/AiServer/wwwroot/lib/data/ai-types.json b/AiServer/wwwroot/lib/data/ai-types.json index 7543111..87e4093 100644 --- a/AiServer/wwwroot/lib/data/ai-types.json +++ b/AiServer/wwwroot/lib/data/ai-types.json @@ -61,6 +61,9 @@ "qwen2-5:7b": "qwen/qwen-2.5-7b-instruct", "qwen2-5:72b": "qwen/qwen-2.5-72b-instruct", "qwen2.5-coder:32b": "qwen/qwen-2.5-coder-32b-instruct", + "qwen-turbo": "qwen/qwen-turbo", + "qwen-plus": "qwen/qwen-plus", + "qwen-max": "qwen/qwen-max", "qwq:32b": "qwen/qwq-32b-preview", "wizardlm2:7b": "microsoft/wizardlm-2-7b", "wizardlm2:8x22b": "microsoft/wizardlm-2-8x22b",