diff --git a/AiServer/wwwroot/lib/data/ai-models.json b/AiServer/wwwroot/lib/data/ai-models.json index 51fad39..443d618 100644 --- a/AiServer/wwwroot/lib/data/ai-models.json +++ b/AiServer/wwwroot/lib/data/ai-models.json @@ -840,6 +840,13 @@ "description": "Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.", "icon": "/img/models/qwen.svg" }, + { + "id": "qwen-vl-plus", + "tags": [], + "website": "https://openrouter.ai/qwen/qwen-vl-plus", + "description": "Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for image input. It delivers significant performance across a broad range of visual tasks.", + "icon": "/img/models/qwen.svg" + }, { "id": "qwen-max", "tags": [], @@ -1080,6 +1087,13 @@ "website": "https://ollama.ai/library/zephyr", "description": "Zephyr is a series of fine-tuned versions of the Mistral and Mixtral models that are trained to act as helpful assistants." }, + { + "id": "gemini-flash", + "tags": [], + "website": "https://openrouter.ai/models/google/gemini-flash-1.5", + "description": "Gemini 1.5 Flash is a foundation model that performs well at a variety of multimodal tasks such as visual understanding, classification, summarization, and creating content from image, audio and video. 
It's adept at processing visual and text inputs such as photographs, documents, infographics, and screenshots.", "icon": "/img/models/gemini-pro.svg" }, { "id": "gemini-pro", "tags": [], @@ -1102,10 +1116,31 @@ "icon": "/img/models/gemini-pro.svg" }, { - "id": "gemini-flash", + "id": "gemini-pro-2.0", "tags": [], - "website": "https://openrouter.ai/models/google/gemini-flash-1.5", - "description": "Gemini 1.5 Flash is a foundation model that performs well at a variety of multimodal tasks such as visual understanding, classification, summarization, and creating content from image, audio and video. It's adept at processing visual and text inputs such as photographs, documents, infographics, and screenshots.", + "website": "https://openrouter.ai/google/gemini-2.0-pro-exp-02-05:free", + "description": "Gemini 2.0 Pro Experimental is a bleeding-edge version of the Gemini 2.0 Pro model. Because it's currently experimental, it will be heavily rate-limited by Google", + "icon": "/img/models/gemini-pro.svg" + }, + { + "id": "gemini-flash-2.0", + "tags": [], + "website": "https://openrouter.ai/google/gemini-2.0-flash-001", + "description": "Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to Gemini Flash 1.5, while maintaining quality on par with larger models like Gemini Pro 1.5. It introduces notable enhancements in multimodal understanding, coding capabilities, complex instruction following, and function calling. These advancements come together to deliver more seamless and robust agentic experiences.", + "icon": "/img/models/gemini-pro.svg" + }, + { + "id": "gemini-flash-lite-2.0", + "tags": [], + "website": "https://openrouter.ai/google/gemini-2.0-flash-lite-preview-02-05:free", + "description": "Gemini Flash Lite 2.0 offers a significantly faster time to first token (TTFT) compared to Gemini Flash 1.5, while maintaining quality on par with larger models like Gemini Pro 1.5. Because it's currently in preview, it will be heavily rate-limited by Google. This model will move from free to paid pending a general rollout on February 24th, at $0.075 / $0.30 per million input / output tokens respectively.", + "icon": "/img/models/gemini-pro.svg" + }, + { + "id": "gemini-flash-thinking-2.0", + "tags": [], + "website": "https://openrouter.ai/google/gemini-2.0-flash-thinking-exp:free", + "description": "Gemini 2.0 Flash Thinking Mode is an experimental model that's trained to generate the \"thinking process\" the model goes through as part of its response. As a result, Thinking Mode is capable of stronger reasoning capabilities in its responses than the base Gemini 2.0 Flash model.", "icon": "/img/models/gemini-pro.svg" }, { diff --git a/AiServer/wwwroot/lib/data/ai-types.json b/AiServer/wwwroot/lib/data/ai-types.json index 87e4093..87cfc33 100644 --- a/AiServer/wwwroot/lib/data/ai-types.json +++ b/AiServer/wwwroot/lib/data/ai-types.json @@ -72,6 +72,7 @@ "gemini-pro-vision": "google/gemini-pro-vision", "gemini-pro-1.5": "google/gemini-pro-1.5", "gemini-flash": "google/gemini-flash-1.5", + "gemini-flash-2.0": "google/gemini-2.0-flash-001", "gpt-3.5-turbo": "openai/gpt-3.5-turbo", "gpt-4": "openai/gpt-4", "gpt-4o": "openai/gpt-4o", @@ -100,12 +101,14 @@ "gemma:7b": "google/gemma-7b-it:free", "gemma2:9b": "google/gemma-2-9b-it:free", "gemini-flash-2.0": "google/gemini-2.0-flash-exp:free", + "gemini-flash-lite-2.0": "google/gemini-2.0-flash-lite-preview-02-05:free", "gemini-flash-thinking-2.0": "google/gemini-2.0-flash-thinking-exp:free", + "gemini-pro-2.0": "google/gemini-2.0-pro-exp-02-05:free", "qwen2:7b": "qwen/qwen-2-7b-instruct:free", "deepseek-r1:671b": "deepseek/deepseek-r1:free", "phi3:3.8b": "microsoft/phi-3-mini-128k-instruct:free", "phi3:14b": "microsoft/phi-3-medium-128k-instruct:free", - "zephyr:7b": "huggingfaceh4/zephyr-7b-beta:free" + "qwen-vl-plus": "qwen/qwen-vl-plus:free" } }, { @@ -156,11 +159,13 @@ "apiModels": { "gemini-pro": 
"gemini-1.0-pro-latest", "gemini-pro-vision": "gemini-1.0-pro-vision-latest", - "gemini-flash-2.0": "gemini-2.0-flash-exp", - "gemini-flash-1.5": "gemini-1.5-flash-002", + "gemini-flash-2.0": "gemini-2.0-flash", + "gemini-flash-lite-2.0": "gemini-2.0-flash-lite-preview-02-05", + "gemini-pro-2.0": "gemini-2.0-pro-exp-02-05", + "gemini-flash-thinking-2.0": "gemini-2.0-flash-thinking-exp-01-21", + "gemini-flash-1.5": "gemini-1.5-flash", "gemini-flash-1.5-8b": "gemini-1.5-flash-8b", - "gemini-pro-1.5": "gemini-1.5-pro-002", - "gemini-flash-thinking-2.0": "gemini-2.0-flash-thinking-exp-01-21" + "gemini-pro-1.5": "gemini-1.5-pro-002" } }, {