chore: Update supported models (#1557)
Wendong-Fan authored Feb 5, 2025
1 parent 9b41115 commit bc46c44
Showing 5 changed files with 76 additions and 3 deletions.
35 changes: 34 additions & 1 deletion camel/types/enums.py
@@ -40,7 +40,17 @@ class ModelType(UnifiedModelType, Enum):
O3_MINI = "o3-mini"

GLM_4 = "glm-4"
GLM_4V = 'glm-4v'
GLM_4V = "glm-4v"
GLM_4V_FLASH = "glm-4v-flash"
GLM_4V_PLUS_0111 = "glm-4v-plus-0111"
GLM_4_PLUS = "glm-4-plus"
GLM_4_AIR = "glm-4-air"
GLM_4_AIR_0111 = "glm-4-air-0111"
GLM_4_AIRX = "glm-4-airx"
GLM_4_LONG = "glm-4-long"
GLM_4_FLASHX = "glm-4-flashx"
GLM_4_FLASH = "glm-4-flash"
GLM_ZERO_PREVIEW = "glm-zero-preview"
GLM_3_TURBO = "glm-3-turbo"

# Groq platform models
@@ -106,6 +116,7 @@ class ModelType(UnifiedModelType, Enum):
NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"

# Gemini models
GEMINI_2_0_FLASH = "gemini-2.0-flash-exp"
GEMINI_1_5_FLASH = "gemini-1.5-flash"
GEMINI_1_5_PRO = "gemini-1.5-pro"
GEMINI_EXP_1114 = "gemini-exp-1114"
@@ -260,6 +271,16 @@ def is_zhipuai(self) -> bool:
ModelType.GLM_3_TURBO,
ModelType.GLM_4,
ModelType.GLM_4V,
ModelType.GLM_4V_FLASH,
ModelType.GLM_4V_PLUS_0111,
ModelType.GLM_4_PLUS,
ModelType.GLM_4_AIR,
ModelType.GLM_4_AIR_0111,
ModelType.GLM_4_AIRX,
ModelType.GLM_4_LONG,
ModelType.GLM_4_FLASHX,
ModelType.GLM_4_FLASH,
ModelType.GLM_ZERO_PREVIEW,
}

@property
@@ -356,6 +377,7 @@ def is_gemini(self) -> bool:
bool: Whether this type of models is gemini.
"""
return self in {
ModelType.GEMINI_2_0_FLASH,
ModelType.GEMINI_1_5_FLASH,
ModelType.GEMINI_1_5_PRO,
ModelType.GEMINI_EXP_1114,
@@ -516,6 +538,8 @@ def token_limit(self) -> int:
ModelType.NVIDIA_LLAMA3_70B,
ModelType.TOGETHER_MISTRAL_7B,
ModelType.MOONSHOT_V1_8K,
ModelType.GLM_4V_FLASH,
ModelType.GLM_4_AIRX,
}:
return 8_192
elif self in {
@@ -528,6 +552,8 @@ def token_limit(self) -> int:
ModelType.YI_LARGE_RAG,
ModelType.SAMBA_LLAMA_3_1_8B,
ModelType.SAMBA_LLAMA_3_1_405B,
ModelType.GLM_4V_PLUS_0111,
ModelType.GLM_ZERO_PREVIEW,
}:
return 16_384
elif self in {
@@ -595,6 +621,11 @@ def token_limit(self) -> int:
ModelType.SGLANG_LLAMA_3_2_1B,
ModelType.SGLANG_MIXTRAL_NEMO,
ModelType.MOONSHOT_V1_128K,
ModelType.GLM_4_PLUS,
ModelType.GLM_4_AIR,
ModelType.GLM_4_AIR_0111,
ModelType.GLM_4_FLASHX,
ModelType.GLM_4_FLASH,
}:
return 128_000
elif self in {
@@ -628,9 +659,11 @@ def token_limit(self) -> int:
}:
return 256_000
elif self in {
ModelType.GEMINI_2_0_FLASH,
ModelType.GEMINI_1_5_FLASH,
ModelType.GEMINI_1_5_PRO,
ModelType.GEMINI_EXP_1114, # Not given in docs, assuming the same
ModelType.GLM_4_LONG,
}:
return 1_048_576
elif self in {
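The enum changes above are self-contained: each new GLM member joins the `is_zhipuai` set and one of the existing `token_limit` branches. A minimal sketch of how they behave once this diff is applied — the `from camel.types import ModelType` import path is assumed from the test modules in this commit, not shown in this hunk:

```python
from camel.types import ModelType  # import path assumed from the test modules

# Newly added GLM entries behave like any other ModelType member.
model = ModelType.GLM_4_PLUS

print(model.is_zhipuai)   # True: GLM_4_PLUS is in the updated is_zhipuai set
print(model.token_limit)  # 128_000: GLM_4_PLUS sits in the 128k branch above

# Different variants land in different token-limit buckets per the diff.
print(ModelType.GLM_4_AIRX.token_limit)   # 8_192
print(ModelType.GLM_4_LONG.token_limit)   # 1_048_576
```

Note that `GLM_4_LONG` shares the 1,048,576-token branch with the Gemini models rather than getting a branch of its own.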
13 changes: 12 additions & 1 deletion docs/key_modules/models.md
@@ -44,6 +44,7 @@ The following table lists currently supported model platforms by CAMEL.
| Anthropic | claude-3-sonnet-20240229 | Y |
| Anthropic | claude-3-opus-latest | Y |
| Anthropic | claude-2.0 | N |
| Gemini | gemini-2.0-flash-exp | Y |
| Gemini | gemini-1.5-pro | Y |
| Gemini | gemini-1.5-flash | Y |
| Gemini | gemini-exp-1114 | Y |
@@ -72,8 +73,18 @@ The following table lists currently supported model platforms by CAMEL.
| Qwen | qwen2.5-14b-instruct | N |
| DeepSeek | deepseek-chat | N |
| DeepSeek | deepseek-reasoner | N |
| ZhipuAI | glm-4 | Y |
| ZhipuAI | glm-4v | Y |
| ZhipuAI | glm-4 | N |
| ZhipuAI | glm-4v-flash | Y |
| ZhipuAI | glm-4v-plus-0111 | Y |
| ZhipuAI | glm-4-plus | N |
| ZhipuAI | glm-4-air | N |
| ZhipuAI | glm-4-air-0111 | N |
| ZhipuAI | glm-4-airx | N |
| ZhipuAI | glm-4-long | N |
| ZhipuAI | glm-4-flashx | N |
| ZhipuAI | glm-zero-preview | N |
| ZhipuAI | glm-4-flash | N |
| ZhipuAI | glm-3-turbo | N |
| InternLM | internlm3-latest | N |
| InternLM | internlm3-8b-instruct | N |
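For the newly listed ZhipuAI rows, a hedged usage sketch in the same style as the Gemini example file below. The `ModelPlatformType.ZHIPU` name, the plain-dict config, and the `ZHIPUAI_API_KEY` requirement are assumptions — none of them appear in this diff:

```python
from camel.agents import ChatAgent
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Assumes ZHIPUAI_API_KEY is set in the environment and that the platform
# enum member is named ModelPlatformType.ZHIPU; neither is shown in this diff.
glm_model = ModelFactory.create(
    model_platform=ModelPlatformType.ZHIPU,
    model_type=ModelType.GLM_4V_FLASH,        # one of the newly listed models
    model_config_dict={"temperature": 0.2},   # plain dict; no config class assumed
)

agent = ChatAgent(system_message="You are a helpful assistant.", model=glm_model)
response = agent.step("Say hi to CAMEL AI.")
print(response.msgs[0].content)
```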
20 changes: 19 additions & 1 deletion examples/models/gemini_model_example.py
@@ -45,7 +45,7 @@
'''


# Example of using the newest Gemini-Exp-1114 model
# Example of using the Gemini-Exp-1114 model
model_exp = ModelFactory.create(
model_platform=ModelPlatformType.GEMINI,
model_type=ModelType.GEMINI_EXP_1114,
@@ -73,3 +73,21 @@
Let me know how I can be of service!
===============================================================================
'''

# Example of using the gemini-2.0-flash-exp model
model_2_0_flash = ModelFactory.create(
model_platform=ModelPlatformType.GEMINI,
model_type=ModelType.GEMINI_2_0_FLASH,
model_config_dict=GeminiConfig(temperature=0.2).as_dict(),
)
camel_agent_exp = ChatAgent(system_message=sys_msg, model=model_2_0_flash)
response_exp = camel_agent_exp.step(user_msg)
print(response_exp.msgs[0].content)

'''
===============================================================================
Hello! I'm happy to say hi to CAMEL AI, one open-source community dedicated to
the study of autonomous and communicative agents. It sounds like a fascinating
community!
===============================================================================
'''
1 change: 1 addition & 0 deletions test/models/test_gemini_model.py
@@ -25,6 +25,7 @@
@pytest.mark.parametrize(
"model_type",
[
ModelType.GEMINI_2_0_FLASH,
ModelType.GEMINI_1_5_FLASH,
ModelType.GEMINI_1_5_PRO,
ModelType.GEMINI_EXP_1114,
10 changes: 10 additions & 0 deletions test/models/test_zhipuai_model.py
@@ -28,6 +28,16 @@
ModelType.GLM_3_TURBO,
ModelType.GLM_4,
ModelType.GLM_4V,
ModelType.GLM_4V_FLASH,
ModelType.GLM_4V_PLUS_0111,
ModelType.GLM_4_PLUS,
ModelType.GLM_4_AIR,
ModelType.GLM_4_AIR_0111,
ModelType.GLM_4_AIRX,
ModelType.GLM_4_LONG,
ModelType.GLM_4_FLASHX,
ModelType.GLM_4_FLASH,
ModelType.GLM_ZERO_PREVIEW,
],
)
def test_zhipuai_model(model_type: ModelType):
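To exercise a single one of the new parametrizations, a small helper like the sketch below should work; whether `test_zhipuai_model` calls the live API or is mocked is not visible in this diff, and the `-k` filter relies on pytest deriving test ids from the enum's `str()` form:

```python
# run_glm_test.py — hypothetical helper; equivalent to invoking pytest directly
import sys

import pytest

if __name__ == "__main__":
    # Select only the GLM_4_PLUS parametrization of test_zhipuai_model.
    sys.exit(pytest.main(["test/models/test_zhipuai_model.py", "-k", "GLM_4_PLUS"]))
```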
