This guide shows how to create agents that interact with Qwen models using the official Qwen-Agent framework from https://github.com/QwenLM/Qwen-Agent. The Qwen-Agent framework provides tools for developing LLM applications based on Qwen's instruction following, tool usage, planning, and memory capabilities.
First, install the Qwen-Agent package:
# Basic installation
pip install -U qwen-agent
# Full installation with optional features
pip install -U "qwen-agent[gui,rag,code_interpreter,mcp]"

Here's how to create a basic agent using the Qwen-Agent framework:
import json5
import urllib.parse
from qwen_agent.agents import Assistant
from qwen_agent.tools.base import BaseTool, register_tool
# Optional: Add a custom tool
@register_tool('my_image_gen')
class MyImageGen(BaseTool):
    """Text-to-image tool: turns an English prompt into an image URL."""

    # Shown to the LLM so it knows when this tool applies.
    description = 'AI painting (image generation) service, input text description, and return the image URL drawn based on text information.'
    parameters = [{
        'name': 'prompt',
        'type': 'string',
        'description': 'Detailed description of the desired image content, in English',
        'required': True,
    }]

    def call(self, params: str, **kwargs) -> str:
        """Build a pollinations.ai image URL for the requested prompt."""
        request = json5.loads(params)
        encoded = urllib.parse.quote(request['prompt'])
        return json5.dumps(
            {'image_url': f'https://image.pollinations.ai/prompt/{encoded}'},
            ensure_ascii=False)
# Configure the LLM.
# The DashScope backend reads the key from the DASHSCOPE_API_KEY
# environment variable when 'api_key' is not given explicitly.
llm_cfg = dict(
    model='qwen-max-latest',
    model_type='qwen_dashscope',
    # api_key='YOUR_DASHSCOPE_API_KEY',
)

# Create an agent that can call the custom image-generation tool.
bot = Assistant(
    llm=llm_cfg,
    system_message='You are a helpful assistant.',
    function_list=['my_image_gen'],
)
# Example usage: a simple REPL-style chat loop.
messages = []
while True:
    query = input('\nUser query: ')
    if query.lower() in ('exit', 'quit'):
        break
    messages.append({'role': 'user', 'content': query})

    # bot.run streams incremental response snapshots; keep only the last.
    response = []
    for response in bot.run(messages=messages):
        pass

    # Record the full turn, then show just the assistant-authored text.
    messages.extend(response)
    final_response = ''.join(
        r.get('content', '')
        for r in response
        if r.get('role') == 'assistant')
print(f'Assistant: {final_response}')

Here's a more sophisticated example using the Qwen-Agent framework with multiple tools:
import json5
from qwen_agent.agents import Assistant
from qwen_agent.tools.base import BaseTool, register_tool
# Define custom tools
@register_tool('get_current_time')
class GetCurrentTime(BaseTool):
    """Tool that reports the host's current local date and time."""

    description = 'Get the current time and date'
    parameters = []  # takes no arguments

    def call(self, params: str, **kwargs) -> str:
        """Return the current timestamp as a JSON string."""
        from datetime import datetime

        now = datetime.now()
        return json5.dumps({"time": now.strftime("%Y-%m-%d %H:%M:%S")},
                           ensure_ascii=False)
@register_tool('calculate')
class CalculateTool(BaseTool):
    """Safely evaluate an arithmetic expression supplied by the LLM.

    The expression is parsed with ``ast`` and evaluated by the
    module-level ``eval_expr`` helper, which only permits a whitelist of
    arithmetic operators — names, calls, and attribute access are
    rejected instead of executed.
    """

    description = 'Perform mathematical calculations'
    parameters = [{
        'name': 'expression',
        'type': 'string',
        'description': 'Mathematical expression to evaluate',
        'required': True
    }]

    def call(self, params: str, **kwargs) -> str:
        """Return a JSON string with either a 'result' or an 'error' key."""
        try:
            # Fix: the original also imported `operator` here, but that
            # local import never reaches eval_expr (a module-level
            # function), so it was dead code — removed.
            import ast

            expression = json5.loads(params)['expression']
            # 'eval' mode accepts exactly one expression (no statements).
            node = ast.parse(expression, mode='eval')
            result = eval_expr(node.body)
            return json5.dumps({"result": result}, ensure_ascii=False)
        except Exception as e:
            return json5.dumps({"error": f"Error in calculation: {str(e)}"}, ensure_ascii=False)
# Safe evaluation function for mathematical expressions
def eval_expr(node):
    """Recursively evaluate an ``ast`` expression node using a whitelist
    of arithmetic operators.

    Supports numeric constants, the binary operators ``+ - * / ** %``,
    and unary negation. Anything else raises ``TypeError``.

    Bug fix: the original relied on module-level ``ast`` / ``operator``
    names that were only imported locally inside ``CalculateTool.call``,
    so every evaluation raised ``NameError``. The imports now live here.
    """
    import ast
    import operator

    ops = {
        ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.Div: operator.truediv,
        ast.Pow: operator.pow, ast.Mod: operator.mod,
        ast.USub: operator.neg,
    }
    if isinstance(node, ast.Constant):  # numeric literal
        return node.value
    elif isinstance(node, ast.BinOp):  # e.g. a + b
        left = eval_expr(node.left)
        right = eval_expr(node.right)
        op_fn = ops.get(type(node.op))
        if op_fn is None:  # e.g. floor division, bit ops — not whitelisted
            raise TypeError(f"Unsupported operation: {node}")
        return op_fn(left, right)
    elif isinstance(node, ast.UnaryOp):  # e.g. -a
        operand = eval_expr(node.operand)
        op_fn = ops.get(type(node.op))
        if op_fn is None:
            raise TypeError(f"Unsupported operation: {node}")
        return op_fn(operand)
    else:
        # Names, calls, attributes, subscripts, etc. are refused.
        raise TypeError(f"Unsupported operation: {node}")
# Configure the LLM (DashScope backend; key via DASHSCOPE_API_KEY).
llm_cfg = dict(
    model='qwen-max-latest',
    model_type='qwen_dashscope',
)

# Create an agent wired up with both custom tools defined above.
bot = Assistant(
    llm=llm_cfg,
    system_message='You are a helpful assistant that can perform calculations and provide current time.',
    function_list=['get_current_time', 'calculate'],
)
# Example usage
messages = [
    {'role': 'user', 'content': 'What time is it now?'}
]

# Fix: initialize `response` so the code after the loop cannot raise
# NameError if bot.run() yields no chunks (the first example in this
# guide already guards this way; this one did not).
response = []
for response in bot.run(messages=messages):
    # Each iteration is an incremental snapshot; the last one is final.
    pass

# Print result: concatenate only the assistant-authored message contents.
final_response = ''.join(r.get('content', '') for r in response if r.get('role') == 'assistant')
print(f'Assistant: {final_response}')
messages.extend(response)

# Another example with calculation
messages.append({'role': 'user', 'content': 'Calculate 15 * 24'})
response = []
for response in bot.run(messages=messages):
    pass
final_response = ''.join(r.get('content', '') for r in response if r.get('role') == 'assistant')
print(f'Assistant: {final_response}')

The Qwen-Agent framework also allows creating more customized agents with file processing and code execution capabilities:
from qwen_agent.agents import Assistant
# Configure the LLM with code interpreter capability
llm_cfg = dict(
    model='qwen-max-latest',
    model_type='qwen_dashscope',
)

# Create an agent with file processing and code interpreter
bot = Assistant(
    llm=llm_cfg,
    system_message='You are a helpful assistant that can read documents and execute code. After receiving the user\'s request, you should first analyze the document content and then write code to process the data.',
    function_list=['code_interpreter'],
    # Document the agent can read (made available to the model).
    files=['./examples/resource/doc.pdf'],
)
# Example usage
messages = [
    {'role': 'user', 'content': 'Analyze the document and create a summary.'}
]

# Fix: initialize `response` so the join below cannot raise NameError
# if bot.run() yields no chunks.
response = []
for response in bot.run(messages=messages):
    # Drain the stream; `response` ends as the final message list.
    pass

# Print result
final_response = ''.join(r.get('content', '') for r in response if r.get('role') == 'assistant')
print(f'Assistant: {final_response}')

The Qwen-Agent framework supports MCP (Model Context Protocol) for enhanced capabilities:
from qwen_agent.agents import Assistant
# Configure the LLM with MCP support
llm_cfg = dict(
    model='qwen-max-latest',
    model_type='qwen_dashscope',
)

# Create an agent with MCP-compatible tools
bot = Assistant(
    llm=llm_cfg,
    system_message='You are a helpful assistant with access to various tools through MCP. You can perform web searches, access databases, and more.',
    # These would be MCP-enabled tools registered elsewhere.
    function_list=['web_search', 'database_query'],
)
# Example usage
messages = [
    {'role': 'user', 'content': 'Search for information about quantum computing advancements in 2023'}
]

# Fix: initialize `response` so the join below cannot raise NameError
# if bot.run() yields no chunks.
response = []
for response in bot.run(messages=messages):
    # Drain the stream; `response` ends as the final message list.
    pass

# Print result
final_response = ''.join(r.get('content', '') for r in response if r.get('role') == 'assistant')
print(f'Assistant: {final_response}')

The Qwen-Agent framework also provides a GUI interface using Gradio:
from qwen_agent.agents import Assistant
from qwen_agent.gui import WebUI
# Configure the LLM (DashScope backend; key via DASHSCOPE_API_KEY).
llm_cfg = dict(
    model='qwen-max-latest',
    model_type='qwen_dashscope',
)

# Create a simple assistant with no extra tools.
bot = Assistant(
    llm=llm_cfg,
    system_message='You are a helpful assistant.',
)
# Launch the web interface
WebUI(bot).run()

- Secure Tool Execution: When using the code interpreter or other potentially dangerous tools, ensure proper security measures are in place.
- Handle Large Contexts: Use RAG capabilities for processing documents with large amounts of text.
- Proper Error Handling: Implement error handling for API calls and tool executions.
- Environment Setup: Ensure required environment variables (like `DASHSCOPE_API_KEY`) are set before running agents.
- Model Selection: Choose the appropriate Qwen model based on your requirements (e.g., `qwen-max-latest` for complex tasks, `qwen-turbo` for faster responses).
- Custom Tools: Create custom tools by inheriting from `BaseTool` and using the `@register_tool` decorator to extend agent capabilities.
For the most accurate and up-to-date examples, refer to the Qwen-Agent documentation and examples in the GitHub repository: https://github.com/QwenLM/Qwen-Agent