diff --git a/.gitignore b/.gitignore index 8e066b585..702982568 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ docker/volumes/db/data docker/.env docker/.run docker/deploy.options +k8s/helm/.deploy.options frontend_standalone/ .pnpm-store/ @@ -32,4 +33,4 @@ model-assets/ *coverage_html *.pytest_cache *.coverage -*coverage.xml \ No newline at end of file +*coverage.xml diff --git a/README.md b/README.md index 2a6371d4b..894cd1862 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Docker Pulls](https://img.shields.io/docker/pulls/nexent/nexent?logo=docker&label=DockerPull)](https://hub.docker.com/repositories/nexent) [![Codecov (with branch)](https://img.shields.io/codecov/c/github/ModelEngine-Group/nexent/develop?logo=codecov&color=green)](https://codecov.io/gh/ModelEngine-Group/nexent) -Nexent is a zero-code platform for auto-generating agents — no orchestration, no complex drag-and-drop required, using pure language to develop any agent you want. Built on the MCP ecosystem with rich tool integration, Nexent also provides various built-in agents to meet your intelligent service needs in different scenarios such as work, travel, and daily life. Nexent offers powerful capabilities for agent running control, multi-agent collaboration, data processing and knowledge tracing, multimodal dialogue, and batch scaling. +Nexent is a zero-code platform for auto-generating production-grade AI agents, built on **Harness Engineering** principles. It provides unified tools, skills, memory, and orchestration with built-in constraints, feedback loops, and control planes — no orchestration, no complex drag-and-drop required, using pure language to develop any agent you want. > One prompt. Endless reach. @@ -58,7 +58,7 @@ Most of all, we need visibility. Star ⭐ and watch the repo, share it with frie ## 💬 Community & contact -- Browse the [Documentation](https://modelengine-group.github.io/nexent) for more information. 
+- Browse the [Documentation](https://modelengine-group.github.io/nexent) for more information. - Join our [Discord community](https://discord.gg/tb5H3S3wyv) to chat with other developers and get help! - Conntact us by Wechat, find our QR Code in our [website](https://nexent.tech/en/contact) @@ -119,5 +119,4 @@ Prefer to run Nexent from source code? Follow our [Developer Guide](https://mode # 📄 License -Nexent is licensed under the [MIT](LICENSE) with additional conditions. Please read the [LICENSE](LICENSE) file for details. - +Nexent is licensed under the [MIT License](LICENSE). diff --git a/README_CN.md b/README_CN.md index 7bdfd5209..c16de5d32 100644 --- a/README_CN.md +++ b/README_CN.md @@ -7,7 +7,7 @@ [![Docker Pulls](https://img.shields.io/docker/pulls/nexent/nexent?logo=docker&label=DockerPull)](https://hub.docker.com/repositories/nexent) [![Codecov (with branch)](https://img.shields.io/codecov/c/github/ModelEngine-Group/nexent/develop?logo=codecov&color=green)](https://codecov.io/gh/ModelEngine-Group/nexent) -Nexent 是一个零代码智能体自动生成平台 —— 无需编排,无需复杂的拖拉拽操作,使用纯语言开发你想要的任何智能体。基于MCP生态,具备丰富的工具集成,同时提供多种自带智能体,满足你的工作、旅行、生活等不同场景的智能服务需要。Nexent 还提供强大的智能体运行控制、多智能体协作、数据处理和知识溯源、多模态对话、批量扩展能力。 +Nexent 是一个基于 **Harness Engineering** 原则打造的零代码智能体自动生成平台。集统一工具、技能、记忆和编排能力于一体,内置约束机制、反馈循环和控制平面。无需编排,无需复杂的拖拉拽操作,使用纯语言开发你想要的任何智能体。 > 一个提示词,无限种可能。 @@ -58,7 +58,7 @@ bash deploy.sh ## 💬 社区与联系方式 -- 浏览 [文档](https://modelengine-group.github.io/nexent) 了解更多信息。 +- 浏览 [文档](https://modelengine-group.github.io/nexent) 了解更多信息。 - 加入我们的 [Discord 社区](https://discord.gg/tb5H3S3wyv) 与其他开发者交流并获取帮助! 
- 通过微信联系我们,在我们的[网站](https://nexent.tech/zh/contact)找到二维码 @@ -119,4 +119,4 @@ bash deploy.sh # 📄 许可证 -Nexent 采用 [MIT](LICENSE) 许可证,并附有额外条件。请阅读 [LICENSE](LICENSE) 文件了解详情。 +Nexent 采用 [MIT 许可证](LICENSE)。 diff --git a/backend/agents/create_agent_info.py b/backend/agents/create_agent_info.py index faed9ce79..bc4031e0a 100644 --- a/backend/agents/create_agent_info.py +++ b/backend/agents/create_agent_info.py @@ -1,10 +1,10 @@ import threading import logging +from typing import List from urllib.parse import urljoin from datetime import datetime from jinja2 import Template, StrictUndefined -from smolagents.utils import BASE_BUILTIN_MODULES from nexent.core.utils.observer import MessageObserver from nexent.core.agents.agent_model import AgentRunInfo, ModelConfig, AgentConfig, ToolConfig from nexent.memory.memory_service import search_memory_in_levels @@ -27,11 +27,119 @@ from utils.prompt_template_utils import get_agent_prompt_template from utils.config_utils import tenant_config_manager, get_model_name_from_config from consts.const import LOCAL_MCP_SERVER, MODEL_CONFIG_MAPPING, LANGUAGE, DATA_PROCESS_SERVICE +import re logger = logging.getLogger("create_agent_info") logger.setLevel(logging.DEBUG) +def _get_skills_for_template( + agent_id: int, + tenant_id: str, + version_no: int = 0 +) -> List[dict]: + """Get skills list for prompt template injection. 
+ + Args: + agent_id: Agent ID + tenant_id: Tenant ID + version_no: Version number + + Returns: + List of skill dicts with name and description + """ + try: + from services.skill_service import SkillService + skill_service = SkillService() + enabled_skills = skill_service.get_enabled_skills_for_agent( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no + ) + return [ + {"name": s.get("name", ""), "description": s.get("description", "")} + for s in enabled_skills + ] + except Exception as e: + logger.warning(f"Failed to get skills for template: {e}") + return [] + + +def _get_skill_script_tools( + agent_id: int, + tenant_id: str, + version_no: int = 0 +) -> List[ToolConfig]: + """Get tool config for skill script execution and skill reading. + + Args: + agent_id: Agent ID for filtering available skills in error messages. + tenant_id: Tenant ID for filtering available skills in error messages. + version_no: Version number for filtering available skills. + + Returns: + List of ToolConfig for skill execution and reading tools + """ + from consts.const import CONTAINER_SKILLS_PATH + + skill_context = { + "agent_id": agent_id, + "tenant_id": tenant_id, + "version_no": version_no, + } + + try: + return [ + ToolConfig( + class_name="RunSkillScriptTool", + name="run_skill_script", + description="Execute a skill script with given parameters. Use this to run Python or shell scripts that are part of a skill.", + inputs='{"skill_name": "str", "script_path": "str", "params": "dict"}', + output_type="string", + params={"local_skills_dir": CONTAINER_SKILLS_PATH}, + source="builtin", + usage="builtin", + metadata=skill_context, + ), + ToolConfig( + class_name="ReadSkillMdTool", + name="read_skill_md", + description="Read skill execution guide and optional additional files. 
Always reads SKILL.md first, then optionally reads additional files.", + inputs='{"skill_name": "str", "additional_files": "list[str]"}', + output_type="string", + params={"local_skills_dir": CONTAINER_SKILLS_PATH}, + source="builtin", + usage="builtin", + metadata=skill_context, + ), + ToolConfig( + class_name="ReadSkillConfigTool", + name="read_skill_config", + description="Read the config.yaml file from a skill directory. Returns JSON containing configuration variables needed for skill workflows.", + inputs='{"skill_name": "str"}', + output_type="string", + params={"local_skills_dir": CONTAINER_SKILLS_PATH}, + source="builtin", + usage="builtin", + metadata=skill_context, + ), + ToolConfig( + class_name="WriteSkillFileTool", + name="write_skill_file", + description="Write content to a file within a skill directory. Creates parent directories if they do not exist.", + inputs='{"skill_name": "str", "file_path": "str", "content": "str"}', + output_type="string", + params={"local_skills_dir": CONTAINER_SKILLS_PATH}, + source="builtin", + usage="builtin", + metadata=skill_context, + ) + ] + except Exception as e: + logger.warning(f"Failed to load skill script tool: {e}") + return [] + + async def create_model_config_list(tenant_id): records = get_model_records({"model_type": "llm"}, tenant_id) model_list = [] @@ -169,22 +277,26 @@ async def create_agent_config( logger.error(f"Failed to build knowledge base summary: {e}") # Assemble system_prompt - if duty_prompt or constraint_prompt or few_shots_prompt: - system_prompt = Template(prompt_template["system_prompt"], undefined=StrictUndefined).render({ - "duty": duty_prompt, - "constraint": constraint_prompt, - "few_shots": few_shots_prompt, - "tools": {tool.name: tool for tool in tool_list}, - "managed_agents": {agent.name: agent for agent in managed_agents}, - "authorized_imports": str(BASE_BUILTIN_MODULES), - "APP_NAME": app_name, - "APP_DESCRIPTION": app_description, - "memory_list": memory_list, - 
"knowledge_base_summary": knowledge_base_summary, - "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S") - }) - else: - system_prompt = agent_info.get("prompt", "") + # Get skills list for prompt template + skills = _get_skills_for_template(agent_id, tenant_id, version_no) + + render_kwargs = { + "duty": duty_prompt, + "constraint": constraint_prompt, + "few_shots": few_shots_prompt, + "tools": {tool.name: tool for tool in tool_list}, + "skills": skills, + "managed_agents": {agent.name: agent for agent in managed_agents}, + "APP_NAME": app_name, + "APP_DESCRIPTION": app_description, + "memory_list": memory_list, + "knowledge_base_summary": knowledge_base_summary, + "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + "user_id": user_id, + } + system_prompt = Template(prompt_template["system_prompt"], undefined=StrictUndefined).render(render_kwargs) + + _print_prompt_with_token_count(system_prompt, agent_id, "BEFORE_INJECTION") if agent_info.get("model_id") is not None: model_info = get_model_by_model_id(agent_info.get("model_id")) @@ -197,9 +309,10 @@ async def create_agent_config( prompt_templates=await prepare_prompt_templates( is_manager=len(managed_agents) > 0, system_prompt=system_prompt, - language=language + language=language, + agent_id=agent_id ), - tools=tool_list, + tools=tool_list + _get_skill_script_tools(agent_id, tenant_id, version_no), max_steps=agent_info.get("max_steps", 10), model_name=model_name, provide_run_summary=agent_info.get("provide_run_summary", False), @@ -296,7 +409,12 @@ async def discover_langchain_tools(): return langchain_tools -async def prepare_prompt_templates(is_manager: bool, system_prompt: str, language: str = 'zh'): +async def prepare_prompt_templates( + is_manager: bool, + system_prompt: str, + language: str = 'zh', + agent_id: int = None, +): """ Prepare prompt templates, support multiple languages @@ -304,15 +422,33 @@ async def prepare_prompt_templates(is_manager: bool, system_prompt: str, languag is_manager: Whether 
it is a manager mode system_prompt: System prompt content language: Language code ('zh' or 'en') + agent_id: Agent ID for fetching skill instances Returns: dict: Prompt template configuration """ prompt_templates = get_agent_prompt_template(is_manager, language) prompt_templates["system_prompt"] = system_prompt + + # Print final prompt with all injections + _print_prompt_with_token_count(prompt_templates["system_prompt"], agent_id, "FINAL_PROMPT") + return prompt_templates +def _print_prompt_with_token_count(prompt: str, agent_id: int = None, stage: str = "PROMPT"): + """Print prompt content and estimate token count using tiktoken.""" + try: + import tiktoken + encoding = tiktoken.get_encoding("cl100k_base") + token_count = len(encoding.encode(prompt)) + logger.info(f"[Skill Debug][{stage}] Agent {agent_id} token count: {token_count}") + logger.info(f"[Skill Debug][{stage}] Agent {agent_id} prompt:\n{prompt}") + except Exception as e: + logger.warning(f"[Skill Debug][{stage}] Failed to count tokens: {e}") + logger.info(f"[Skill Debug][{stage}] Agent {agent_id} prompt:\n{prompt}") + + async def join_minio_file_description_to_query(minio_files, query): final_query = query if minio_files and isinstance(minio_files, list): diff --git a/backend/apps/agent_app.py b/backend/apps/agent_app.py index a42d11b53..595569050 100644 --- a/backend/apps/agent_app.py +++ b/backend/apps/agent_app.py @@ -20,7 +20,8 @@ run_agent_stream, stop_agent_tasks, get_agent_call_relationship_impl, - clear_agent_new_mark_impl + clear_agent_new_mark_impl, + get_agent_by_name_impl, ) from services.agent_version_service import ( publish_version_impl, @@ -100,6 +101,27 @@ async def search_agent_info_api( status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Agent search info error.") +@agent_config_router.get("/by-name/{agent_name}") +async def get_agent_by_name_api( + agent_name: str, + tenant_id: Optional[str] = Query( + None, description="Tenant ID for filtering (uses auth if not provided)"), + 
authorization: Optional[str] = Header(None) +): + """ + Look up an agent by name and return its agent_id and highest published version_no. + """ + try: + _, auth_tenant_id = get_current_user_id(authorization) + effective_tenant_id = tenant_id or auth_tenant_id + result = get_agent_by_name_impl(agent_name, effective_tenant_id) + return JSONResponse(status_code=HTTPStatus.OK, content=result) + except Exception as e: + logger.error(f"Agent by name lookup error: {str(e)}") + raise HTTPException( + status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Agent not found.") + + @agent_config_router.get("/get_creating_sub_agent_id") async def get_creating_sub_agent_info_api(authorization: Optional[str] = Header(None)): """ diff --git a/backend/apps/config_app.py b/backend/apps/config_app.py index 58e2b008b..ec1db6e7a 100644 --- a/backend/apps/config_app.py +++ b/backend/apps/config_app.py @@ -14,6 +14,7 @@ from apps.model_managment_app import router as model_manager_router from apps.prompt_app import router as prompt_router from apps.remote_mcp_app import router as remote_mcp_router +from apps.skill_app import router as skill_router from apps.tenant_config_app import router as tenant_config_router from apps.tool_config_app import router as tool_config_router from apps.user_management_app import router as user_management_router @@ -52,6 +53,7 @@ app.include_router(summary_router) app.include_router(prompt_router) +app.include_router(skill_router) app.include_router(tenant_config_router) app.include_router(remote_mcp_router) app.include_router(tenant_router) diff --git a/backend/apps/skill_app.py b/backend/apps/skill_app.py new file mode 100644 index 000000000..8bf19e8b7 --- /dev/null +++ b/backend/apps/skill_app.py @@ -0,0 +1,540 @@ +"""Skill management HTTP endpoints.""" + +import logging +import os +import re +from typing import Any, Dict, List, Optional + +from fastapi import APIRouter, HTTPException, Query, UploadFile, File, Form, Header +from starlette.responses import 
JSONResponse +from pydantic import BaseModel + +from consts.exceptions import SkillException, UnauthorizedError +from services.skill_service import SkillService +from consts.model import SkillInstanceInfoRequest +from utils.auth_utils import get_current_user_id + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/skills", tags=["skills"]) + + +class SkillCreateRequest(BaseModel): + """Request model for creating a skill.""" + name: str + description: str + content: str + tool_ids: Optional[List[int]] = [] # Use tool_id list, link to ag_tool_info_t + tool_names: Optional[List[str]] = [] # Alternative: use tool name list, will be converted to tool_ids + tags: Optional[List[str]] = [] + source: Optional[str] = "custom" # official, custom, partner + params: Optional[Dict[str, Any]] = None # Skill config (JSON object) + + +class SkillUpdateRequest(BaseModel): + """Request model for updating a skill.""" + description: Optional[str] = None + content: Optional[str] = None + tool_ids: Optional[List[int]] = None # Use tool_id list + tool_names: Optional[List[str]] = None # Alternative: use tool name list, will be converted to tool_ids + tags: Optional[List[str]] = None + source: Optional[str] = None + params: Optional[Dict[str, Any]] = None + + +class SkillResponse(BaseModel): + """Response model for skill data.""" + skill_id: int + name: str + description: str + content: str + tool_ids: List[int] + tags: List[str] + source: str + params: Optional[Dict[str, Any]] = None + created_by: Optional[str] = None + create_time: Optional[str] = None + updated_by: Optional[str] = None + update_time: Optional[str] = None + + +# List routes first (no path parameters) +@router.get("") +async def list_skills() -> JSONResponse: + """List all available skills.""" + try: + service = SkillService() + skills = service.list_skills() + return JSONResponse(content={"skills": skills}) + except SkillException as e: + raise HTTPException(status_code=500, detail=str(e)) + except 
Exception as e: + logger.error(f"Error listing skills: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +# POST routes +@router.post("") +async def create_skill( + request: SkillCreateRequest, + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Create a new skill (JSON format).""" + try: + user_id, tenant_id = get_current_user_id(authorization) + service = SkillService() + + # Convert tool_names to tool_ids if provided + tool_ids = request.tool_ids or [] + if request.tool_names: + tool_ids = service.repository.get_tool_ids_by_names(request.tool_names, tenant_id) + + skill_data = { + "name": request.name, + "description": request.description, + "content": request.content, + "tool_ids": tool_ids, + "tags": request.tags, + "source": request.source, + "params": request.params, + } + skill = service.create_skill(skill_data, user_id=user_id) + return JSONResponse(content=skill, status_code=201) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except SkillException as e: + error_msg = str(e).lower() + if "already exists" in error_msg: + raise HTTPException(status_code=409, detail=str(e)) + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error creating skill: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.post("/upload") +async def create_skill_from_file( + file: UploadFile = File(..., description="SKILL.md file or ZIP archive"), + skill_name: Optional[str] = Form(None, description="Optional skill name override"), + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Create a skill from file upload. 
+ + Supports two formats: + - Single SKILL.md file: Extracts metadata and saves directly + - ZIP archive: Contains SKILL.md plus scripts/assets folders + """ + try: + user_id, tenant_id = get_current_user_id(authorization) + service = SkillService() + + content = await file.read() + + file_type = "auto" + if file.filename: + if file.filename.endswith(".zip"): + file_type = "zip" + elif file.filename.endswith(".md"): + file_type = "md" + + skill = service.create_skill_from_file( + file_content=content, + skill_name=skill_name, + file_type=file_type, + user_id=user_id, + tenant_id=tenant_id + ) + return JSONResponse(content=skill, status_code=201) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except SkillException as e: + error_msg = str(e).lower() + if "already exists" in error_msg: + raise HTTPException(status_code=409, detail=str(e)) + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error creating skill from file: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +# Routes with path parameters +@router.get("/{skill_name}/files") +async def get_skill_file_tree(skill_name: str) -> JSONResponse: + """Get file tree structure of a skill.""" + try: + service = SkillService() + tree = service.get_skill_file_tree(skill_name) + if not tree: + raise HTTPException(status_code=404, detail=f"Skill not found: {skill_name}") + return JSONResponse(content=tree) + except HTTPException: + raise + except SkillException as e: + raise HTTPException(status_code=500, detail=str(e)) + except Exception as e: + logger.error(f"Error getting skill file tree: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.get("/{skill_name}/files/{file_path:path}") +async def get_skill_file_content( + skill_name: str, + file_path: str +) -> JSONResponse: + """Get content of a specific file within a skill. 
+ + Args: + skill_name: Name of the skill + file_path: Relative path to the file within the skill directory + """ + try: + service = SkillService() + content = service.get_skill_file_content(skill_name, file_path) + if content is None: + raise HTTPException(status_code=404, detail=f"File not found: {file_path}") + return JSONResponse(content={"content": content}) + except HTTPException: + raise + except SkillException as e: + raise HTTPException(status_code=500, detail=str(e)) + except Exception as e: + logger.error(f"Error getting skill file content: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.put("/{skill_name}/upload") +async def update_skill_from_file( + skill_name: str, + file: UploadFile = File(..., description="SKILL.md file or ZIP archive"), + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Update a skill from file upload. + + Supports both SKILL.md and ZIP formats. + """ + try: + user_id, tenant_id = get_current_user_id(authorization) + service = SkillService() + + content = await file.read() + + file_type = "auto" + if file.filename: + if file.filename.endswith(".zip"): + file_type = "zip" + elif file.filename.endswith(".md"): + file_type = "md" + + skill = service.update_skill_from_file( + skill_name=skill_name, + file_content=content, + file_type=file_type, + user_id=user_id, + tenant_id=tenant_id + ) + return JSONResponse(content=skill) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except SkillException as e: + if "not found" in str(e).lower(): + raise HTTPException(status_code=404, detail=str(e)) + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error updating skill from file: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +# ============== Skill Instance APIs ============== + +@router.get("/instance") +async def get_skill_instance( + agent_id: int = Query(..., 
description="Agent ID"), + skill_id: int = Query(..., description="Skill ID"), + version_no: int = Query(0, description="Version number (0 for draft)"), + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Get a specific skill instance for an agent.""" + try: + _, tenant_id = get_current_user_id(authorization) + + service = SkillService() + instance = service.get_skill_instance( + agent_id=agent_id, + skill_id=skill_id, + tenant_id=tenant_id, + version_no=version_no + ) + + if not instance: + raise HTTPException( + status_code=404, + detail=f"Skill instance not found for agent {agent_id} and skill {skill_id}" + ) + + # Enrich with skill info from ag_skill_info_t (skill_name, skill_description, skill_content, params) + skill = service.get_skill_by_id(skill_id) + if skill: + instance["skill_name"] = skill.get("name") + instance["skill_description"] = skill.get("description", "") + instance["skill_content"] = skill.get("content", "") + instance["skill_params"] = skill.get("params") or {} + + return JSONResponse(content=instance) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting skill instance: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.post("/instance/update") +async def update_skill_instance( + request: SkillInstanceInfoRequest, + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Create or update a skill instance for a specific agent. + + This allows customizing skill content for a specific agent without + modifying the global skill definition. 
+ """ + try: + user_id, tenant_id = get_current_user_id(authorization) + + # Validate skill exists + service = SkillService() + skill = service.get_skill_by_id(request.skill_id) + if not skill: + raise HTTPException(status_code=404, detail=f"Skill with ID {request.skill_id} not found") + + # Create or update skill instance + instance = service.create_or_update_skill_instance( + skill_info=request, + tenant_id=tenant_id, + user_id=user_id, + version_no=request.version_no + ) + + return JSONResponse(content={"message": "Skill instance updated", "instance": instance}) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except HTTPException: + raise + except SkillException as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error updating skill instance: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.get("/instance/list") +async def list_skill_instances( + agent_id: int = Query(..., description="Agent ID to query skill instances"), + version_no: int = Query(0, description="Version number (0 for draft)"), + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """List all skill instances for a specific agent.""" + try: + _, tenant_id = get_current_user_id(authorization) + + service = SkillService() + + instances = service.list_skill_instances( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no + ) + + # Enrich with skill info from ag_skill_info_t (skill_name, skill_description, skill_content, params) + for instance in instances: + skill = service.get_skill_by_id(instance.get("skill_id")) + if skill: + instance["skill_name"] = skill.get("name") + instance["skill_description"] = skill.get("description", "") + instance["skill_content"] = skill.get("content", "") + instance["skill_params"] = skill.get("params") or {} + + return JSONResponse(content={"instances": instances}) + except UnauthorizedError as e: + raise 
HTTPException(status_code=401, detail=str(e)) + except Exception as e: + logger.error(f"Error listing skill instances: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.get("/{skill_name}") +async def get_skill(skill_name: str) -> JSONResponse: + """Get a specific skill by name.""" + try: + service = SkillService() + skill = service.get_skill(skill_name) + if not skill: + raise HTTPException(status_code=404, detail=f"Skill not found: {skill_name}") + return JSONResponse(content=skill) + except HTTPException: + raise + except SkillException as e: + raise HTTPException(status_code=500, detail=str(e)) + except Exception as e: + logger.error(f"Error getting skill {skill_name}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.put("/{skill_name}") +async def update_skill( + skill_name: str, + request: SkillUpdateRequest, + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Update an existing skill. + + Audit field updated_by is set from the authenticated user only; it is not read from the JSON body. 
+ """ + try: + user_id, tenant_id = get_current_user_id(authorization) + service = SkillService() + update_data = {} + if request.description is not None: + update_data["description"] = request.description + if request.content is not None: + update_data["content"] = request.content + if request.tool_ids is not None: + # Convert tool_names to tool_ids if tool_names provided, else use tool_ids directly + if request.tool_names: + update_data["tool_ids"] = service.repository.get_tool_ids_by_names(request.tool_names, tenant_id) + else: + update_data["tool_ids"] = request.tool_ids + elif request.tool_names is not None: + # Only tool_names provided, convert to tool_ids + update_data["tool_ids"] = service.repository.get_tool_ids_by_names(request.tool_names, tenant_id) + if request.tags is not None: + update_data["tags"] = request.tags + if request.source is not None: + update_data["source"] = request.source + if request.params is not None: + update_data["params"] = request.params + + if not update_data: + raise HTTPException(status_code=400, detail="No fields to update") + + skill = service.update_skill(skill_name, update_data, user_id=user_id) + return JSONResponse(content=skill) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except SkillException as e: + if "not found" in str(e).lower(): + raise HTTPException(status_code=404, detail=str(e)) + raise HTTPException(status_code=400, detail=str(e)) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error updating skill {skill_name}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.delete("/{skill_name}") +async def delete_skill( + skill_name: str, + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Delete a skill.""" + try: + user_id, _ = get_current_user_id(authorization) + service = SkillService() + service.delete_skill(skill_name, user_id=user_id) + return JSONResponse(content={"message": f"Skill 
{skill_name} deleted successfully"}) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except SkillException as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error deleting skill {skill_name}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + + +@router.delete("/{skill_name}/files/{file_path:path}") +async def delete_skill_file( + skill_name: str, + file_path: str, + authorization: Optional[str] = Header(None) +) -> JSONResponse: + """Delete a specific file within a skill directory. + + Args: + skill_name: Name of the skill + file_path: Relative path to the file within the skill directory + """ + try: + _, _ = get_current_user_id(authorization) + service = SkillService() + + # Validate skill_name so it cannot be used for path traversal + if not skill_name: + raise HTTPException(status_code=400, detail="Invalid skill name") + if os.sep in skill_name or "/" in skill_name or ".." in skill_name: + raise HTTPException(status_code=400, detail="Invalid skill name") + + # Read config to get temp_filename for validation + config_content = service.get_skill_file_content(skill_name, "config.yaml") + if config_content is None: + raise HTTPException(status_code=404, detail="Config file not found") + + # Parse config to get temp_filename + import yaml + config = yaml.safe_load(config_content) + temp_filename = config.get("temp_filename", "") + + # Get the base directory for the skill + local_dir = os.path.join(service.skill_manager.local_skills_dir, skill_name) + + # Check for path traversal patterns in the raw file_path BEFORE any normalization + # This catches attempts like ../../etc/passwd or /etc/passwd + normalized_for_check = os.path.normpath(file_path) + if ".." 
in file_path or file_path.startswith("/") or (os.sep in file_path and file_path.startswith(os.sep)): + # Additional check: ensure the normalized path doesn't escape local_dir + abs_local_dir = os.path.abspath(local_dir) + abs_full_path = os.path.abspath(os.path.join(local_dir, normalized_for_check)) + try: + common = os.path.commonpath([abs_local_dir, abs_full_path]) + if common != abs_local_dir: + raise HTTPException(status_code=400, detail="Invalid file path: path traversal detected") + except ValueError: + raise HTTPException(status_code=400, detail="Invalid file path: path traversal detected") + + # Normalize the requested file path - use basename to strip directory components + safe_file_path = os.path.basename(os.path.normpath(file_path)) + + # Build full path and validate it stays within local_dir + full_path = os.path.normpath(os.path.join(local_dir, safe_file_path)) + abs_local_dir = os.path.abspath(local_dir) + abs_full_path = os.path.abspath(full_path) + + # Check for path traversal: abs_full_path should be within abs_local_dir + try: + common = os.path.commonpath([abs_local_dir, abs_full_path]) + if common != abs_local_dir: + raise HTTPException(status_code=400, detail="Invalid file path: path traversal detected") + except ValueError: + # Different drives on Windows + raise HTTPException(status_code=400, detail="Invalid file path: path traversal detected") + + # Validate the filename matches temp_filename + if not temp_filename or safe_file_path != temp_filename: + raise HTTPException(status_code=400, detail="Can only delete temp_filename files") + + # Check if file exists + if not os.path.exists(full_path): + raise HTTPException(status_code=404, detail=f"File not found: {safe_file_path}") + + os.remove(full_path) + logger.info(f"Deleted skill file: {full_path}") + + return JSONResponse(content={"message": f"File {safe_file_path} deleted successfully"}) + except UnauthorizedError as e: + raise HTTPException(status_code=401, detail=str(e)) + except 
HTTPException: + raise + except Exception as e: + logger.error(f"Error deleting skill file {skill_name}/{file_path}: {e}") + raise HTTPException(status_code=500, detail=str(e)) diff --git a/backend/apps/vectordatabase_app.py b/backend/apps/vectordatabase_app.py index 04ea9820f..872b5387b 100644 --- a/backend/apps/vectordatabase_app.py +++ b/backend/apps/vectordatabase_app.py @@ -18,7 +18,7 @@ from services.redis_service import get_redis_service from utils.auth_utils import get_current_user_id from utils.file_management_utils import get_all_files_status -from database.knowledge_db import get_index_name_by_knowledge_name +from database.knowledge_db import get_index_name_by_knowledge_name, get_knowledge_record router = APIRouter(prefix="/indices") service = ElasticSearchService() @@ -54,7 +54,7 @@ def create_new_index( embedding_dim: Optional[int] = Query( None, description="Dimension of the embedding vectors"), request: Dict[str, Any] = Body( - None, description="Request body with optional fields (ingroup_permission, group_ids)"), + None, description="Request body with optional fields (ingroup_permission, group_ids, embedding_model_name)"), vdb_core: VectorDatabaseCore = Depends(get_vector_db_core), authorization: Optional[str] = Header(None) ): @@ -65,9 +65,11 @@ def create_new_index( # Extract optional fields from request body ingroup_permission = None group_ids = None + embedding_model_name = None if request: ingroup_permission = request.get("ingroup_permission") group_ids = request.get("group_ids") + embedding_model_name = request.get("embedding_model_name") # Treat path parameter as user-facing knowledge base name for new creations return ElasticSearchService.create_knowledge_base( @@ -78,6 +80,7 @@ def create_new_index( tenant_id=tenant_id, ingroup_permission=ingroup_permission, group_ids=group_ids, + embedding_model_name=embedding_model_name, ) except Exception as e: raise HTTPException( @@ -195,7 +198,16 @@ def create_index_documents( """ try: user_id, 
tenant_id = get_current_user_id(authorization) - embedding_model = get_embedding_model(tenant_id) + + # Get the knowledge base record to retrieve the saved embedding model + knowledge_record = get_knowledge_record({'index_name': index_name}) + saved_embedding_model_name = None + if knowledge_record: + saved_embedding_model_name = knowledge_record.get('embedding_model_name') + + # Use the saved model from knowledge base, fallback to tenant default if not set + embedding_model = get_embedding_model(tenant_id, saved_embedding_model_name) + return ElasticSearchService.index_documents( embedding_model=embedding_model, index_name=index_name, diff --git a/backend/consts/const.py b/backend/consts/const.py index a5fc1c005..94ddd495d 100644 --- a/backend/consts/const.py +++ b/backend/consts/const.py @@ -35,6 +35,9 @@ class VectorDatabaseType(str, Enum): UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER', 'uploads') ROOT_DIR = os.getenv("ROOT_DIR") +# Container-internal skills storage path +CONTAINER_SKILLS_PATH = os.getenv("SKILLS_PATH") + # Preview Configuration FILE_PREVIEW_SIZE_LIMIT = 100 * 1024 * 1024 # 100MB @@ -322,5 +325,11 @@ class VectorDatabaseType(str, Enum): # Model Engine Configuration MODEL_ENGINE_ENABLED = os.getenv("MODEL_ENGINE_ENABLED") + +# Container Platform Configuration +IS_DEPLOYED_BY_KUBERNETES = os.getenv("IS_DEPLOYED_BY_KUBERNETES", "false").lower() == "true" +KUBERNETES_NAMESPACE = os.getenv("KUBERNETES_NAMESPACE", "nexent") + + # APP Version APP_VERSION = "v2.0.0" diff --git a/backend/consts/exceptions.py b/backend/consts/exceptions.py index 369c24aab..c4e01e5bb 100644 --- a/backend/consts/exceptions.py +++ b/backend/consts/exceptions.py @@ -195,6 +195,11 @@ class DataMateConnectionError(Exception): pass +class SkillException(Exception): + """Raised when skill operations fail.""" + pass + + # ==================== Legacy Aliases (same as above, for compatibility) ==================== # These are additional aliases that map to the same simple exception 
classes above. # They provide backward compatibility for code that uses these names. diff --git a/backend/consts/model.py b/backend/consts/model.py index 6aea42fa9..2728d95ca 100644 --- a/backend/consts/model.py +++ b/backend/consts/model.py @@ -131,7 +131,6 @@ class GlobalConfig(BaseModel): class AgentRequest(BaseModel): query: str conversation_id: Optional[int] = None - is_set: Optional[bool] = False history: Optional[List[Dict]] = None # Complete list of attachment information minio_files: Optional[List[Dict[str, Any]]] = None @@ -277,6 +276,7 @@ class AgentInfoRequest(BaseModel): business_logic_model_name: Optional[str] = None business_logic_model_id: Optional[int] = None enabled_tool_ids: Optional[List[int]] = None + enabled_skill_ids: Optional[List[int]] = None related_agent_ids: Optional[List[int]] = None group_ids: Optional[List[int]] = None ingroup_permission: Optional[str] = None @@ -295,6 +295,18 @@ class ToolInstanceInfoRequest(BaseModel): version_no: int = 0 +class SkillInstanceInfoRequest(BaseModel): + """Request model for skill instance update. + + Note: skill_description and skill_content are no longer accepted. + These fields are now retrieved from ag_skill_info_t table. 
+ """ + skill_id: int + agent_id: int + enabled: bool = True + version_no: int = 0 + + class ToolInstanceSearchRequest(BaseModel): tool_id: int agent_id: int @@ -304,11 +316,13 @@ class ToolSourceEnum(Enum): LOCAL = "local" MCP = "mcp" LANGCHAIN = "langchain" + BUILTIN = "builtin" class ToolInfo(BaseModel): name: str description: str + description_zh: Optional[str] = None params: List source: str inputs: str diff --git a/backend/database/agent_version_db.py b/backend/database/agent_version_db.py index b2877bdb1..4df0158a8 100644 --- a/backend/database/agent_version_db.py +++ b/backend/database/agent_version_db.py @@ -3,7 +3,7 @@ from sqlalchemy import select, insert, update, func from database.client import get_db_session, as_dict -from database.db_models import AgentInfo, ToolInstance, AgentRelation, AgentVersion +from database.db_models import AgentInfo, ToolInstance, AgentRelation, AgentVersion, SkillInstance logger = logging.getLogger("agent_version_db") @@ -370,6 +370,34 @@ def delete_relation_snapshot( return result.rowcount +def delete_skill_snapshot( + agent_id: int, + tenant_id: str, + version_no: int, + deleted_by: str = None, +) -> int: + """ + Delete all skill instance snapshots for a version (used when deleting a version) + Returns: number of rows affected + """ + with get_db_session() as session: + values = {'delete_flag': 'Y'} + if deleted_by: + values['updated_by'] = deleted_by + values['update_time'] = func.now() + result = session.execute( + update(SkillInstance) + .where( + SkillInstance.agent_id == agent_id, + SkillInstance.tenant_id == tenant_id, + SkillInstance.version_no == version_no, + SkillInstance.delete_flag == 'N', + ) + .values(**values) + ) + return result.rowcount + + def get_next_version_no( agent_id: int, tenant_id: str, @@ -410,4 +438,34 @@ def delete_version( ) rows_affected = result.rowcount logger.info(f"Delete version result: rows_affected={rows_affected} for agent_id={agent_id}, tenant_id={tenant_id}, 
version_no={version_no}") - return rows_affected \ No newline at end of file + return rows_affected + + +# ============== Skill Instance Snapshot Functions ============== + +def query_skill_instances_snapshot( + agent_id: int, + tenant_id: str, + version_no: int, +) -> List[dict]: + """ + Query skill instances snapshot for a specific version. + """ + with get_db_session() as session: + skills = session.query(SkillInstance).filter( + SkillInstance.agent_id == agent_id, + SkillInstance.tenant_id == tenant_id, + SkillInstance.version_no == version_no, + SkillInstance.delete_flag == 'N', + ).all() + return [as_dict(s) for s in skills] + + +def insert_skill_snapshot( + skill_data: dict, +) -> None: + """ + Insert skill instance snapshot. + """ + with get_db_session() as session: + session.execute(insert(SkillInstance).values(**skill_data)) \ No newline at end of file diff --git a/backend/database/client.py b/backend/database/client.py index 37e5dba03..7f54532bf 100644 --- a/backend/database/client.py +++ b/backend/database/client.py @@ -268,10 +268,19 @@ def get_db_session(db_session=None): def as_dict(obj): + from datetime import datetime # Handle SQLAlchemy ORM objects (both TableBase and other DeclarativeBase subclasses) if hasattr(obj, '__class__') and hasattr(obj.__class__, '__mapper__'): - return {c.key: getattr(obj, c.key) for c in class_mapper(obj.__class__).columns} + result = {} + for c in class_mapper(obj.__class__).columns: + value = getattr(obj, c.key) + # Convert datetime to ISO format string for JSON serialization + if isinstance(value, datetime): + result[c.key] = value.isoformat() + else: + result[c.key] = value + return result # noinspection PyProtectedMember return dict(obj._mapping) diff --git a/backend/database/db_models.py b/backend/database/db_models.py index 80dcc87eb..a1b28334c 100644 --- a/backend/database/db_models.py +++ b/backend/database/db_models.py @@ -512,3 +512,58 @@ class UserTokenUsageLog(TableBase): call_function_name = 
Column(String(100), doc="API function name being called") related_id = Column(Integer, doc="Related resource ID (e.g., conversation_id)") meta_data = Column(JSONB, doc="Additional metadata for this usage log entry, stored as JSON") + + +class SkillInfo(TableBase): + """ + Skill information table - stores skill metadata and content. + """ + __tablename__ = "ag_skill_info_t" + __table_args__ = {"schema": SCHEMA} + + skill_id = Column(Integer, Sequence("ag_skill_info_t_skill_id_seq", schema=SCHEMA), + primary_key=True, nullable=False, autoincrement=True, doc="Skill ID") + skill_name = Column(String(100), nullable=False, unique=True, doc="Unique skill name") + skill_description = Column(String(1000), doc="Skill description") + skill_tags = Column(JSON, doc="Skill tags as JSON array") + skill_content = Column(Text, doc="Skill content in markdown format") + params = Column(JSON, doc="Skill configuration parameters as JSON object") + source = Column(String(30), nullable=False, default="official", + doc="Skill source: official, custom, etc.") + + +class SkillToolRelation(TableBase): + """ + Skill-Tool relation table - many-to-many relationship between skills and tools. + """ + __tablename__ = "ag_skill_tools_rel_t" + __table_args__ = {"schema": SCHEMA} + + rel_id = Column(Integer, Sequence("ag_skill_tools_rel_t_rel_id_seq", schema=SCHEMA), + primary_key=True, nullable=False, autoincrement=True, doc="Relation ID") + skill_id = Column(Integer, nullable=False, doc="Foreign key to ag_skill_info_t.skill_id") + tool_id = Column(Integer, nullable=False, doc="Foreign key to ag_tool_info_t.tool_id") + + +class SkillInstance(TableBase): + """ + Skill instance table - stores per-agent skill configuration. + Similar to ToolInstance, stores skill settings for each agent version. + Note: skill_description and skill_content removed - these are now retrieved from ag_skill_info_t. 
+ """ + __tablename__ = "ag_skill_instance_t" + __table_args__ = {"schema": SCHEMA} + + skill_instance_id = Column( + Integer, + Sequence("ag_skill_instance_t_skill_instance_id_seq", schema=SCHEMA), + primary_key=True, + nullable=False, + doc="Skill instance ID" + ) + skill_id = Column(Integer, nullable=False, doc="Foreign key to ag_skill_info_t.skill_id") + agent_id = Column(Integer, nullable=False, doc="Agent ID") + user_id = Column(String(100), doc="User ID") + tenant_id = Column(String(100), doc="Tenant ID") + enabled = Column(Boolean, default=True, doc="Whether this skill is enabled for the agent") + version_no = Column(Integer, default=0, primary_key=True, nullable=False, doc="Version number. 0 = draft/editing state, >=1 = published snapshot") diff --git a/backend/database/skill_db.py b/backend/database/skill_db.py new file mode 100644 index 000000000..a6a483af4 --- /dev/null +++ b/backend/database/skill_db.py @@ -0,0 +1,447 @@ +"""Skill instance and skill info database operations.""" + +import json +import logging +from datetime import datetime +from typing import Any, Dict, List, Optional + +from sqlalchemy import update as sa_update + +from database.client import get_db_session, filter_property, as_dict +from database.db_models import SkillInfo, SkillToolRelation, SkillInstance, ToolInfo +from utils.skill_params_utils import strip_params_comments_for_db + +logger = logging.getLogger(__name__) + + +def _params_value_for_db(raw: Any) -> Any: + """Strip UI/YAML comment metadata, then JSON round-trip for the DB JSON column.""" + if raw is None: + return None + stripped = strip_params_comments_for_db(raw) + return json.loads(json.dumps(stripped, default=str)) + + +def create_or_update_skill_by_skill_info(skill_info, tenant_id: str, user_id: str, version_no: int = 0): + """ + Create or update a SkillInstance in the database. + Default version_no=0 operates on the draft version. 
+ + Args: + skill_info: Dictionary or object containing skill instance information + tenant_id: Tenant ID for filtering, mandatory + user_id: User ID for updating (will be set as the last updater) + version_no: Version number to filter. Default 0 = draft/editing state + + Returns: + Created or updated SkillInstance object + """ + skill_info_dict = skill_info.__dict__ if hasattr(skill_info, '__dict__') else skill_info + skill_info_dict = skill_info_dict.copy() + skill_info_dict.setdefault("tenant_id", tenant_id) + skill_info_dict.setdefault("user_id", user_id) + skill_info_dict.setdefault("version_no", version_no) + skill_info_dict.setdefault("created_by", user_id) + skill_info_dict.setdefault("updated_by", user_id) + + with get_db_session() as session: + query = session.query(SkillInstance).filter( + SkillInstance.tenant_id == tenant_id, + SkillInstance.agent_id == skill_info_dict.get('agent_id'), + SkillInstance.delete_flag != 'Y', + SkillInstance.skill_id == skill_info_dict.get('skill_id'), + SkillInstance.version_no == version_no + ) + skill_instance = query.first() + + if skill_instance: + for key, value in skill_info_dict.items(): + if hasattr(skill_instance, key): + setattr(skill_instance, key, value) + else: + new_skill_instance = SkillInstance( + **filter_property(skill_info_dict, SkillInstance)) + session.add(new_skill_instance) + session.flush() + skill_instance = new_skill_instance + + return as_dict(skill_instance) + + +def query_skill_instances_by_agent_id(agent_id: int, tenant_id: str, version_no: int = 0): + """Query all SkillInstance for an agent (regardless of enabled status).""" + with get_db_session() as session: + query = session.query(SkillInstance).filter( + SkillInstance.tenant_id == tenant_id, + SkillInstance.agent_id == agent_id, + SkillInstance.version_no == version_no, + SkillInstance.delete_flag != 'Y') + skill_instances = query.all() + return [as_dict(skill_instance) for skill_instance in skill_instances] + + +def 
query_enabled_skill_instances(agent_id: int, tenant_id: str, version_no: int = 0): + """Query enabled SkillInstance in the database.""" + with get_db_session() as session: + query = session.query(SkillInstance).filter( + SkillInstance.tenant_id == tenant_id, + SkillInstance.version_no == version_no, + SkillInstance.delete_flag != 'Y', + SkillInstance.enabled, + SkillInstance.agent_id == agent_id) + skill_instances = query.all() + return [as_dict(skill_instance) for skill_instance in skill_instances] + + +def query_skill_instance_by_id(agent_id: int, skill_id: int, tenant_id: str, version_no: int = 0): + """Query SkillInstance in the database by agent_id and skill_id.""" + with get_db_session() as session: + query = session.query(SkillInstance).filter( + SkillInstance.tenant_id == tenant_id, + SkillInstance.agent_id == agent_id, + SkillInstance.skill_id == skill_id, + SkillInstance.version_no == version_no, + SkillInstance.delete_flag != 'Y') + skill_instance = query.first() + if skill_instance: + return as_dict(skill_instance) + else: + return None + + +def search_skills_for_agent(agent_id: int, tenant_id: str, version_no: int = 0): + """Query enabled skills for an agent with skill content from SkillInstance.""" + with get_db_session() as session: + query = session.query(SkillInstance).filter( + SkillInstance.agent_id == agent_id, + SkillInstance.tenant_id == tenant_id, + SkillInstance.version_no == version_no, + SkillInstance.delete_flag != 'Y', + SkillInstance.enabled + ) + + skill_instances = query.all() + return [as_dict(skill_instance) for skill_instance in skill_instances] + + +def delete_skills_by_agent_id(agent_id: int, tenant_id: str, user_id: str, version_no: int = 0): + """Delete all skill instances for an agent.""" + with get_db_session() as session: + session.query(SkillInstance).filter( + SkillInstance.agent_id == agent_id, + SkillInstance.tenant_id == tenant_id, + SkillInstance.version_no == version_no + ).update({ + SkillInstance.delete_flag: 'Y', 
'updated_by': user_id + }) + + +def delete_skill_instances_by_skill_id(skill_id: int, user_id: str): + """Soft delete all skill instances for a specific skill. + + This is called when a skill is deleted to clean up associated skill instances. + + Args: + skill_id: ID of the skill to delete instances for + user_id: User ID for the updated_by field + """ + with get_db_session() as session: + session.query(SkillInstance).filter( + SkillInstance.skill_id == skill_id, + SkillInstance.delete_flag != 'Y' + ).update({ + SkillInstance.delete_flag: 'Y', + 'updated_by': user_id + }) + + +# ============== SkillInfo Repository Functions ============== + + +def _get_tool_ids(session, skill_id: int) -> List[int]: + """Get tool IDs for a skill.""" + relations = session.query(SkillToolRelation).filter( + SkillToolRelation.skill_id == skill_id + ).all() + return [r.tool_id for r in relations] + + +def _to_dict(skill: SkillInfo) -> Dict[str, Any]: + """Convert SkillInfo to dict.""" + return { + "skill_id": skill.skill_id, + "name": skill.skill_name, + "description": skill.skill_description, + "tags": skill.skill_tags or [], + "content": skill.skill_content or "", + "params": skill.params if skill.params is not None else {}, + "source": skill.source, + "created_by": skill.created_by, + "create_time": skill.create_time.isoformat() if skill.create_time else None, + "updated_by": skill.updated_by, + "update_time": skill.update_time.isoformat() if skill.update_time else None, + } + + +def list_skills() -> List[Dict[str, Any]]: + """List all skills from database.""" + with get_db_session() as session: + skills = session.query(SkillInfo).filter( + SkillInfo.delete_flag != 'Y' + ).all() + results = [] + for s in skills: + result = _to_dict(s) + result["tool_ids"] = _get_tool_ids(session, s.skill_id) + results.append(result) + return results + + +def get_skill_by_name(skill_name: str) -> Optional[Dict[str, Any]]: + """Get skill by name.""" + with get_db_session() as session: + skill = 
session.query(SkillInfo).filter( + SkillInfo.skill_name == skill_name, + SkillInfo.delete_flag != 'Y' + ).first() + if skill: + result = _to_dict(skill) + result["tool_ids"] = _get_tool_ids(session, skill.skill_id) + return result + return None + + +def get_skill_by_id(skill_id: int) -> Optional[Dict[str, Any]]: + """Get skill by ID.""" + with get_db_session() as session: + skill = session.query(SkillInfo).filter( + SkillInfo.skill_id == skill_id, + SkillInfo.delete_flag != 'Y' + ).first() + if skill: + result = _to_dict(skill) + result["tool_ids"] = _get_tool_ids(session, skill.skill_id) + return result + return None + + +def create_skill(skill_data: Dict[str, Any]) -> Dict[str, Any]: + """Create a new skill.""" + with get_db_session() as session: + skill = SkillInfo( + skill_name=skill_data["name"], + skill_description=skill_data.get("description", ""), + skill_tags=skill_data.get("tags", []), + skill_content=skill_data.get("content", ""), + params=_params_value_for_db(skill_data.get("params")), + source=skill_data.get("source", "custom"), + created_by=skill_data.get("created_by"), + create_time=datetime.now(), + updated_by=skill_data.get("updated_by"), + update_time=datetime.now(), + ) + session.add(skill) + session.flush() + + skill_id = skill.skill_id + + tool_ids = skill_data.get("tool_ids", []) + if tool_ids: + for tool_id in tool_ids: + rel = SkillToolRelation( + skill_id=skill_id, + tool_id=tool_id, + create_time=datetime.now() + ) + session.add(rel) + + session.commit() + + result = _to_dict(skill) + result["tool_ids"] = tool_ids + return result + + +def update_skill( + skill_name: str, + skill_data: Dict[str, Any], + updated_by: Optional[str] = None, +) -> Dict[str, Any]: + """Update an existing skill. + + Args: + skill_name: Skill name (unique key). + skill_data: Business fields to update (description, content, tags, source, params, tool_ids). + updated_by: Actor user id from server-side auth; never taken from the HTTP request body. 
+ + Notes: + Uses a single Core UPDATE for ag_skill_info_t columns. Mixing ORM attribute assignment + with session.execute(update()) can let autoflush emit an UPDATE that overwrites JSON + params with stale in-memory values, so we avoid ORM writes for this row. + """ + with get_db_session() as session: + skill = session.query(SkillInfo).filter( + SkillInfo.skill_name == skill_name, + SkillInfo.delete_flag != "Y", + ).first() + + if not skill: + raise ValueError(f"Skill not found: {skill_name}") + + skill_id = skill.skill_id + now = datetime.now() + row_values: Dict[str, Any] = {"update_time": now} + if updated_by: + row_values["updated_by"] = updated_by + + if "description" in skill_data: + row_values["skill_description"] = skill_data["description"] + if "content" in skill_data: + row_values["skill_content"] = skill_data["content"] + if "tags" in skill_data: + row_values["skill_tags"] = skill_data["tags"] + if "source" in skill_data: + row_values["source"] = skill_data["source"] + if "params" in skill_data: + row_values["params"] = _params_value_for_db(skill_data["params"]) + + session.execute( + sa_update(SkillInfo) + .where( + SkillInfo.skill_id == skill_id, + SkillInfo.delete_flag != "Y", + ) + .values(**row_values) + ) + + if "tool_ids" in skill_data: + session.query(SkillToolRelation).filter( + SkillToolRelation.skill_id == skill_id + ).delete() + + for tool_id in skill_data["tool_ids"]: + rel = SkillToolRelation( + skill_id=skill_id, + tool_id=tool_id, + create_time=datetime.now() + ) + session.add(rel) + + session.commit() + + refreshed = session.query(SkillInfo).filter( + SkillInfo.skill_id == skill_id, + SkillInfo.delete_flag != "Y", + ).first() + if not refreshed: + raise ValueError(f"Skill not found after update: {skill_name}") + + result = _to_dict(refreshed) + result["tool_ids"] = skill_data.get( + "tool_ids", + _get_tool_ids(session, skill_id), + ) + return result + + +def delete_skill(skill_name: str, updated_by: Optional[str] = None) -> bool: + 
"""Soft delete a skill (mark as deleted). + + Args: + skill_name: Name of the skill to delete + updated_by: User ID of the user performing the delete + + Returns: + True if deleted successfully + """ + with get_db_session() as session: + skill = session.query(SkillInfo).filter( + SkillInfo.skill_name == skill_name + ).first() + + if not skill: + return False + + skill_id = skill.skill_id + skill.delete_flag = 'Y' + skill.update_time = datetime.now() + if updated_by: + skill.updated_by = updated_by + + session.query(SkillInstance).filter( + SkillInstance.skill_id == skill_id, + SkillInstance.delete_flag != 'Y' + ).update({ + SkillInstance.delete_flag: 'Y', + 'updated_by': updated_by + }) + + session.commit() + return True + + +def get_tool_names_by_ids(session, tool_ids: List[int]) -> List[str]: + """Get tool names from tool IDs.""" + if not tool_ids: + return [] + tools = session.query(ToolInfo.name).filter( + ToolInfo.tool_id.in_(tool_ids) + ).all() + return [t.name for t in tools] + + +def get_tool_ids_by_names(tool_names: List[str], tenant_id: str) -> List[int]: + """Get tool IDs from tool names. + + Args: + tool_names: List of tool names + tenant_id: Tenant ID + + Returns: + List of tool IDs + """ + if not tool_names: + return [] + with get_db_session() as session: + tools = session.query(ToolInfo.tool_id).filter( + ToolInfo.name.in_(tool_names), + ToolInfo.delete_flag != 'Y', + ToolInfo.author == tenant_id + ).all() + return [t.tool_id for t in tools] + + +def get_tool_names_by_skill_name(skill_name: str) -> List[str]: + """Get tool names for a skill by skill name. 
+ + Args: + skill_name: Name of the skill + + Returns: + List of tool names + """ + with get_db_session() as session: + skill = session.query(SkillInfo).filter( + SkillInfo.skill_name == skill_name, + SkillInfo.delete_flag != 'Y' + ).first() + if not skill: + return [] + tool_ids = _get_tool_ids(session, skill.skill_id) + return get_tool_names_by_ids(session, tool_ids) + + +def get_skill_with_tool_names(skill_name: str) -> Optional[Dict[str, Any]]: + """Get skill with tool names included.""" + with get_db_session() as session: + skill = session.query(SkillInfo).filter( + SkillInfo.skill_name == skill_name, + SkillInfo.delete_flag != 'Y' + ).first() + if skill: + result = _to_dict(skill) + tool_ids = _get_tool_ids(session, skill.skill_id) + result["tool_ids"] = tool_ids + result["allowed_tools"] = get_tool_names_by_ids(session, tool_ids) + return result + return None diff --git a/backend/database/tool_db.py b/backend/database/tool_db.py index 2a64c47d6..4d34ede9b 100644 --- a/backend/database/tool_db.py +++ b/backend/database/tool_db.py @@ -1,10 +1,11 @@ import re +import json from typing import List - from database.agent_db import logger from database.client import get_db_session, filter_property, as_dict from database.db_models import ToolInstance, ToolInfo from consts.model import ToolSourceEnum +from utils.tool_utils import get_local_tools_description_zh def create_tool(tool_info, version_no: int = 0): @@ -225,15 +226,15 @@ def update_tool_table_from_scan_tool_list(tenant_id: str, user_id: str, tool_lis is_available = True if re.match( r'^[a-zA-Z_][a-zA-Z0-9_]*$', tool.name) is not None else False - # Use same key generation logic as above + # Build key for lookup - same logic as existing_tool_dict if tool.source == ToolSourceEnum.MCP.value: - tool_key = f"{tool.name}&{tool.source}&{tool.usage or ''}" + key = f"{tool.name}&{tool.source}&{tool.usage or ''}" else: - tool_key = f"{tool.name}&{tool.source}" + key = f"{tool.name}&{tool.source}" - if tool_key in 
existing_tool_dict: - # by tool name, source, and usage (for MCP) to update the existing tool - existing_tool = existing_tool_dict[tool_key] + if key in existing_tool_dict: + # by tool name and source to update the existing tool + existing_tool = existing_tool_dict[key] for key, value in filtered_tool_data.items(): setattr(existing_tool, key, value) existing_tool.updated_by = user_id @@ -253,16 +254,44 @@ def add_tool_field(tool_info): query = session.query(ToolInfo).filter( ToolInfo.tool_id == tool_info["tool_id"]) tool = query.first() - # add tool params tool_params = tool.params for ele in tool_params: param_name = ele["name"] ele["default"] = tool_info["params"].get(param_name) - tool_dict = as_dict(tool) tool_dict["params"] = tool_params - + + # Merge description_zh from SDK for local tools + tool_name = tool_dict.get("name") + if tool_dict.get("source") == "local": + local_tool_descriptions = get_local_tools_description_zh() + if tool_name in local_tool_descriptions: + sdk_info = local_tool_descriptions[tool_name] + tool_dict["description_zh"] = sdk_info.get("description_zh") + + # Merge params description_zh from SDK + for param in tool_params: + if not param.get("description_zh"): + for sdk_param in sdk_info.get("params", []): + if sdk_param.get("name") == param.get("name"): + param["description_zh"] = sdk_param.get("description_zh") + break + + # Merge inputs description_zh from SDK + inputs_str = tool_dict.get("inputs", "{}") + try: + inputs = json.loads(inputs_str) if isinstance(inputs_str, str) else inputs_str + if isinstance(inputs, dict): + for key, value in inputs.items(): + if isinstance(value, dict) and not value.get("description_zh"): + sdk_inputs = sdk_info.get("inputs", {}) + if key in sdk_inputs: + value["description_zh"] = sdk_inputs[key].get("description_zh") + tool_dict["inputs"] = json.dumps(inputs, ensure_ascii=False) + except (json.JSONDecodeError, TypeError): + pass + # combine tool_info and tool_dict tool_info.update(tool_dict) return 
tool_info @@ -331,7 +360,6 @@ def delete_tools_by_agent_id(agent_id, tenant_id, user_id, version_no: int = 0): ToolInstance.delete_flag: 'Y', 'updated_by': user_id }) - def search_last_tool_instance_by_tool_id(tool_id: int, tenant_id: str, user_id: str, version_no: int = 0): """ Query the latest ToolInstance by tool_id. @@ -355,4 +383,4 @@ def search_last_tool_instance_by_tool_id(tool_id: int, tenant_id: str, user_id: ToolInstance.delete_flag != 'Y' ).order_by(ToolInstance.update_time.desc()) tool_instance = query.first() - return as_dict(tool_instance) if tool_instance else None + return as_dict(tool_instance) if tool_instance else None \ No newline at end of file diff --git a/backend/prompts/managed_system_prompt_template_en.yaml b/backend/prompts/managed_system_prompt_template_en.yaml index 9c3a2799c..82fc4d982 100644 --- a/backend/prompts/managed_system_prompt_template_en.yaml +++ b/backend/prompts/managed_system_prompt_template_en.yaml @@ -119,11 +119,10 @@ system_prompt: |- 4. Use tool input parameters correctly, use keyword arguments, not dictionary format; 5. Avoid making too many tool calls in one round of conversation, as this will make the output format unpredictable; 6. Only call tools when needed, do not repeat calls with the same parameters; - 7. Only import from the following modules: {{authorized_imports}}; - 8. Use variable names to save function call results. In each intermediate step, you can use "print()" to save any important information you need. Saved information persists between code executions. The content printed by print() should be treated as a string, do not perform dictionary-related operations such as .get(), [] etc., to avoid type errors; - 9. Avoid using **if**, **for**, and other logic in example code, only call tools. Each action in the example is a deterministic event. If there are different conditions, you should provide examples for different conditions; - 10. 
Use keyword arguments for tool calls, such as: tool_name(param1="value1", param2="value2"); - 11. Don't give up! You are responsible for solving the task, not providing solution directions. + 7. Use variable names to save function call results. In each intermediate step, you can use "print()" to save any important information you need. Saved information persists between code executions. The content printed by print() should be treated as a string, do not perform dictionary-related operations such as .get(), [] etc., to avoid type errors; + 8. Avoid using **if**, **for**, and other logic in example code, only call tools. Each action in the example is a deterministic event. If there are different conditions, you should provide examples for different conditions; + 9. Use keyword arguments for tool calls, such as: tool_name(param1="value1", param2="value2"); + 10. Don't give up! You are responsible for solving the task, not providing solution directions. ### Example Templates {{ few_shots }} diff --git a/backend/prompts/managed_system_prompt_template_zh.yaml b/backend/prompts/managed_system_prompt_template_zh.yaml index b89dcc405..c8f3e393a 100644 --- a/backend/prompts/managed_system_prompt_template_zh.yaml +++ b/backend/prompts/managed_system_prompt_template_zh.yaml @@ -1,152 +1,412 @@ system_prompt: |- + ### 基本信息 - 你是{{APP_NAME}},{{APP_DESCRIPTION}},现在是{{time|default('当前时间')}} + + 你是{{APP_NAME}},{{APP_DESCRIPTION}},现在是{{time|default('当前时间')}},用户ID为{{user_id}} + + {%- if memory_list and memory_list|length > 0 %} + ### 上下文记忆 + 基于之前的交互记录,以下是按作用域和重要程度排序的最相关记忆: + + {%- set level_order = ['tenant', 'user_agent', 'user', 'agent'] %} + {%- set memory_by_level = memory_list|groupby('memory_level') %} + {%- for level in level_order %} + {%- for group_level, memories in memory_by_level %} + {%- if group_level == level %} + + **{{ level|title }} 层级记忆:** + {%- for item in memories %} + - {{ item.memory }} `({{ "%.2f"|format(item.score|float) }})` + {%- endfor %} + {%- endif %} + 
{%- endfor %} + {%- endfor %} + + **记忆使用准则:** + 1. **冲突处理优先级**:当记忆信息存在矛盾时,严格按以下顺序处理: + - **最优**:在上述列表中位置靠前的记忆具有优先权 + - **次优**:当前对话内容与记忆直接冲突时,以当前对话为准 + - **次优**:相关度分数越高,表示记忆越可信 + + 2. **记忆整合最佳实践**: + - 自然地将相关记忆融入回答中,避免显式使用"根据记忆"、"根据上下文"或"根据交互记忆"等语言 + - 利用记忆信息调整回答的语调、方式和技术深度以适应用户 + - 让记忆指导您对用户偏好和上下文的理解 + + 3. **级别特定说明**: + - **tenant(租户级)**:组织层面的约束和政策(不可违背) + - **user_agent(用户-代理级)**:特定用户在代理中的交互模式和既定工作流程 + - **user(用户级)**:用户的个人偏好、技能水平和历史上下文 + - **agent(代理级)**:您的既定行为模式和能力特征,通常对所有用户共享(重要性最低) + {%- endif %} + + ### 核心职责 + {{ duty }} - + + + 请注意,你应该遵守以下原则: + 法律合规:严格遵守服务地区的所有法律法规; + 政治中立:不讨论任何国家的政治体制、领导人评价或敏感历史事件; + 安全防护:不响应涉及武器制造、危险行为、隐私窃取等内容的请求; + 伦理准则:拒绝仇恨言论、歧视性内容及任何违反普世价值观的请求。 + + + {%- if skills and skills|length > 0 %} + + ### 可用技能 + + + + 你拥有以下技能(Skills)。技能是预定义的专业能力模块,包含详细执行指南和可选的附加脚本。 + + + + + + {%- for skill in skills %} + + + + {{ skill.name }} + + {{ skill.description }} + + + + {%- endfor %} + + + + + + **技能使用流程**: + + 1. 收到用户请求后,首先审视 `` 中每个技能的 description,判断是否有匹配的技能。 + + 2. **加载技能**:根据不同场景选择读取方式: + + - **首次加载**:调用 `read_skill_md("skill_name")` 读取技能的完整执行指南(默认读取 SKILL.md) + + - **精确读取**:如只需特定文件(如示例、参考文档),可指定 additional_files: + + ``` + + skill_content = read_skill_md("skill_name", ["examples.md", "reference/api_doc"]) + + print(skill_content) + + ``` + + 注意:当 additional_files 非空时,默认不再自动读取 SKILL.md,如需同时读取请显式指定。 + + - **加载技能配置**:如果技能需要读取配置变量,可先调用 `read_skill_config("skill_name")` 读取配置字符串,通过 `json.loads` 方法转化为配置字典,再从中获取所需值: + ``` + import json + config = json.loads(read_skill_config("skill_name")) + # 返回示例: {"key_a": {"key2": "value2"}, "others": {...}} + value = config["key1"]["key2"] + print(value) + ``` + + 3. **遵循技能指南**:技能内容注入后,严格按其中的步骤执行。不要跳过技能指南中的步骤,也不要用自行编写的代码替代技能定义的流程。 + + 4. 
**执行技能脚本**:如果技能指南中引用了附加脚本(形如 ``),使用以下格式调用: + + 代码: + + ``` + + # 参数使用 -- 前缀传递命令行参数 + # 布尔参数传 True 即可(如 --wait) + # 列表参数会自动展开(如 --names ["vm1", "vm2"] -> --names vm1 vm2) + result = run_skill_script("skill_name", "script_path", {"--param1": "value1", "--flag": True}) + + print(result) + + ``` + + 注意:只执行技能指南中明确声明的脚本路径,绝不自行构造脚本路径。 + + 5. **整合输出**:根据技能指南要求的输出格式,结合脚本执行结果生成最终回答。 + + 6. **引用场景处理**:当技能内容中出现引用标记或需要引用其他文件时,需要识别并再次调用 read_skill_md: + + - **引用模板识别**:注意技能内容中形如 `` 或自然语言式的引用声明(如"详见 examples.md"、"请参考 reference/api_doc") + + - **自动补全**:发现引用后,尝试读取被引用的文件获取更多信息 + + - **示例**: + + ``` + + # 技能内容提示"请参考 examples.md 获取详细示例" + + additional_info = read_skill_md("skill_name", ["examples.md"]) + + print(additional_info) + + ``` + + {%- endif %} + + + ### 执行流程 + 要解决任务,你必须通过一系列步骤向前规划,以'思考:'、'代码:'和'观察结果:'序列的循环进行: + 1. 思考: + - 确定需要使用哪些工具获取信息或行动 + {%- if memory_list and memory_list|length > 0 %} + - 合理参考之前交互中的上下文记忆信息 + {%- endif %} + - 解释你的决策逻辑和预期结果 + + 2. 代码: + - 用简单的Python编写代码 + - 遵循python代码规范和python语法 + - 根据格式规范正确调用工具 + - 考虑到代码执行与展示用户代码的区别,使用'代码:\n```\n'开头,并以'```'表达运行代码,使用'代码:\n```\n'开头,并以'```'表达展示代码 + - 注意运行的代码不会被用户看到,所以如果用户需要看到代码,你需要使用'代码:\n```\n'开头,并以'```'表达展示代码。 + 3. 观察结果: + - 查看代码执行结果 - + + 在思考结束后,当你认为可以回答用户问题,那么可以不生成代码,直接生成最终回答给到用户并停止循环。 - + + + 生成最终回答时,你需要遵循以下规范: + 1. **Markdown格式要求**: + - 使用标准Markdown语法格式化输出,支持标题、列表、表格、代码块、链接等 + - 展示图片和视频使用链接方式,不需要外套代码块,格式:[链接文本](URL),图片格式:![alt文本](图片URL),视频格式: + - 段落之间使用单个空行分隔,避免多个连续空行 + - 数学公式使用标准Markdown格式:行内公式用 $公式$,块级公式用 $$公式$$ - + + + 2. **引用标记规范**(仅在使用了检索工具时): + - 引用标记格式必须严格为:`[[字母+数字]]`,例如:`[[a1]]`、`[[b2]]`、`[[c3]]` + - 字母部分必须是单个小写字母(a-e),数字部分必须是整数 + - 引用标记的字母和数字必须与检索工具的检索结果一一对应 + - 引用标记应紧跟在相关信息或句子之后,通常放在句末或段落末尾 + - 多个引用标记可以连续使用,例如:`[[a1]][[b2]]` + - **重要**:仅添加引用标记,不要添加链接、参考文献列表等多余内容 + - 如果检索结果中没有匹配的引用,则不显示该引用标记 - + + + 3. 
**格式细节要求**: + - 避免在Markdown中使用HTML标签,优先使用Markdown原生语法 + - 代码块中的代码应保持原始格式,不要添加额外的转义字符 + - 若未使用检索工具,则不添加任何引用标记 - + + + 注意最后生成的回答要语义连贯,信息清晰,可读性高。 - + + + ### 可用资源 + {%- if tools and tools.values() | list %} + - 你只能使用以下工具,不得使用任何其他工具: + {%- for tool in tools.values() %} + - {{ tool.name }}: {{ tool.description }} + 接受输入: {{tool.inputs}} + 返回输出类型: {{tool.output_type}} + {%- endfor %} + + {%- if knowledge_base_summary %} + - knowledge_base_search工具只能使用以下知识库索引,请根据用户问题选择最相关的一个或多个知识库索引: + {{ knowledge_base_summary }} + {%- endif %} + {%- else %} + - 当前没有可用的工具 + {%- endif %} - + + + + {%- if skills and skills|length > 0 %} + + - 你拥有上述 `` 中列出的技能。技能中引用的脚本通过 `run_skill_script()` 函数调用,该函数由平台提供,不需要导入。 + + + + ### 技能使用要求 + + 1. **技能优先**:如果用户请求匹配了某个技能的 description,必须先调用 `read_skill_md()` 加载技能指南,再按指南执行。不得跳过技能自行编写代码解决。 + + 2. **忠实执行**:读取技能内容后,严格按技能指南中的步骤操作。不要自行修改流程、跳过步骤或用通用代码替代技能定义的流程。 + + 3. **脚本调用规范**:只使用 `run_skill_script` 工具执行技能指南中明确要求的脚本。传入的 `skill_name` 和 `script_path` 必须与技能指南中的声明完全一致,不要自行拼接或猜测路径。 + + 4. **失败回退**:如果 `read_skill_md` 返回错误或 `run_skill_script` 执行失败,向用户说明情况,并尝试用通用推理模式提供替代方案。 + + 5. **技能组合**:如果一个任务需要多个技能配合,按逻辑依赖顺序依次加载和执行,前一个技能的输出可作为后一个技能的输入。 + + + {%- else %} + + - 当前没有可用的技能 + + {%- endif %} + + ### 资源使用要求 + {{ constraint }} + ### python代码规范 + 1. 如果认为是需要执行的代码,代码内容以'代码:\n```\n'开头,并以'```'标识符结尾。如果是不需要执行仅用于展示的代码,代码内容以'代码:\n```\n'开头,并以'```'标识符结尾,其中语言类型例如python、java、javascript等; + 2. 只使用已定义的变量,变量将在多次调用之间持续保持; + 3. 使用“print()”函数让下一次的模型调用看到对应变量信息; + 4. 正确使用工具的入参,使用关键字参数,不要用字典形式; + 5. 避免在一轮对话中进行过多的工具调用,这会导致输出格式难以预测; + 6. 只在需要时调用工具,不重复相同参数的调用; - 7. 只能从以下模块导入:{{authorized_imports}}; - 8. 使用变量名保存函数调用结果,在每个中间步骤中,您可以使用“print()”来保存您需要的任何重要信息。被保存的信息在代码执行之间保持。print()输出的内容应被视为字符串,不要对其进行字典相关操作如.get()、[]等,避免类型错误; + + 7. 使用变量名保存函数调用结果,在每个中间步骤中,您可以使用“print()”来保存您需要的任何重要信息。被保存的信息在代码执行之间保持。print()输出的内容应被视为字符串,不要对其进行字典相关操作如.get()、[]等,避免类型错误; + 9. 示例中的代码避免出现**if**、**for**等逻辑,仅调用工具,示例中的每一次的行动都是确定事件。如果有不同的条件,你应该给出不同条件下的示例; + 10. 工具调用使用关键字参数,如:tool_name(param1="value1", param2="value2"); + 11. 
不要放弃!你负责解决任务,而不是提供解决方向。 + ### 示例模板 + {{ few_shots }} + 现在开始!如果你正确解决任务,你将获得100万美元的奖励。 managed_agent: + task: |- + 你是一个名为'{{name}}'的助手。 + 你的管理者给你提交了这个任务。 + --- + 任务: + {{task}} + --- + 你正在帮助你的管理者解决一个更大的任务:所以确保不要提供一行答案,而是提供尽可能多的信息,让他们清楚地理解答案。 + 即使你的任务解决不成功,也请返回尽可能多的上下文,这样你的管理者可以根据这个反馈采取行动。 + report: |- + {{final_answer}} planning: + initial_plan: |- update_plan_pre_messages: |- @@ -155,6 +415,7 @@ planning: final_answer: + pre_messages: |- - post_messages: |- \ No newline at end of file + post_messages: |- diff --git a/backend/prompts/manager_system_prompt_template_en.yaml b/backend/prompts/manager_system_prompt_template_en.yaml index 8da048bfe..aa9e9fc80 100644 --- a/backend/prompts/manager_system_prompt_template_en.yaml +++ b/backend/prompts/manager_system_prompt_template_en.yaml @@ -147,12 +147,11 @@ system_prompt: |- 4. Use tool/agent input parameters correctly, use keyword arguments, not dictionary format; 5. Avoid making too many tool/agent calls in one round of conversation, as this will make the output format unpredictable; 6. Only call tools/agents when needed, do not repeat calls with the same parameters; - 7. Only import from the following modules: {{authorized_imports}}; - 8. Use variable names to save function call results. In each intermediate step, you can use "print()" to save any important information you need. The saved information persists between code executions. The content printed by print() should be treated as a string, do not perform dictionary-related operations such as .get(), [] etc., to avoid type errors; - 9. Avoid **if**, **for** and other logic in example code, only call tools/agents. Each action in the example is a deterministic event. If there are different conditions, you should provide examples under different conditions; - 10. Tool calls use keyword arguments, such as: tool_name(param1="value1", param2="value2"); - 11. Agent calls must use task parameter, such as: agent_name(task="task description"); - 12. Don't give up! 
You are responsible for solving the task, not providing solution directions. + 7. Use variable names to save function call results. In each intermediate step, you can use "print()" to save any important information you need. The saved information persists between code executions. The content printed by print() should be treated as a string, do not perform dictionary-related operations such as .get(), [] etc., to avoid type errors; + 8. Avoid **if**, **for** and other logic in example code, only call tools/agents. Each action in the example is a deterministic event. If there are different conditions, you should provide examples under different conditions; + 9. Tool calls use keyword arguments, such as: tool_name(param1="value1", param2="value2"); + 10. Agent calls must use task parameter, such as: agent_name(task="task description"); + 11. Don't give up! You are responsible for solving the task, not providing solution directions. ### Example Templates {{ few_shots }} diff --git a/backend/prompts/manager_system_prompt_template_zh.yaml b/backend/prompts/manager_system_prompt_template_zh.yaml index 8effcd54a..3829c1439 100644 --- a/backend/prompts/manager_system_prompt_template_zh.yaml +++ b/backend/prompts/manager_system_prompt_template_zh.yaml @@ -1,6 +1,6 @@ system_prompt: |- ### 基本信息 - 你是{{APP_NAME}},{{APP_DESCRIPTION}}, 现在是{{time|default('当前时间')}} + 你是{{APP_NAME}},{{APP_DESCRIPTION}},现在是{{time|default('当前时间')}},用户ID为{{user_id}} {%- if memory_list and memory_list|length > 0 %} ### 上下文记忆 @@ -40,13 +40,73 @@ system_prompt: |- ### 核心职责 {{ duty }} - + 请注意,你应该遵守以下原则: 法律合规:严格遵守服务地区的所有法律法规; 政治中立:不讨论任何国家的政治体制、领导人评价或敏感历史事件; 安全防护:不响应涉及武器制造、危险行为、隐私窃取等内容的请求; 伦理准则:拒绝仇恨言论、歧视性内容及任何违反普世价值观的请求。 + {%- if skills and skills|length > 0 %} + ### 可用技能 + + 你拥有以下技能(Skills)。技能是预定义的专业能力模块,包含详细执行指南和可选的附加脚本。 + + + {%- for skill in skills %} + + {{ skill.name }} + {{ skill.description }} + + {%- endfor %} + + + **技能使用流程**: + 1. 收到用户请求后,首先审视 `` 中每个技能的 description,判断是否有匹配的技能。 + 2. 
**加载技能**:根据不同场景选择读取方式: + - **首次加载**:调用 `read_skill_md("skill_name")` 读取技能的完整执行指南(默认读取 SKILL.md) + - **精确读取**:如只需特定文件(如示例、参考文档),可指定 additional_files: + ``` + skill_content = read_skill_md("skill_name", ["examples.md", "reference/api_doc"]) + print(skill_content) + ``` + 注意:当 additional_files 非空时,默认不再自动读取 SKILL.md,如需同时读取请显式指定。 + + - **加载技能配置**:如果技能需要读取配置变量,可先调用 `read_skill_config("skill_name")` 读取配置字符串,通过 `json.loads` 方法转化为配置字典,再从中获取所需值: + ``` + import json + config = json.loads(read_skill_config("skill_name")) + # 返回示例: {"key_a": {"key2": "value2"}, "others": {...}} + value = config["key1"]["key2"] + print(value) + ``` + + 3. **遵循技能指南**:技能内容注入后,严格按其中的步骤执行。不要跳过技能指南中的步骤,也不要用自行编写的代码替代技能定义的流程。 + + 4. **执行技能脚本**:如果技能指南中引用了附加脚本(形如 ``),使用以下格式调用: + 代码: + ``` + # 参数使用 -- 前缀传递命令行参数 + # 布尔参数传 True 即可(如 --wait) + # 列表参数会自动展开(如 --names ["vm1", "vm2"] -> --names vm1 vm2) + result = run_skill_script("skill_name", "script_path", {"--param1": "value1", "--flag": True}) + print(result) + ``` + 注意:只执行技能指南中明确声明的脚本路径,绝不自行构造脚本路径。 + + 5. **整合输出**:根据技能指南要求的输出格式,结合脚本执行结果生成最终回答。 + + 6. **引用场景处理**:当技能内容中出现引用标记或需要引用其他文件时,需要识别并再次调用 read_skill_md: + - **引用模板识别**:注意技能内容中形如 `` 或自然语言式的引用声明(如"详见 examples.md"、"请参考 reference/api_doc") + - **自动补全**:发现引用后,尝试读取被引用的文件获取更多信息 + - **示例**: + ``` + # 技能内容提示"请参考 examples.md 获取详细示例" + additional_info = read_skill_md("skill_name", ["examples.md"]) + print(additional_info) + ``` + {%- endif %} + ### 执行流程 要解决任务,你必须通过一系列步骤向前规划,以'思考:'、'代码:'和'观察结果:'序列的循环进行: @@ -68,16 +128,16 @@ system_prompt: |- 3. 观察结果: - 查看代码执行结果 - 根据结果决定下一步行动 - + 在思考结束后,当你认为可以回答用户问题,那么可以不生成代码,直接生成最终回答给到用户并停止循环。 - + 生成最终回答时,你需要遵循以下规范: 1. Markdown格式要求: - 使用标准Markdown语法格式化输出,支持标题、列表、表格、代码块、链接等 - 展示图片和视频使用链接方式,不需要外套代码块,格式:[链接文本](URL),图片格式:![alt文本](图片URL),视频格式: - 段落之间使用单个空行分隔,避免多个连续空行 - 数学公式使用标准Markdown格式:行内公式用 $公式$,块级公式用 $$公式$$ - + 2. 
引用标记规范(仅在使用了检索工具时): - 引用标记格式必须严格为:`[[字母+数字]]`,例如:`[[a1]]`、`[[b2]]`、`[[c3]]` - 字母部分必须是单个小写字母(a-e),数字部分必须是整数 @@ -86,12 +146,12 @@ system_prompt: |- - 多个引用标记可以连续使用,例如:`[[a1]][[b2]]` - **重要**:仅添加引用标记,不要添加链接、参考文献列表等多余内容 - 如果检索结果中没有匹配的引用,则不显示该引用标记 - + 3. 格式细节要求: - 避免在Markdown中使用HTML标签,优先使用Markdown原生语法 - 代码块中的代码应保持原始格式,不要添加额外的转义字符 - 若未使用检索工具,则不添加任何引用标记 - + ### 可用资源 你只能使用以下资源,不得使用任何其他工具或助手: @@ -136,10 +196,24 @@ system_prompt: |- {%- else %} - 当前没有可用的助手 {%- endif %} - + + 3. 技能 + {%- if skills and skills|length > 0 %} + - 你拥有上述 `` 中列出的技能。技能中引用的脚本通过 `run_skill_script()` 函数调用,该函数由平台提供,不需要导入。 + + ### 技能使用要求 + 1. **技能优先**:如果用户请求匹配了某个技能的 description,必须先调用 `read_skill_md()` 加载技能指南,再按指南执行。不得跳过技能自行编写代码解决。 + 2. **忠实执行**:读取技能内容后,严格按技能指南中的步骤操作。不要自行修改流程、跳过步骤或用通用代码替代技能定义的流程。 + 3. **脚本调用规范**:只使用 `run_skill_script` 工具执行技能指南中明确要求的脚本。传入的 `skill_name` 和 `script_path` 必须与技能指南中的声明完全一致,不要自行拼接或猜测路径。 + 4. **失败回退**:如果 `read_skill_md` 返回错误或 `run_skill_script` 执行失败,向用户说明情况,并尝试用通用推理模式提供替代方案。 + 5. **技能组合**:如果一个任务需要多个技能配合,按逻辑依赖顺序依次加载和执行,前一个技能的输出可作为后一个技能的输入。 + {%- else %} + - 当前没有可用的技能 + {%- endif %} + ### 资源使用要求 {{ constraint }} - + ### python代码规范 1. 如果认为是需要执行的代码,代码内容以'代码:\n```\n'开头,并以'```'标识符结尾。如果是不需要执行仅用于展示的代码,代码内容以'代码:\n```\n'开头,并以'```'标识符结尾,其中语言类型例如python、java、javascript等; 2. 只使用已定义的变量,变量将在多次调用之间持续保持; @@ -147,8 +221,7 @@ system_prompt: |- 4. 正确使用工具/助手的入参,使用关键字参数,不要用字典形式; 5. 避免在一轮对话中进行过多的工具/助手调用,这会导致输出格式难以预测; 6. 只在需要时调用工具/助手,不重复相同参数的调用; - 7. 只能从以下模块导入:{{authorized_imports}}; - 8. 使用变量名保存函数调用结果,在每个中间步骤中,您可以使用“print()”来保存您需要的任何重要信息。被保存的信息在代码执行之间保持。print()输出的内容应被视为字符串,不要对其进行字典相关操作如.get()、[]等,避免类型错误; + 7. 使用变量名保存函数调用结果,在每个中间步骤中,您可以使用“print()”来保存您需要的任何重要信息。被保存的信息在代码执行之间保持。print()输出的内容应被视为字符串,不要对其进行字典相关操作如.get()、[]等,避免类型错误; 9. 示例中的代码避免出现**if**、**for**等逻辑,仅调用工具/助手,示例中的每一次的行动都是确定事件。如果有不同的条件,你应该给出不同条件下的示例; 10. 工具调用使用关键字参数,如:tool_name(param1="value1", param2="value2"); 11. 
助手调用必须使用task参数,如:assistant_name(task="任务描述"); @@ -177,7 +250,7 @@ managed_agent: planning: initial_plan: |- - + update_plan_pre_messages: |- update_plan_post_messages: |- @@ -186,4 +259,4 @@ planning: final_answer: pre_messages: |- - post_messages: |- \ No newline at end of file + post_messages: |- diff --git a/backend/prompts/utils/prompt_generate_en.yaml b/backend/prompts/utils/prompt_generate_en.yaml index 499d3c4ba..7f55becd3 100644 --- a/backend/prompts/utils/prompt_generate_en.yaml +++ b/backend/prompts/utils/prompt_generate_en.yaml @@ -68,14 +68,13 @@ FEW_SHOTS_SYSTEM_PROMPT: |- 4. Use tool/assistant input parameters correctly, use keyword arguments, not dictionary format; 5. Avoid making too many tool calls in one round of conversation, as this will make the output format unpredictable; 6. Only call tools/assistants when needed, do not repeat calls with the same parameters; - 7. Only import from the following modules: {{authorized_imports}}; - 8. Use variable names to save function call results. In each intermediate step, you can use "print()" to save any important information you need. Saved information persists between code executions; - 9. Avoid **if**, **for** and other logic in example code, only call tools/assistants. Each action in examples should be a determined event. If there are different conditions, you should provide examples for different conditions; - 10. Tool calls use keyword arguments, such as: tool_name(param1="value1", param2="value2"); - 11. Assistant calls must use "task" as the parameter name, such as: assistant_name(task="task description"). + 7. Use variable names to save function call results. In each intermediate step, you can use "print()" to save any important information you need. Saved information persists between code executions; + 8. Avoid **if**, **for** and other logic in example code, only call tools/assistants. Each action in examples should be a determined event. 
If there are different conditions, you should provide examples for different conditions; + 9. Tool calls use keyword arguments, such as: tool_name(param1="value1", param2="value2"); + 10. Assistant calls must use "task" as the parameter name, such as: assistant_name(task="task description"). ### Compliant Examples: - Task 1: "Introduce the Oriental Pearl Tower" + Task 1: "Introduce the Oriental Pearl Tower"+ Think: I will first use the knowledge_base_search tool to find if there is relevant information in the local knowledge base. Code: diff --git a/backend/prompts/utils/prompt_generate_zh.yaml b/backend/prompts/utils/prompt_generate_zh.yaml index bc7122bdf..d513bc860 100644 --- a/backend/prompts/utils/prompt_generate_zh.yaml +++ b/backend/prompts/utils/prompt_generate_zh.yaml @@ -67,8 +67,7 @@ FEW_SHOTS_SYSTEM_PROMPT: |- 4. 正确使用工具/助手的入参,使用关键字参数,不要用字典形式; 5. 避免在一轮对话中进行过多的工具调用,这会导致输出格式难以预测; 6. 只在需要时调用工具/助手,不重复相同参数的调用; - 7. 只能从以下模块导入:{{authorized_imports}}; - 8. 使用变量名保存函数调用结果,在每个中间步骤中,您可以使用“print()”来保存您需要的任何重要信息。被保存的信息在代码执行之间保持; + 7. 使用变量名保存函数调用结果,在每个中间步骤中,您可以使用“print()”来保存您需要的任何重要信息。被保存的信息在代码执行之间保持; 9. 示例中的代码避免出现**if**、**for**等逻辑,仅调用工具/助手,示例中的每一次的行动都是确定事件。如果有不同的条件,你应该给出不同条件下的示例; 10. 工具调用使用关键字参数,如:tool_name(param1="value1", param2="value2"); 11. 
助手调用必须使用"task"作为参数名,如:assistant_name(task="任务描述")。 diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 65e27107a..04b94589c 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -12,11 +12,12 @@ dependencies = [ "supabase>=2.18.1", "websocket-client>=1.8.0", "pyyaml>=6.0.2", + "ruamel-yaml==0.19.1", "redis>=5.0.0", "fastmcp==2.12.0", "langchain>=0.3.26", "scikit-learn>=1.0.0", - "numpy>=1.24.0" + "numpy>=1.24.0", ] [project.optional-dependencies] diff --git a/backend/services/agent_service.py b/backend/services/agent_service.py index c4a1de3ec..f7ac4bbd7 100644 --- a/backend/services/agent_service.py +++ b/backend/services/agent_service.py @@ -27,6 +27,7 @@ ExportAndImportAgentInfo, ExportAndImportDataFormat, MCPInfo, + SkillInstanceInfoRequest, ToolInstanceInfoRequest, ToolSourceEnum, ModelConnectStatusEnum ) @@ -57,6 +58,8 @@ query_tool_instances_by_agent_id, search_tools_for_sub_agent ) +from database import skill_db +from database.agent_version_db import query_version_list from database.group_db import query_group_ids_by_user from database.user_tenant_db import get_user_tenant_by_user_id from utils.str_utils import convert_list_to_string, convert_string_to_list @@ -613,12 +616,9 @@ async def _stream_agent_chunks( except Exception as run_exc: logger.error(f"Agent run error: {str(run_exc)}") # Emit an error chunk and terminate the stream immediately - try: - error_payload = json.dumps( - {"type": "error", "content": str(run_exc)}, ensure_ascii=False) - yield f"data: {error_payload}\n\n" - finally: - return + error_payload = json.dumps( + {"type": "error", "content": str(run_exc)}, ensure_ascii=False) + yield f"data: {error_payload}\n\n" finally: # Persist assistant messages for non-debug runs if not agent_request.is_debug: @@ -880,6 +880,55 @@ async def update_agent_info_impl(request: AgentInfoRequest, authorization: str = logger.error(f"Failed to update agent tools: {str(e)}") raise ValueError(f"Failed to update agent tools: 
{str(e)}") + # Handle enabled skills saving when provided + try: + if request.enabled_skill_ids is not None and agent_id is not None: + enabled_set = set(request.enabled_skill_ids) + # Query existing skill instances for this agent + existing_instances = skill_db.query_skill_instances_by_agent_id( + agent_id, tenant_id) + + # Handle unselected skill (already exist instance) -> enabled=False + for instance in existing_instances: + inst_skill_id = instance.get("skill_id") + if inst_skill_id is not None and inst_skill_id not in enabled_set: + skill_db.create_or_update_skill_by_skill_info( + skill_info=SkillInstanceInfoRequest( + skill_id=inst_skill_id, + agent_id=agent_id, + skill_description=instance.get("skill_description"), + skill_content=instance.get("skill_content"), + enabled=False + ), + tenant_id=tenant_id, + user_id=user_id + ) + + # Handle selected skill -> enabled=True (create or update) + for skill_id in enabled_set: + # Keep existing skill_description and skill_content if any + existing_instance = next( + (inst for inst in existing_instances + if inst.get("skill_id") == skill_id), + None + ) + skill_description = (existing_instance or {}).get("skill_description") + skill_content = (existing_instance or {}).get("skill_content") + skill_db.create_or_update_skill_by_skill_info( + skill_info=SkillInstanceInfoRequest( + skill_id=skill_id, + agent_id=agent_id, + skill_description=skill_description, + skill_content=skill_content, + enabled=True, + ), + tenant_id=tenant_id, + user_id=user_id + ) + except Exception as e: + logger.error(f"Failed to update agent skills: {str(e)}") + raise ValueError(f"Failed to update agent skills: {str(e)}") + # Handle related agents saving when provided try: if request.related_agent_ids is not None and agent_id is not None: @@ -930,6 +979,7 @@ async def delete_agent_impl(agent_id: int, tenant_id: str, user_id: str): delete_agent_by_id(agent_id, tenant_id, user_id) delete_agent_relationship(agent_id, tenant_id, user_id) 
delete_tools_by_agent_id(agent_id, tenant_id, user_id) + skill_db.delete_skills_by_agent_id(agent_id, tenant_id, user_id) # Clean up all memory data related to the agent await clear_agent_memory(agent_id, tenant_id, user_id) @@ -1953,6 +2003,26 @@ async def get_agent_id_by_name(agent_name: str, tenant_id: str) -> int: raise Exception("agent not found") +def get_agent_by_name_impl(agent_name: str, tenant_id: str) -> dict: + """ + Resolve agent id and latest published version by agent name. + + Returns: + dict with agent_id and latest_version_no (may be None) + """ + if not agent_name: + raise Exception("agent_name required") + try: + agent_id = search_agent_id_by_agent_name(agent_name, tenant_id) + versions = query_version_list(agent_id, tenant_id) + latest_version = versions[0]["version_no"] if versions else None + return {"agent_id": agent_id, "latest_version_no": latest_version} + except Exception as _: + logger.error( + f"Failed to find agent '{agent_name}' in tenant {tenant_id}") + raise Exception("agent not found") + + def delete_related_agent_impl(parent_agent_id: int, child_agent_id: int, tenant_id: str): """ Delete the relationship between a parent agent and its child agent diff --git a/backend/services/agent_version_service.py b/backend/services/agent_version_service.py index 554b3a6d1..be0b6a564 100644 --- a/backend/services/agent_version_service.py +++ b/backend/services/agent_version_service.py @@ -17,9 +17,11 @@ insert_agent_snapshot, insert_tool_snapshot, insert_relation_snapshot, + insert_skill_snapshot, delete_agent_snapshot, delete_tool_snapshot, delete_relation_snapshot, + delete_skill_snapshot, get_next_version_no, delete_version, SOURCE_TYPE_NORMAL, @@ -94,6 +96,22 @@ def publish_version_impl( _remove_audit_fields_for_insert(rel_snapshot) insert_relation_snapshot(rel_snapshot) + # Get skill instances from draft (version_no=0) + from database import skill_db as skill_db_module + skills_draft = skill_db_module.query_skill_instances_by_agent_id( + 
agent_id=agent_id, + tenant_id=tenant_id, + version_no=0 + ) + + # Insert skill instance snapshots + for skill in skills_draft: + skill_snapshot = skill.copy() + skill_snapshot.pop('version_no', None) + skill_snapshot['version_no'] = new_version_no + _remove_audit_fields_for_insert(skill_snapshot) + insert_skill_snapshot(skill_snapshot) + # Create version metadata version_data = { 'tenant_id': tenant_id, @@ -154,7 +172,7 @@ def get_version_detail_impl( ) -> dict: """ Get version detail including snapshot data, structured like agent info. - Returns agent info with tools, sub_agents, availability, etc. + Returns agent info with tools, sub_agents, skills, availability, etc. """ result: Dict[str, Any] = {} @@ -193,6 +211,16 @@ def get_version_detail_impl( # Extract sub_agent_id_list from relations result['sub_agent_id_list'] = [r['selected_agent_id'] for r in relations_snapshot] + # Get skill instances for this version (from ag_skill_instance_t with version_no) + from database import skill_db as skill_db_module + skills_snapshot = skill_db_module.query_skill_instances_by_agent_id( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no + ) + # Add enabled skills to result + result['skills'] = [s for s in skills_snapshot if s.get('enabled', True)] + # Get model name from model_id if result.get('model_id') is not None and result['model_id'] != 0: model_info = get_model_by_model_id(result['model_id']) @@ -379,7 +407,7 @@ def delete_version_impl( ) -> dict: """ Soft delete a version by setting delete_flag='Y' - Also soft deletes all related snapshot data (agent, tools, relations) for this version + Also soft deletes all related snapshot data (agent, tools, relations, skills) for this version """ # Check if version exists version = search_version_by_version_no(agent_id, tenant_id, version_no) @@ -431,6 +459,14 @@ def delete_version_impl( deleted_by=user_id, ) + # 4. 
Delete skill instance snapshots + delete_skill_snapshot( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no, + deleted_by=user_id, + ) + logger.info(f"Successfully deleted version {version_no} and all related snapshots for agent_id={agent_id}, tenant_id={tenant_id}") return {"message": f"Version {version_no} deleted successfully"} @@ -549,6 +585,17 @@ def compare_versions_impl( 'value_b': sub_agents_b_count, }) + # Compare skills count + skills_a_count = len(version_a.get('skills', [])) + skills_b_count = len(version_b.get('skills', [])) + if skills_a_count != skills_b_count: + differences.append({ + 'field': 'skills_count', + 'label': 'Skills Count', + 'value_a': skills_a_count, + 'value_b': skills_b_count, + }) + return { 'version_a': version_a, 'version_b': version_b, @@ -565,6 +612,8 @@ def _get_version_detail_or_draft( Get version detail for published versions, or draft data for version 0. Returns structured agent info similar to get_version_detail_impl. """ + from database import skill_db as skill_db_module + result: Dict[str, Any] = {} if version_no == 0: @@ -581,6 +630,15 @@ def _get_version_detail_or_draft( # Add tools (only enabled tools) result['tools'] = [t for t in tools_draft if t.get('enabled', True)] result['sub_agent_id_list'] = [r['selected_agent_id'] for r in relations_draft] + + # Get draft skill instances (version_no=0) + skills_draft = skill_db_module.query_skill_instances_by_agent_id( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=0 + ) + result['skills'] = [s for s in skills_draft if s.get('enabled', True)] + result['version'] = { 'version_name': 'Draft', 'version_status': 'DRAFT', @@ -589,7 +647,7 @@ def _get_version_detail_or_draft( 'source_version_no': 0, } else: - # Get published version detail + # Get published version detail (already includes skills from get_version_detail_impl) result = get_version_detail_impl(agent_id, tenant_id, version_no) # Get model name from model_id diff --git 
a/backend/services/mcp_container_service.py b/backend/services/mcp_container_service.py index 4c16dedd8..d2ff6c5cf 100644 --- a/backend/services/mcp_container_service.py +++ b/backend/services/mcp_container_service.py @@ -11,8 +11,10 @@ from typing import Dict, List, Optional, AsyncGenerator from consts.exceptions import MCPConnectionError, MCPContainerError +from consts.const import IS_DEPLOYED_BY_KUBERNETES, KUBERNETES_NAMESPACE from nexent.container import ( DockerContainerConfig, + KubernetesContainerConfig, create_container_client_from_config, ContainerError, ContainerConnectionError, @@ -36,19 +38,26 @@ def __init__(self, docker_socket_path: Optional[str] = None): Args: docker_socket_path: Path to Docker socket. If None, uses platform default. For container access, mount docker socket: -v /var/run/docker.sock:/var/run/docker.sock + Only used when running in Docker mode. """ try: - # Create Docker configuration - config = DockerContainerConfig( - docker_socket_path=docker_socket_path - ) - # Create container client from config + if IS_DEPLOYED_BY_KUBERNETES: + logger.info("Initializing Kubernetes container client") + config = KubernetesContainerConfig( + namespace=KUBERNETES_NAMESPACE, + in_cluster=True, + ) + else: + logger.info("Initializing Docker container client") + config = DockerContainerConfig( + docker_socket_path=docker_socket_path + ) self.client = create_container_client_from_config(config) logger.info( - "MCPContainerManager initialized using SDK container module") + f"MCPContainerManager initialized using SDK container module (type: {'kubernetes' if IS_DEPLOYED_BY_KUBERNETES else 'docker'})") except ContainerError as e: logger.error(f"Failed to initialize container manager: {e}") - raise MCPContainerError(f"Cannot connect to Docker: {e}") + raise MCPContainerError(f"Cannot connect to container runtime: {e}") async def load_image_from_tar_file(self, tar_file_path: str) -> str: """ @@ -270,75 +279,150 @@ async def stream_container_logs( Log lines 
as strings """ try: - container = self.client.client.containers.get(container_id) - loop = asyncio.get_event_loop() - - # First, get initial logs in a thread pool to avoid blocking - initial_logs = await loop.run_in_executor( - None, - lambda: container.logs( - tail=tail, stdout=True, stderr=True, timestamps=False - ) - ) - if initial_logs: - decoded = initial_logs.decode("utf-8", errors="replace") - for line in decoded.splitlines(): - if line.strip(): # Only yield non-empty lines - yield line - - # Then, if follow is True, stream new logs - if follow: - # Create a queue to pass log chunks from thread to async generator - log_queue = asyncio.Queue() - # Use list to allow modification from nested function - stop_flag = [False] - - def _stream_logs_sync(): - """Run blocking log stream in thread""" + if IS_DEPLOYED_BY_KUBERNETES: + # Kubernetes mode: use SDK's read_namespaced_pod_log with follow + namespace = KUBERNETES_NAMESPACE + # Resolve container_id (UID) to actual Pod name + pod_name = self.client._resolve_pod_name(container_id) + if not pod_name: + logger.warning(f"Pod {container_id} not found") + return + + # First, get initial logs + initial_logs = self.client.get_container_logs(container_id, tail=tail) + if initial_logs: + for line in initial_logs.splitlines(): + if line.strip(): + yield line + + if follow: + # Use Kubernetes log API with follow=True in background thread + # (same pattern as Docker) + loop = asyncio.get_event_loop() + log_queue = asyncio.Queue() + stop_flag = [False] + + def _stream_logs_sync(): + """Run blocking Kubernetes log stream in thread""" + try: + # Kubernetes log API with follow=True returns a generator + log_stream = self.client.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace=namespace, + container="mcp-server", + follow=True, + timestamps=False, + _preload_content=False, + tail_lines=0, # Only new logs after initial batch + ) + for log_line in log_stream: + if stop_flag[0]: + break + # Kubernetes API returns bytes, 
decode to string + if isinstance(log_line, bytes): + log_line = log_line.decode("utf-8", errors="replace") + # Strip trailing newline (Kubernetes API adds \n per line) + if log_line.strip(): + asyncio.run_coroutine_threadsafe( + log_queue.put(log_line.rstrip("\n")), loop + ) + # Signal end of stream + asyncio.run_coroutine_threadsafe( + log_queue.put(None), loop + ) + except Exception as e: + logger.error(f"Error in Kubernetes log stream thread: {e}") + asyncio.run_coroutine_threadsafe( + log_queue.put(None), loop + ) + + # Start streaming in background thread + stream_thread = threading.Thread( + target=_stream_logs_sync, daemon=True + ) + stream_thread.start() + + # Process log lines from queue try: - log_stream = container.logs( - stdout=True, - stderr=True, - follow=True, - stream=True, - timestamps=False, - tail=0, # Only new logs - ) - for log_chunk in log_stream: - if stop_flag[0]: + while True: + log_line = await log_queue.get() + if log_line is None: # End of stream signal break - # Put chunks in queue (will be processed in async context) + if log_line.strip(): + yield log_line + finally: + stop_flag[0] = True + else: + # Docker mode: use native Docker API for streaming + container = self.client.client.containers.get(container_id) + loop = asyncio.get_event_loop() + + # First, get initial logs in a thread pool to avoid blocking + initial_logs = await loop.run_in_executor( + None, + lambda: container.logs( + tail=tail, stdout=True, stderr=True, timestamps=False + ) + ) + if initial_logs: + decoded = initial_logs.decode("utf-8", errors="replace") + for line in decoded.splitlines(): + if line.strip(): # Only yield non-empty lines + yield line + + # Then, if follow is True, stream new logs + if follow: + # Create a queue to pass log chunks from thread to async generator + log_queue = asyncio.Queue() + # Use list to allow modification from nested function + stop_flag = [False] + + def _stream_logs_sync(): + """Run blocking log stream in thread""" + try: + 
log_stream = container.logs( + stdout=True, + stderr=True, + follow=True, + stream=True, + timestamps=False, + tail=0, # Only new logs + ) + for log_chunk in log_stream: + if stop_flag[0]: + break + # Put chunks in queue (will be processed in async context) + asyncio.run_coroutine_threadsafe( + log_queue.put(log_chunk), loop + ) + # Signal end of stream + asyncio.run_coroutine_threadsafe( + log_queue.put(None), loop + ) + except Exception as e: + logger.error(f"Error in log stream thread: {e}") asyncio.run_coroutine_threadsafe( - log_queue.put(log_chunk), loop + log_queue.put(None), loop ) - # Signal end of stream - asyncio.run_coroutine_threadsafe( - log_queue.put(None), loop - ) - except Exception as e: - logger.error(f"Error in log stream thread: {e}") - asyncio.run_coroutine_threadsafe( - log_queue.put(None), loop - ) - - # Start streaming in background thread - stream_thread = threading.Thread( - target=_stream_logs_sync, daemon=True) - stream_thread.start() - - # Process log chunks from queue - try: - while True: - log_chunk = await log_queue.get() - if log_chunk is None: # End of stream signal - break - decoded = log_chunk.decode("utf-8", errors="replace") - # Split by newlines and yield each line - for line in decoded.splitlines(): - if line.strip(): # Only yield non-empty lines - yield line - finally: - stop_flag[0] = True + + # Start streaming in background thread + stream_thread = threading.Thread( + target=_stream_logs_sync, daemon=True) + stream_thread.start() + + # Process log chunks from queue + try: + while True: + log_chunk = await log_queue.get() + if log_chunk is None: # End of stream signal + break + decoded = log_chunk.decode("utf-8", errors="replace") + # Split by newlines and yield each line + for line in decoded.splitlines(): + if line.strip(): # Only yield non-empty lines + yield line + finally: + stop_flag[0] = True except Exception as e: logger.error(f"Failed to stream container logs: {e}") yield f"Error retrieving logs: {e}" diff --git 
a/backend/services/model_management_service.py b/backend/services/model_management_service.py index a18c16c36..d012803be 100644 --- a/backend/services/model_management_service.py +++ b/backend/services/model_management_service.py @@ -199,7 +199,11 @@ async def list_provider_models_for_tenant(tenant_id: str, provider: str, model_t model_list = get_models_by_tenant_factory_type( tenant_id, provider, model_type) for model in model_list: - model["id"] = model["model_repo"] + "/" + model["model_name"] + # Use add_repo_to_name for consistent format with /model/list API + model["id"] = add_repo_to_name( + model_repo=model["model_repo"], + model_name=model["model_name"], + ) logging.debug(f"Provider model {provider} created successfully") return model_list diff --git a/backend/services/skill_service.py b/backend/services/skill_service.py new file mode 100644 index 000000000..cf47b4df4 --- /dev/null +++ b/backend/services/skill_service.py @@ -0,0 +1,1445 @@ +"""Skill management service.""" + +import io +import json +import logging +import os +from typing import Any, Dict, List, Optional, Union + +import yaml + +from nexent.skills import SkillManager +from nexent.skills.skill_loader import SkillLoader +from consts.const import CONTAINER_SKILLS_PATH, ROOT_DIR +from consts.exceptions import SkillException +from database import skill_db +from database.db_models import SkillInfo + +logger = logging.getLogger(__name__) + +_skill_manager: Optional[SkillManager] = None + + +def _normalize_zip_entry_path(name: str) -> str: + """Normalize a ZIP member path for comparison (slashes, strip ./).""" + norm = name.replace("\\", "/").strip() + while norm.startswith("./"): + norm = norm[2:] + return norm + + +def _find_zip_member_config_yaml( + file_list: List[str], + preferred_skill_root: Optional[str] = None, +) -> Optional[str]: + """Return the ZIP entry path for .../config/config.yaml (any depth; filename case-insensitive). 
+ + If preferred_skill_root is set (usually the folder containing SKILL.md, e.g. zip root + ``my_skill/SKILL.md`` -> ``my_skill``), prefer ``/config/config.yaml``. + """ + suffix = "/config/config.yaml" + root_only = "config/config.yaml" + candidates: List[str] = [] + for name in file_list: + if name.endswith("/"): + continue + norm = _normalize_zip_entry_path(name) + if not norm: + continue + nlow = norm.lower() + if nlow == root_only or nlow.endswith(suffix): + candidates.append(name) + + if not candidates: + return None + + if preferred_skill_root: + pref = _normalize_zip_entry_path(preferred_skill_root) + if pref: + pref_low = pref.lower() + expected_suffix = f"{pref_low}/config/config.yaml" + for name in candidates: + if _normalize_zip_entry_path(name).lower() == expected_suffix: + return name + for name in candidates: + n = _normalize_zip_entry_path(name).lower() + if n.startswith(pref_low + "/"): + return name + + return candidates[0] + + +def _params_dict_to_storable(data: Dict[str, Any]) -> Dict[str, Any]: + """Ensure params are JSON-serializable for the database JSON column.""" + try: + return json.loads(json.dumps(data, default=str)) + except (TypeError, ValueError) as exc: + raise SkillException( + f"params from config/config.yaml cannot be stored: {exc}" + ) from exc + + +def _comment_text_from_token(tok: Any) -> Optional[str]: + """Normalize a ruamel CommentToken (or similar) to tooltip text after ``#``.""" + if tok is None: + return None + val = getattr(tok, "value", None) + if isinstance(val, str): + s = val.strip() + if s.startswith("#"): + return s[1:].strip() + return None + + +def _tuple_slot2(tok_container: Any) -> Any: + """Return ruamel per-key tuple slot index 2 (EOL / before-next-key comment token).""" + if not tok_container or len(tok_container) <= 2: + return None + return tok_container[2] + + +def _is_before_next_sibling_comment_token(tok: Any) -> bool: + """True if token is a comment line placed *above the next key* (starts with newline 
in ruamel).""" + if tok is None: + return False + val = getattr(tok, "value", None) + return isinstance(val, str) and val.startswith("\n") + + +def _flatten_ca_comment_to_text(comment_field: Any) -> Optional[str]: + """Join ``#`` lines from ``ca.comment`` (block header above first key in map or first list item).""" + if not comment_field: + return None + parts: List[str] = [] + if isinstance(comment_field, list): + for part in comment_field: + if part is None: + continue + if isinstance(part, list): + for tok in part: + t = _comment_text_from_token(tok) + if t: + parts.append(t) + else: + t = _comment_text_from_token(part) + if t: + parts.append(t) + if not parts: + return None + return " ".join(parts) + + +def _comment_from_map_block_header(cm: Any) -> Optional[str]: + """Lines above the first key in this ``CommentedMap`` (``ca.comment``).""" + ca = getattr(cm, "ca", None) + if not ca or not ca.comment: + return None + return _flatten_ca_comment_to_text(ca.comment) + + +def _tooltip_for_commented_map_key(cm: Any, ordered_keys: List[Any], index: int, key: Any) -> Optional[str]: + """Collect tooltip text: block header, line-above key, and same-line EOL ``#`` for one mapping key.""" + tips: List[str] = [] + if index == 0: + h = _comment_from_map_block_header(cm) + if h: + tips.append(h) + if index > 0: + prev_k = ordered_keys[index - 1] + ca = getattr(cm, "ca", None) + if ca and ca.items: + prev_tup = ca.items.get(prev_k) + tok = _tuple_slot2(prev_tup) if prev_tup else None + if _is_before_next_sibling_comment_token(tok): + t = _comment_text_from_token(tok) + if t: + tips.append(t) + ca = getattr(cm, "ca", None) + if ca and ca.items: + tup = ca.items.get(key) + tok = _tuple_slot2(tup) if tup else None + if tok is not None and not _is_before_next_sibling_comment_token(tok): + t = _comment_text_from_token(tok) + if t: + tips.append(t) + if not tips: + return None + return " ".join(tips) + + +def _tooltip_for_commented_seq_index(seq: Any, index: int) -> Optional[str]: + 
"""Same rules as maps: ``ca.comment`` for item 0; slot 0 on previous item for 'line above next'.""" + tips: List[str] = [] + if index == 0: + ca = getattr(seq, "ca", None) + if ca and ca.comment: + h = _flatten_ca_comment_to_text(ca.comment) + if h: + tips.append(h) + if index > 0: + ca = getattr(seq, "ca", None) + if ca and ca.items: + prev_tup = ca.items.get(index - 1) + if prev_tup and len(prev_tup) > 0 and prev_tup[0] is not None: + tok = prev_tup[0] + if _is_before_next_sibling_comment_token(tok): + t = _comment_text_from_token(tok) + if t: + tips.append(t) + ca = getattr(seq, "ca", None) + if ca and ca.items: + tup = ca.items.get(index) + if tup: + tok = _tuple_slot2(tup) + if tok is not None and not _is_before_next_sibling_comment_token(tok): + t = _comment_text_from_token(tok) + if t: + tips.append(t) + if not tips: + return None + return " ".join(tips) + + +def _apply_inline_comment_to_scalar(val: Any, comment: Optional[str]) -> Any: + """Append `` # comment`` to scalars so the UI can show tooltips (same as frontend convention).""" + if not comment: + return val + if isinstance(val, str): + return f"{val} # {comment}" + if isinstance(val, (dict, list)): + return val + try: + encoded = json.dumps(val, ensure_ascii=False) + except (TypeError, ValueError): + encoded = str(val) + return f"{encoded} # {comment}" + + +def _commented_tree_to_plain(node: Any) -> Any: + """Turn ruamel CommentedMap/Seq into plain dict/list. + + YAML ``#`` comments are merged only into **scalar** values as ``value # tip`` (same as the UI). + Block / line-above-key comments attached to **mapping or list values** are not persisted (no ``_comment`` keys). 
+ """ + from ruamel.yaml.comments import CommentedMap, CommentedSeq + + if isinstance(node, CommentedMap): + ordered_keys = list(node.keys()) + out: Dict[str, Any] = {} + for i, k in enumerate(ordered_keys): + v = node[k] + plain_v = _commented_tree_to_plain(v) + tip = _tooltip_for_commented_map_key(node, ordered_keys, i, k) + if tip is not None and not isinstance(plain_v, (dict, list)): + plain_v = _apply_inline_comment_to_scalar(plain_v, tip) + out[k] = plain_v + return out + if isinstance(node, CommentedSeq): + out_list: List[Any] = [] + for i, v in enumerate(node): + plain_v = _commented_tree_to_plain(v) + tip = _tooltip_for_commented_seq_index(node, i) + if tip is not None and not isinstance(plain_v, (dict, list)): + plain_v = _apply_inline_comment_to_scalar(plain_v, tip) + out_list.append(plain_v) + return out_list + return node + + +def _parse_yaml_with_ruamel_merge_eol_comments(text: str) -> Dict[str, Any]: + """Parse YAML with ruamel; merge ``#`` into scalar values only (``value # tip`` for the UI). + + Does not inject ``_comment`` into nested objects; non-scalar-adjacent YAML comments are dropped. + """ + from ruamel.yaml import YAML + from ruamel.yaml.comments import CommentedMap + + # Round-trip loader preserves ``CommentedMap`` and comment tokens; ``safe`` returns plain dict. 
+ y = YAML(typ="rt") + try: + root = y.load(text) + except Exception as exc: + raise SkillException( + f"Invalid YAML in config/config.yaml: {exc}" + ) from exc + if root is None: + return {} + if isinstance(root, CommentedMap): + plain = _commented_tree_to_plain(root) + elif isinstance(root, dict): + plain = root + else: + raise SkillException( + "config/config.yaml must contain a JSON or YAML object (mapping), not a list or scalar" + ) + if not isinstance(plain, dict): + raise SkillException( + "config/config.yaml must contain a JSON or YAML object (mapping), not a list or scalar" + ) + return _params_dict_to_storable(plain) + + +def _parse_yaml_fallback_pyyaml(text: str) -> Dict[str, Any]: + """Parse YAML with PyYAML (comments are dropped).""" + try: + data = yaml.safe_load(text) + except yaml.YAMLError as exc: + raise SkillException( + f"Invalid JSON or YAML in config/config.yaml: {exc}" + ) from exc + if data is None: + return {} + if not isinstance(data, dict): + raise SkillException( + "config/config.yaml must contain a JSON or YAML object (mapping), not a list or scalar" + ) + return _params_dict_to_storable(data) + + +def _parse_skill_params_from_config_bytes(raw: bytes) -> Dict[str, Any]: + """Parse JSON or YAML from config/config.yaml bytes (DB upload path; scalar ``#`` tips merged when possible).""" + text = raw.decode("utf-8-sig").strip() + if not text: + return {} + try: + data = json.loads(text) + except json.JSONDecodeError: + try: + return _parse_yaml_with_ruamel_merge_eol_comments(text) + except ImportError: + logger.warning("ruamel.yaml not installed; YAML comments will be dropped on parse") + return _parse_yaml_fallback_pyyaml(text) + except SkillException: + raise + except Exception as exc: + logger.warning( + "ruamel YAML parse failed (%s); falling back to PyYAML", + exc, + ) + return _parse_yaml_fallback_pyyaml(text) + else: + if not isinstance(data, dict): + raise SkillException( + "config/config.yaml must contain a JSON or YAML object 
(mapping), not a list or scalar" + ) + return _params_dict_to_storable(data) + + +def _read_params_from_zip_config_yaml( + zip_bytes: bytes, + preferred_skill_root: Optional[str] = None, +) -> Optional[Dict[str, Any]]: + """If the archive contains config/config.yaml, read and parse it into params; else None.""" + import zipfile + + zip_stream = io.BytesIO(zip_bytes) + with zipfile.ZipFile(zip_stream, "r") as zf: + member = _find_zip_member_config_yaml( + zf.namelist(), + preferred_skill_root=preferred_skill_root, + ) + if not member: + return None + raw = zf.read(member) + params = _parse_skill_params_from_config_bytes(raw) + logger.info("Loaded skill params from ZIP member %s", member) + return params + + +def _local_skill_config_yaml_path(skill_name: str, local_skills_dir: str) -> str: + """Absolute path to //config/config.yaml.""" + return os.path.join(local_skills_dir, skill_name, "config", "config.yaml") + + +def _write_skill_params_to_local_config_yaml( + skill_name: str, + params: Dict[str, Any], + local_skills_dir: str, +) -> None: + """Write params to config/config.yaml; scalar ``value # tip`` strings round-trip as YAML comments above keys.""" + from utils.skill_params_utils import params_dict_to_roundtrip_yaml_text + + if not local_skills_dir: + return + config_dir = os.path.join(local_skills_dir, skill_name, "config") + os.makedirs(config_dir, exist_ok=True) + path = _local_skill_config_yaml_path(skill_name, local_skills_dir) + text = params_dict_to_roundtrip_yaml_text(params) + with open(path, "w", encoding="utf-8") as f: + f.write(text) + logger.info("Wrote skill params to %s", path) + + +def _remove_local_skill_config_yaml(skill_name: str, local_skills_dir: str) -> None: + """Remove config/config.yaml when params are cleared in the database.""" + if not local_skills_dir: + return + path = _local_skill_config_yaml_path(skill_name, local_skills_dir) + if os.path.isfile(path): + os.remove(path) + logger.info("Removed %s (params cleared in DB)", path) + + 
+def get_skill_manager() -> SkillManager: + """Get or create the global SkillManager instance.""" + global _skill_manager + if _skill_manager is None: + _skill_manager = SkillManager(CONTAINER_SKILLS_PATH) + return _skill_manager + + +class SkillService: + """Skill management service for backend operations.""" + + def __init__(self, skill_manager: Optional[SkillManager] = None): + """Initialize SkillService. + + Args: + skill_manager: Optional SkillManager instance, uses global if not provided + """ + self.skill_manager = skill_manager or get_skill_manager() + + def _resolve_local_skills_dir_for_overlay(self) -> Optional[str]: + """Directory where skill folders live: ``SKILLS_PATH``, else ``ROOT_DIR/skills`` if present.""" + d = self.skill_manager.local_skills_dir or CONTAINER_SKILLS_PATH + if d: + return str(d).rstrip(os.sep) or None + if ROOT_DIR: + candidate = os.path.join(ROOT_DIR, "skills") + if os.path.isdir(candidate): + return candidate + return None + + def _overlay_params_from_local_config_yaml(self, skill: Dict[str, Any]) -> Dict[str, Any]: + """Prefer ``//config/config.yaml`` for ``params`` in API responses. + + The database stores comment-free JSON (no legacy ``_comment`` keys, no `` # `` suffixes). + On-disk YAML may use ``#`` lines; when the file exists, parse with ruamel (inline tips + on scalars only) and use for ``params``; otherwise use DB. 
+ """ + out = dict(skill) + local_dir = self._resolve_local_skills_dir_for_overlay() + if not local_dir: + return out + name = out.get("name") + if not name: + return out + path = _local_skill_config_yaml_path(name, local_dir) + if not os.path.isfile(path): + return out + try: + with open(path, "rb") as f: + raw = f.read() + out["params"] = _parse_skill_params_from_config_bytes(raw) + logger.info("Using local config.yaml params (scalar inline comment tooltips) for skill %s", name) + except Exception as exc: + logger.warning( + "Could not use local config.yaml for skill %s params (using DB): %s", + name, + exc, + ) + return out + + def list_skills(self, tenant_id: Optional[str] = None) -> List[Dict[str, Any]]: + """List all skills for tenant. + + Args: + tenant_id: Tenant ID (reserved for future multi-tenant support) + + Returns: + List of skill info dicts + """ + try: + skills = skill_db.list_skills() + return [self._overlay_params_from_local_config_yaml(s) for s in skills] + except Exception as e: + logger.error(f"Error listing skills: {e}") + raise SkillException(f"Failed to list skills: {str(e)}") from e + + def get_skill(self, skill_name: str, tenant_id: Optional[str] = None) -> Optional[Dict[str, Any]]: + """Get a specific skill. + + Args: + skill_name: Name of the skill + tenant_id: Tenant ID (reserved for future multi-tenant support) + + Returns: + Skill dict or None if not found + """ + try: + skill = skill_db.get_skill_by_name(skill_name) + if skill: + return self._overlay_params_from_local_config_yaml(skill) + return None + except Exception as e: + logger.error(f"Error getting skill {skill_name}: {e}") + raise SkillException(f"Failed to get skill: {str(e)}") from e + + def get_skill_by_id(self, skill_id: int) -> Optional[Dict[str, Any]]: + """Get a specific skill by ID. 
+ + Args: + skill_id: ID of the skill + + Returns: + Skill dict or None if not found + """ + try: + skill = skill_db.get_skill_by_id(skill_id) + if skill: + return self._overlay_params_from_local_config_yaml(skill) + return None + except Exception as e: + logger.error(f"Error getting skill by ID {skill_id}: {e}") + raise SkillException(f"Failed to get skill: {str(e)}") from e + + def create_skill( + self, + skill_data: Dict[str, Any], + tenant_id: Optional[str] = None, + user_id: Optional[str] = None + ) -> Dict[str, Any]: + """Create a new skill. + + Args: + skill_data: Skill data including name, description, content, etc. + tenant_id: Tenant ID (reserved for future multi-tenant support) + user_id: User ID of the creator + + Returns: + Created skill dict + + Raises: + SkillException: If skill already exists locally or in database (409) + """ + skill_name = skill_data.get("name") + if not skill_name: + raise SkillException("Skill name is required") + + # Check if skill already exists in database + existing = skill_db.get_skill_by_name(skill_name) + if existing: + raise SkillException(f"Skill '{skill_name}' already exists") + + # Check if skill directory already exists locally + resolved = self._resolve_local_skills_dir_for_overlay() + if resolved and os.path.exists(os.path.join(resolved, skill_name)): + raise SkillException(f"Skill '{skill_name}' already exists locally") + + # Set created_by and updated_by if user_id is provided + if user_id: + skill_data["created_by"] = user_id + skill_data["updated_by"] = user_id + + try: + # Create database record first + result = skill_db.create_skill(skill_data) + + # Create local skill file (SKILL.md) + self.skill_manager.save_skill(skill_data) + + # Mirror DB params to config/config.yaml when present (same layout as ZIP uploads). 
+ if self.skill_manager.local_skills_dir and skill_data.get("params") is not None: + try: + _write_skill_params_to_local_config_yaml( + skill_name, + _params_dict_to_storable(skill_data["params"]), + self.skill_manager.local_skills_dir, + ) + except Exception as exc: + logger.warning( + "Local config/config.yaml write failed after create for %s: %s", + skill_name, + exc, + ) + + logger.info(f"Created skill '{skill_name}' with local files") + return self._overlay_params_from_local_config_yaml(result) + except SkillException: + raise + except Exception as e: + logger.error(f"Error creating skill: {e}") + raise SkillException(f"Failed to create skill: {str(e)}") from e + + def create_skill_from_file( + self, + file_content: Union[bytes, str, io.BytesIO], + skill_name: Optional[str] = None, + file_type: str = "auto", + tenant_id: Optional[str] = None, + user_id: Optional[str] = None + ) -> Dict[str, Any]: + """Create a skill from file content. + + Supports two formats: + 1. Single SKILL.md file - extracts metadata and saves directly + 2. 
ZIP archive - extracts SKILL.md and all other files/scripts + + Args: + file_content: File content as bytes, string, or BytesIO + skill_name: Optional skill name (extracted from ZIP if not provided) + file_type: File type hint - "md", "zip", or "auto" (detect) + tenant_id: Tenant ID (reserved for future multi-tenant support) + user_id: User ID of the creator + + Returns: + Created skill dict + """ + content_bytes: bytes + if isinstance(file_content, str): + content_bytes = file_content.encode("utf-8") + elif isinstance(file_content, io.BytesIO): + content_bytes = file_content.getvalue() + else: + content_bytes = file_content + + if file_type == "auto": + if content_bytes.startswith(b"PK"): + file_type = "zip" + else: + file_type = "md" + + if file_type == "zip": + return self._create_skill_from_zip(content_bytes, skill_name, user_id, tenant_id) + else: + return self._create_skill_from_md(content_bytes, skill_name, user_id, tenant_id) + + def _create_skill_from_md( + self, + content_bytes: bytes, + skill_name: Optional[str] = None, + user_id: Optional[str] = None, + tenant_id: Optional[str] = None + ) -> Dict[str, Any]: + """Create skill from SKILL.md content.""" + content_str = content_bytes.decode("utf-8") + + try: + skill_data = SkillLoader.parse(content_str) + except ValueError as e: + raise SkillException(f"Invalid SKILL.md format: {e}") + + name = skill_name or skill_data.get("name") + if not name: + raise SkillException("Skill name is required") + + # Check if skill already exists in database + existing = skill_db.get_skill_by_name(name) + if existing: + raise SkillException(f"Skill '{name}' already exists") + + # Convert allowed_tools (from SKILL.md) to tool_ids for database + allowed_tools = skill_data.get("allowed_tools", []) + tool_ids = [] + if allowed_tools: + tool_ids = skill_db.get_tool_ids_by_names(allowed_tools, tenant_id) + + skill_dict = { + "name": name, + "description": skill_data.get("description", ""), + "content": skill_data.get("content", 
""), + "tags": skill_data.get("tags", []), + "source": "custom", + "tool_ids": tool_ids, + "allowed-tools": allowed_tools, # Preserve for local file sync + } + + # Set created_by and updated_by if user_id is provided + if user_id: + skill_dict["created_by"] = user_id + skill_dict["updated_by"] = user_id + + result = skill_db.create_skill(skill_dict) + + # Write SKILL.md to local storage + self.skill_manager.save_skill(skill_dict) + + return self._overlay_params_from_local_config_yaml(result) + + def _create_skill_from_zip( + self, + zip_bytes: bytes, + skill_name: Optional[str] = None, + user_id: Optional[str] = None, + tenant_id: Optional[str] = None + ) -> Dict[str, Any]: + """Create skill from ZIP archive (for file storage, content extracted from SKILL.md). + + Priority for skill_name: + 1. Parameter skill_name + 2. Root directory SKILL.md (top-level skill_name field) + 3. Subdirectory name containing SKILL.md + """ + import zipfile + + zip_stream = io.BytesIO(zip_bytes) + + try: + with zipfile.ZipFile(zip_stream, "r") as zf: + file_list = zf.namelist() + except zipfile.BadZipFile: + raise SkillException("Invalid ZIP archive") + + zip_stream.seek(0) + + skill_md_path: Optional[str] = None + detected_skill_name: Optional[str] = None + + # First: Check for SKILL.md at root level + for file_path in file_list: + if file_path.endswith("/"): + continue + normalized_path = file_path.replace("\\", "/") + parts = normalized_path.split("/") + # Root level SKILL.md (only 1 part) + if len(parts) == 1 and parts[0].lower() == "skill.md": + skill_md_path = file_path + break + + # Second: If not found at root, check subdirectory + if not skill_md_path: + for file_path in file_list: + if file_path.endswith("/"): + continue + normalized_path = file_path.replace("\\", "/") + parts = normalized_path.split("/") + if len(parts) >= 2 and parts[-1].lower() == "skill.md": + skill_md_path = file_path + detected_skill_name = parts[0] + break + + if not skill_md_path: + raise 
SkillException("SKILL.md not found in ZIP archive") + + name = skill_name or detected_skill_name + if not name: + raise SkillException("Skill name is required") + + # Check if skill already exists in database + existing = skill_db.get_skill_by_name(name) + if existing: + raise SkillException(f"Skill '{name}' already exists") + + with zipfile.ZipFile(zip_stream, "r") as zf: + skill_content = zf.read(skill_md_path).decode("utf-8") + + try: + skill_data = SkillLoader.parse(skill_content) + except ValueError as e: + raise SkillException(f"Invalid SKILL.md in ZIP: {e}") + + # If still no name, try to get from SKILL.md parsed data + if not name: + name = skill_data.get("name") + + if not name: + raise SkillException("Skill name is required") + + # Convert allowed_tools (from SKILL.md) to tool_ids for database + allowed_tools = skill_data.get("allowed_tools", []) + tool_ids = [] + if allowed_tools: + tool_ids = skill_db.get_tool_ids_by_names(allowed_tools, tenant_id) + + skill_dict = { + "name": name, + "description": skill_data.get("description", ""), + "content": skill_data.get("content", ""), + "tags": skill_data.get("tags", []), + "source": "custom", + "tool_ids": tool_ids, + "allowed-tools": allowed_tools, # Preserve for local file sync + } + + preferred_root = detected_skill_name or name + params_from_zip = _read_params_from_zip_config_yaml( + zip_bytes, + preferred_skill_root=preferred_root, + ) + if params_from_zip is not None: + skill_dict["params"] = params_from_zip + + # Set created_by and updated_by if user_id is provided + if user_id: + skill_dict["created_by"] = user_id + skill_dict["updated_by"] = user_id + + result = skill_db.create_skill(skill_dict) + + # Save SKILL.md to local storage + self.skill_manager.save_skill(skill_dict) + + self._upload_zip_files(zip_bytes, name, detected_skill_name) + + return self._overlay_params_from_local_config_yaml(result) + + def _delete_local_skill_files(self, skill_name: str) -> None: + """Delete all files within a 
skill's local directory, preserving the directory itself. + + Args: + skill_name: Name of the skill whose local files should be deleted. + """ + import shutil + + local_dir = os.path.join(self.skill_manager.local_skills_dir, skill_name) + logger.info("Starting deletion of local files for skill '%s' from '%s'", skill_name, local_dir) + + if not os.path.isdir(local_dir): + logger.info("Local skill directory does not exist, nothing to delete: %s", local_dir) + return + try: + items = os.listdir(local_dir) + logger.info("Found %d items to delete in '%s'", len(items), local_dir) + + for item in items: + item_path = os.path.join(local_dir, item) + if item_path.endswith("/"): + continue + if os.path.isdir(item_path): + shutil.rmtree(item_path) + logger.debug("Deleted directory: %s", item_path) + else: + os.remove(item_path) + logger.debug("Deleted file: %s", item_path) + logger.info("Successfully deleted all local files for skill '%s'", skill_name) + except Exception as e: + logger.error("Failed to delete local files for skill '%s': %s", skill_name, e) + + def _upload_zip_files( + self, + zip_bytes: bytes, + skill_name: str, + original_folder_name: Optional[str] = None + ) -> None: + """Extract ZIP files to local storage only. 
+ + Args: + zip_bytes: ZIP archive content + skill_name: Target skill name (for local directory) + original_folder_name: Original folder name in ZIP (if different from skill_name) + """ + import zipfile + + zip_stream = io.BytesIO(zip_bytes) + + # Determine if folder renaming is needed + needs_rename = ( + original_folder_name is not None + and original_folder_name != skill_name + ) + + logger.info( + "Starting ZIP extraction for skill '%s': needs_rename=%s, original_folder='%s'", + skill_name, needs_rename, original_folder_name + ) + + try: + with zipfile.ZipFile(zip_stream, "r") as zf: + file_list = zf.namelist() + logger.info("ZIP contains %d entries for skill '%s'", len(file_list), skill_name) + + extracted_count = 0 + for file_path in file_list: + if file_path.endswith("/"): + continue + + normalized_path = file_path.replace("\\", "/") + parts = normalized_path.split("/") + + # Calculate target relative path + if needs_rename and len(parts) >= 2 and parts[0] == original_folder_name: + # Replace original folder name with skill_name + relative_path = parts[0].replace(original_folder_name, skill_name) + "/" + "/".join(parts[1:]) + elif len(parts) >= 2: + relative_path = "/".join(parts[1:]) + else: + relative_path = normalized_path + + if not relative_path: + continue + + file_data = zf.read(file_path) + + local_dir = os.path.join(self.skill_manager.local_skills_dir, skill_name) + local_path = os.path.join(local_dir, relative_path) + os.makedirs(os.path.dirname(local_path), exist_ok=True) + with open(local_path, "wb") as f: + f.write(file_data) + extracted_count += 1 + logger.debug("Extracted file '%s' -> '%s'", file_path, local_path) + + logger.info( + "Completed ZIP extraction for skill '%s': %d files extracted to '%s'", + skill_name, extracted_count, self.skill_manager.local_skills_dir + ) + except Exception as e: + logger.error("Failed to extract ZIP files for skill '%s': %s", skill_name, e) + raise + + def update_skill_from_file( + self, + skill_name: str, + 
file_content: Union[bytes, str, io.BytesIO], + file_type: str = "auto", + tenant_id: Optional[str] = None, + user_id: Optional[str] = None + ) -> Dict[str, Any]: + """Update an existing skill from file content. + + Args: + skill_name: Name of the skill to update + file_content: File content as bytes, string, or BytesIO + file_type: File type hint - "md", "zip", or "auto" (detect) + tenant_id: Tenant ID (reserved for future multi-tenant support) + user_id: User ID of the updater + + Returns: + Updated skill dict + """ + existing = skill_db.get_skill_by_name(skill_name) + if not existing: + raise SkillException(f"Skill not found: {skill_name}") + + content_bytes: bytes + if isinstance(file_content, str): + content_bytes = file_content.encode("utf-8") + elif isinstance(file_content, io.BytesIO): + content_bytes = file_content.getvalue() + else: + content_bytes = file_content + + if file_type == "auto": + if content_bytes.startswith(b"PK"): + file_type = "zip" + else: + file_type = "md" + + if file_type == "zip": + return self._update_skill_from_zip(content_bytes, skill_name, user_id, tenant_id) + else: + return self._update_skill_from_md(content_bytes, skill_name, user_id, tenant_id) + + def _update_skill_from_md( + self, + content_bytes: bytes, + skill_name: str, + user_id: Optional[str] = None, + tenant_id: Optional[str] = None + ) -> Dict[str, Any]: + """Update skill from SKILL.md content.""" + content_str = content_bytes.decode("utf-8") + + try: + skill_data = SkillLoader.parse(content_str) + except ValueError as e: + raise SkillException(f"Invalid SKILL.md format: {e}") + + # Get allowed-tools from parsed content and try to map to tool_ids + allowed_tools = skill_data.get("allowed_tools", []) + tool_ids = [] + if allowed_tools: + tool_ids = skill_db.get_tool_ids_by_names(allowed_tools, tenant_id) + + skill_dict = { + "description": skill_data.get("description", ""), + "content": skill_data.get("content", ""), + "tags": skill_data.get("tags", []), + "tool_ids": 
tool_ids, + } + + result = skill_db.update_skill( + skill_name, skill_dict, updated_by=user_id or None + ) + + # Clean up existing local files before writing new ones + self._delete_local_skill_files(skill_name) + + # Update local storage with new SKILL.md (preserve allowed-tools) + skill_dict["name"] = skill_name + skill_dict["allowed-tools"] = allowed_tools + self.skill_manager.save_skill(skill_dict) + + return self._overlay_params_from_local_config_yaml(result) + + def _update_skill_from_zip( + self, + zip_bytes: bytes, + skill_name: str, + user_id: Optional[str] = None, + tenant_id: Optional[str] = None, + ) -> Dict[str, Any]: + """Update skill from ZIP archive.""" + existing = skill_db.get_skill_by_name(skill_name) + if not existing: + raise SkillException(f"Skill not found: {skill_name}") + + import zipfile + + zip_stream = io.BytesIO(zip_bytes) + + skill_md_path = None + original_folder_name = None + + with zipfile.ZipFile(zip_stream, "r") as zf: + file_list = zf.namelist() + + for file_path in file_list: + normalized_path = file_path.replace("\\", "/") + if normalized_path.lower().endswith("skill.md"): + parts = normalized_path.split("/") + if len(parts) >= 2: + skill_md_path = file_path + original_folder_name = parts[0] + break + + skill_content = None + if skill_md_path: + skill_content = zf.read(skill_md_path).decode("utf-8") + + # Reset stream position before _upload_zip_files reads it + zip_stream.seek(0) + + preferred_root = original_folder_name or skill_name + params_from_zip = _read_params_from_zip_config_yaml( + zip_bytes, + preferred_skill_root=preferred_root, + ) + + skill_dict = {} + allowed_tools = [] + if skill_content: + try: + skill_data = SkillLoader.parse(skill_content) + allowed_tools = skill_data.get("allowed_tools", []) + # Try to map allowed_tools to tool_ids for database + tool_ids = [] + if allowed_tools: + tool_ids = skill_db.get_tool_ids_by_names(allowed_tools, tenant_id) + skill_dict = { + "description": 
skill_data.get("description", ""), + "content": skill_data.get("content", ""), + "tags": skill_data.get("tags", []), + "tool_ids": tool_ids, + } + except ValueError as e: + logger.warning(f"Could not parse SKILL.md from ZIP: {e}") + + if params_from_zip is not None: + skill_dict["params"] = params_from_zip + + result = skill_db.update_skill( + skill_name, skill_dict, updated_by=user_id or None + ) + + # Clean up existing local files before writing new ones + self._delete_local_skill_files(skill_name) + + # Update SKILL.md in local storage (preserve allowed-tools) + skill_dict["name"] = skill_name + skill_dict["allowed-tools"] = allowed_tools + self.skill_manager.save_skill(skill_dict) + + # Update other files in local storage + self._upload_zip_files(zip_bytes, skill_name, original_folder_name) + + return self._overlay_params_from_local_config_yaml(result) + + def update_skill( + self, + skill_name: str, + skill_data: Dict[str, Any], + tenant_id: Optional[str] = None, + user_id: Optional[str] = None + ) -> Dict[str, Any]: + """Update an existing skill. + + Args: + skill_name: Name of the skill to update + skill_data: Business fields from the application layer (no audit fields). + tenant_id: Tenant ID (reserved for future multi-tenant support) + user_id: Updater id from server-side auth (JWT / session); sets DB updated_by. + + Returns: + Updated skill dict + """ + try: + existing = skill_db.get_skill_by_name(skill_name) + if not existing: + raise SkillException(f"Skill not found: {skill_name}") + + result = skill_db.update_skill( + skill_name, skill_data, updated_by=user_id or None + ) + + # Keep config/config.yaml in sync when params are updated (matches ZIP import path). 
+ if CONTAINER_SKILLS_PATH and "params" in skill_data: + try: + raw_params = skill_data["params"] + if raw_params is None: + _remove_local_skill_config_yaml(skill_name, CONTAINER_SKILLS_PATH) + else: + _write_skill_params_to_local_config_yaml( + skill_name, + _params_dict_to_storable(raw_params), + CONTAINER_SKILLS_PATH, + ) + except Exception as exc: + logger.warning( + "Local config/config.yaml sync failed after params update for %s: %s", + skill_name, + exc, + ) + + # Optional: sync SKILL.md on disk when SKILLS_PATH is configured (DB is source of truth). + if not CONTAINER_SKILLS_PATH: + logger.warning( + "SKILLS_PATH is not set; skipped local SKILL.md sync after DB update for %s", + skill_name, + ) + return self._overlay_params_from_local_config_yaml(result) + + try: + allowed_tools = skill_db.get_tool_names_by_skill_name(skill_name) + local_skill_dict = { + "name": skill_name, + "description": skill_data.get("description", existing.get("description", "")), + "content": skill_data.get("content", existing.get("content", "")), + "tags": skill_data.get("tags", existing.get("tags", [])), + "allowed-tools": allowed_tools, + } + self.skill_manager.save_skill(local_skill_dict) + except Exception as exc: + logger.warning( + "Local SKILL.md sync failed after DB update for %s: %s", + skill_name, + exc, + ) + + return self._overlay_params_from_local_config_yaml(result) + except SkillException: + raise + except Exception as e: + logger.error(f"Error updating skill {skill_name}: {e}") + raise SkillException(f"Failed to update skill: {str(e)}") from e + + def delete_skill( + self, + skill_name: str, + user_id: Optional[str] = None + ) -> bool: + """Delete a skill. 
+ + Args: + skill_name: Name of the skill to delete + tenant_id: Tenant ID (reserved for future multi-tenant support) + user_id: User ID of the user performing the delete + + Returns: + True if deleted successfully + """ + try: + # Delete local skill files from filesystem + skill_dir = os.path.join(self.skill_manager.local_skills_dir, skill_name) + if os.path.exists(skill_dir): + import shutil + shutil.rmtree(skill_dir) + logger.info(f"Deleted skill directory: {skill_dir}") + + # Delete from database (soft delete with updated_by) + return skill_db.delete_skill(skill_name, updated_by=user_id) + except Exception as e: + logger.error(f"Error deleting skill {skill_name}: {e}") + raise SkillException(f"Failed to delete skill: {str(e)}") from e + + + def get_enabled_skills_for_agent( + self, + agent_id: int, + tenant_id: str, + version_no: int = 0 + ) -> List[Dict[str, Any]]: + """Get enabled skills for a specific agent from SkillInstance table. + + Args: + agent_id: Agent ID + tenant_id: Tenant ID + version_no: Version number for fetching skill instances + + Returns: + List of enabled skill dicts + """ + try: + enabled_skills = skill_db.search_skills_for_agent( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no + ) + + result = [] + for skill_instance in enabled_skills: + skill_id = skill_instance.get("skill_id") + skill = skill_db.get_skill_by_id(skill_id) + if skill: + # Get skill info from ag_skill_info_t (repository returns keys: name, description, content) + merged = { + "skill_id": skill_id, + "name": skill.get("name"), + "description": skill.get("description", ""), + "content": skill.get("content", ""), + "enabled": skill_instance.get("enabled", True), + "tool_ids": skill.get("tool_ids", []), + } + result.append(merged) + + return result + except Exception as e: + logger.error(f"Error getting enabled skills for agent: {e}") + raise SkillException(f"Failed to get enabled skills: {str(e)}") from e + + def load_skill_directory(self, skill_name: str) 
-> Optional[Dict[str, Any]]: + """Load entire skill directory including scripts. + + Args: + skill_name: Name of the skill + + Returns: + Dict with skill metadata and local directory path, or None if not found + """ + try: + return self.skill_manager.load_skill_directory(skill_name) + except Exception as e: + logger.error(f"Error loading skill directory {skill_name}: {e}") + raise SkillException(f"Failed to load skill directory: {str(e)}") from e + + def get_skill_scripts(self, skill_name: str) -> List[str]: + """Get list of executable scripts in skill. + + Args: + skill_name: Name of the skill + + Returns: + List of script file paths + """ + try: + return self.skill_manager.get_skill_scripts(skill_name) + except Exception as e: + logger.error(f"Error getting skill scripts {skill_name}: {e}") + raise SkillException(f"Failed to get skill scripts: {str(e)}") from e + + def build_skills_summary( + self, + available_skills: Optional[List[str]] = None, + agent_id: Optional[int] = None, + tenant_id: Optional[str] = None, + version_no: int = 0 + ) -> str: + """Build skills summary with whitelist filter for prompt injection. + + Args: + available_skills: Optional whitelist of skill names to include. + If provided, only skills in this list will be included. 
+ agent_id: Agent ID for fetching skill instances + tenant_id: Tenant ID for fetching skill instances + version_no: Version number for fetching skill instances + + Returns: + XML-formatted skills summary + """ + try: + skills_to_include = [] + + if agent_id and tenant_id: + # Get skills from SkillInstance table + agent_skills = skill_db.search_skills_for_agent( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no + ) + + for skill_instance in agent_skills: + skill_id = skill_instance.get("skill_id") + skill = skill_db.get_skill_by_id(skill_id) + if skill: + if available_skills is not None and skill.get("name") not in available_skills: + continue + # Get skill info from ag_skill_info_t (repository returns keys: name, description) + skills_to_include.append({ + "name": skill.get("name"), + "description": skill.get("description", ""), + }) + else: + # Fallback: use all skills + all_skills = skill_db.list_skills() + skills_to_include = all_skills + if available_skills is not None: + available_set = set(available_skills) + skills_to_include = [s for s in all_skills if s.get("name") in available_set] + + if not skills_to_include: + return "" + + def escape_xml(s: str) -> str: + if s is None: + return "" + return str(s).replace("&", "&").replace("<", "<").replace(">", ">") + + lines = [""] + for skill in skills_to_include: + name = escape_xml(skill.get("name", "")) + description = escape_xml(skill.get("description", "")) + + lines.append(f' ') + lines.append(f' {name}') + lines.append(f' {description}') + lines.append(f' ') + + lines.append("") + + return "\n".join(lines) + except Exception as e: + logger.error(f"Error building skills summary: {e}") + raise SkillException(f"Failed to build skills summary: {str(e)}") from e + + def get_skill_content(self, skill_name: str, tenant_id: Optional[str] = None) -> str: + """Get skill content for runtime loading. 
+ + Args: + skill_name: Name of the skill to load + tenant_id: Tenant ID (reserved for future multi-tenant support) + + Returns: + Skill content in markdown format + """ + try: + skill = skill_db.get_skill_by_name(skill_name) + return skill.get("content", "") if skill else "" + except Exception as e: + logger.error(f"Error getting skill content {skill_name}: {e}") + raise SkillException(f"Failed to get skill content: {str(e)}") from e + + def get_skill_file_tree( + self, + skill_name: str, + tenant_id: Optional[str] = None + ) -> Optional[Dict[str, Any]]: + """Get file tree structure of a skill. + + Args: + skill_name: Name of the skill + tenant_id: Tenant ID (reserved for future multi-tenant support) + + Returns: + Dict with file tree structure, or None if not found + """ + try: + return self.skill_manager.get_skill_file_tree(skill_name) + except Exception as e: + logger.error(f"Error getting skill file tree: {e}") + raise SkillException(f"Failed to get skill file tree: {str(e)}") from e + + def get_skill_file_content( + self, + skill_name: str, + file_path: str, + tenant_id: Optional[str] = None + ) -> Optional[str]: + """Get content of a specific file within a skill. 
+ + Args: + skill_name: Name of the skill + file_path: Relative path to the file within the skill directory + tenant_id: Tenant ID (reserved for future multi-tenant support) + + Returns: + File content as string, or None if file not found + """ + try: + local_dir = os.path.join(self.skill_manager.local_skills_dir, skill_name) + full_path = os.path.join(local_dir, file_path) + + if not os.path.exists(full_path): + logger.warning(f"File not found: {full_path}") + return None + + with open(full_path, "r", encoding="utf-8") as f: + return f.read() + except Exception as e: + logger.error(f"Error reading skill file {skill_name}/{file_path}: {e}") + raise SkillException(f"Failed to read skill file: {str(e)}") from e + + # ============== Skill Instance Methods ============== + + def create_or_update_skill_instance( + self, + skill_info, + tenant_id: str, + user_id: str, + version_no: int = 0 + ): + """Create or update a skill instance for an agent. + + Args: + skill_info: Skill instance information (SkillInstanceInfoRequest or dict) + tenant_id: Tenant ID + user_id: User ID (will be set as created_by/updated_by) + version_no: Version number (default 0 for draft) + + Returns: + Created or updated skill instance dict + """ + from database import skill_db as skill_db_module + return skill_db_module.create_or_update_skill_by_skill_info( + skill_info=skill_info, + tenant_id=tenant_id, + user_id=user_id, + version_no=version_no + ) + + def list_skill_instances( + self, + agent_id: int, + tenant_id: str, + version_no: int = 0 + ) -> List[Dict[str, Any]]: + """List all skill instances for an agent. 
+ + Args: + agent_id: Agent ID + tenant_id: Tenant ID + version_no: Version number (default 0 for draft) + + Returns: + List of skill instance dicts + """ + from database import skill_db as skill_db_module + return skill_db_module.query_skill_instances_by_agent_id( + agent_id=agent_id, + tenant_id=tenant_id, + version_no=version_no + ) + + def get_skill_instance( + self, + agent_id: int, + skill_id: int, + tenant_id: str, + version_no: int = 0 + ) -> Optional[Dict[str, Any]]: + """Get a specific skill instance for an agent. + + Args: + agent_id: Agent ID + skill_id: Skill ID + tenant_id: Tenant ID + version_no: Version number (default 0 for draft) + + Returns: + Skill instance dict or None if not found + """ + from database import skill_db as skill_db_module + return skill_db_module.query_skill_instance_by_id( + agent_id=agent_id, + skill_id=skill_id, + tenant_id=tenant_id, + version_no=version_no + ) diff --git a/backend/services/tool_configuration_service.py b/backend/services/tool_configuration_service.py index 3e7b22d11..a0f5b2399 100644 --- a/backend/services/tool_configuration_service.py +++ b/backend/services/tool_configuration_service.py @@ -31,6 +31,7 @@ from services.vectordatabase_service import get_embedding_model, get_vector_db_core from database.client import minio_client from services.image_service import get_vlm_model +from utils.tool_utils import get_local_tools_classes, get_local_tools_description_zh logger = logging.getLogger("tool_configuration_service") @@ -104,16 +105,35 @@ def get_local_tools() -> List[ToolInfo]: tools_info = [] tools_classes = get_local_tools_classes() for tool_class in tools_classes: + # Get class-level init_param_descriptions for fallback + init_param_descriptions = getattr(tool_class, 'init_param_descriptions', {}) + init_params_list = [] sig = inspect.signature(tool_class.__init__) for param_name, param in sig.parameters.items(): - if param_name == "self" or param.default.exclude: + if param_name == "self": continue + + 
# Check if parameter has a default value and if it should be excluded + if param.default != inspect.Parameter.empty: + if hasattr(param.default, 'exclude') and param.default.exclude: + continue + + # Get description in both languages + param_description = param.default.description if hasattr(param.default, 'description') else "" + + # First try to get from param.default.description_zh (FieldInfo) + param_description_zh = param.default.description_zh if hasattr(param.default, 'description_zh') else None + + # Fallback to init_param_descriptions if not found + if param_description_zh is None and param_name in init_param_descriptions: + param_description_zh = init_param_descriptions[param_name].get('description_zh') param_info = { "type": python_type_to_json_schema(param.annotation), "name": param_name, - "description": param.default.description + "description": param_description, + "description_zh": param_description_zh } if param.default.default is PydanticUndefined: param_info["optional"] = False @@ -123,14 +143,29 @@ def get_local_tools() -> List[ToolInfo]: init_params_list.append(param_info) - # get tool fixed attributes + # Get tool fixed attributes with bilingual support + tool_description_zh = getattr(tool_class, 'description_zh', None) + tool_inputs = getattr(tool_class, 'inputs', {}) + + # Process inputs to add bilingual descriptions + processed_inputs = {} + if isinstance(tool_inputs, dict): + for key, value in tool_inputs.items(): + if isinstance(value, dict): + processed_inputs[key] = { + **value, + "description_zh": value.get("description_zh") + } + else: + processed_inputs[key] = value + tool_info = ToolInfo( name=getattr(tool_class, 'name'), description=getattr(tool_class, 'description'), + description_zh=tool_description_zh, params=init_params_list, source=ToolSourceEnum.LOCAL.value, - inputs=json.dumps(getattr(tool_class, 'inputs'), - ensure_ascii=False), + inputs=json.dumps(processed_inputs, ensure_ascii=False), output_type=getattr(tool_class, 
'output_type'), category=getattr(tool_class, 'category'), class_name=tool_class.__name__, @@ -141,22 +176,6 @@ def get_local_tools() -> List[ToolInfo]: return tools_info -def get_local_tools_classes() -> List[type]: - """ - Get all tool classes from the nexent.core.tools package - - Returns: - List of tool class objects - """ - tools_package = importlib.import_module('nexent.core.tools') - tools_classes = [] - for name in dir(tools_package): - obj = getattr(tools_package, name) - if inspect.isclass(obj): - tools_classes.append(obj) - return tools_classes - - # -------------------------------------------------- # LangChain tools discovery (functions decorated with @tool) # -------------------------------------------------- @@ -427,20 +446,61 @@ async def list_all_tools(tenant_id: str): List all tools for a given tenant """ tools_info = query_all_tools(tenant_id) + + # Get description_zh from SDK for local tools (not persisted to DB) + local_tool_descriptions = get_local_tools_description_zh() + # only return the fields needed formatted_tools = [] for tool in tools_info: + tool_name = tool.get("name") + + # Merge description_zh from SDK for local tools + if tool.get("source") == "local" and tool_name in local_tool_descriptions: + sdk_info = local_tool_descriptions[tool_name] + description_zh = sdk_info.get("description_zh") + + # Merge params description_zh from SDK (independent of tool-level description_zh) + params = tool.get("params", []) + if params: + for param in params: + if not param.get("description_zh"): + # Find matching param in SDK + for sdk_param in sdk_info.get("params", []): + if sdk_param.get("name") == param.get("name"): + param["description_zh"] = sdk_param.get("description_zh") + break + + # Merge inputs description_zh from SDK + inputs_str = tool.get("inputs", "{}") + try: + inputs = json.loads(inputs_str) if isinstance(inputs_str, str) else inputs_str + if isinstance(inputs, dict): + for key, value in inputs.items(): + if isinstance(value, dict) 
and not value.get("description_zh"): + # Find matching input in SDK + sdk_inputs = sdk_info.get("inputs", {}) + if key in sdk_inputs: + value["description_zh"] = sdk_inputs[key].get("description_zh") + inputs_str = json.dumps(inputs, ensure_ascii=False) + except (json.JSONDecodeError, TypeError): + pass + else: + description_zh = tool.get("description_zh") + inputs_str = tool.get("inputs", "{}") + formatted_tool = { "tool_id": tool.get("tool_id"), - "name": tool.get("name"), + "name": tool_name, "origin_name": tool.get("origin_name"), "description": tool.get("description"), + "description_zh": description_zh, "source": tool.get("source"), "is_available": tool.get("is_available"), "create_time": tool.get("create_time"), "usage": tool.get("usage"), "params": tool.get("params", []), - "inputs": tool.get("inputs", {}), + "inputs": inputs_str, "category": tool.get("category") } formatted_tools.append(formatted_tool) diff --git a/backend/services/vectordatabase_service.py b/backend/services/vectordatabase_service.py index e32f005a3..de79c812c 100644 --- a/backend/services/vectordatabase_service.py +++ b/backend/services/vectordatabase_service.py @@ -395,6 +395,7 @@ def create_knowledge_base( tenant_id: Optional[str], ingroup_permission: Optional[str] = None, group_ids: Optional[List[int]] = None, + embedding_model_name: Optional[str] = None, ): """ Create a new knowledge base with a user-facing name and an internal Elasticsearch index name. @@ -404,11 +405,29 @@ def create_knowledge_base( - Generate index_name as ``knowledge_id + '-' + uuid`` (digits and lowercase letters only). - Use generated index_name as the Elasticsearch index name. 
+ Args: + knowledge_name: User-facing knowledge base name + embedding_dim: Dimension of the embedding vectors (optional) + vdb_core: VectorDatabaseCore instance + user_id: User ID who creates the knowledge base + tenant_id: Tenant ID + ingroup_permission: Permission level (optional) + group_ids: List of group IDs (optional) + embedding_model_name: Specific embedding model name to use (optional). + If provided, will use this model instead of tenant default. + For backward compatibility, legacy callers can still use create_index() directly with an explicit index_name. """ try: - embedding_model = get_embedding_model(tenant_id) + # Get embedding model - use user-selected model if provided, otherwise use tenant default + embedding_model = get_embedding_model(tenant_id, embedding_model_name) + + # Determine the embedding model name to save: use user-provided name if available, + # otherwise use the model's display name + saved_embedding_model_name = embedding_model_name + if not saved_embedding_model_name and embedding_model: + saved_embedding_model_name = embedding_model.model # Create knowledge record first to obtain knowledge_id and generated index_name knowledge_data = { @@ -416,7 +435,7 @@ def create_knowledge_base( "knowledge_describe": "", "user_id": user_id, "tenant_id": tenant_id, - "embedding_model_name": embedding_model.model if embedding_model else None, + "embedding_model_name": saved_embedding_model_name, } # Add group permission and group IDs if provided diff --git a/backend/utils/skill_params_utils.py b/backend/utils/skill_params_utils.py new file mode 100644 index 000000000..404e16ccb --- /dev/null +++ b/backend/utils/skill_params_utils.py @@ -0,0 +1,127 @@ +"""Skill ``params`` helpers: DB storage without UI/YAML comment metadata, round-trip YAML for disk.""" + +from __future__ import annotations + +import json +import logging +import re +from io import StringIO +from typing import Any, Dict, List, Optional, Tuple + +logger = logging.getLogger(__name__) 
+ + +def split_string_inline_comment(s: str) -> Tuple[str, Optional[str]]: + """Split ``value # comment`` at the first `` # `` (same rule as the frontend SkillList).""" + idx = s.find(" # ") + if idx == -1: + return s, None + return s[:idx].rstrip(), s[idx + 3 :].strip() or None + + +def strip_params_comments_for_db(obj: Any) -> Any: + """Remove legacy ``_comment`` keys and trailing `` # `` suffixes from strings for JSON/DB storage.""" + if isinstance(obj, str): + display, _tip = split_string_inline_comment(obj) + return display + if isinstance(obj, list): + return [strip_params_comments_for_db(x) for x in obj] + if isinstance(obj, dict): + out: Dict[str, Any] = {} + for k, v in obj.items(): + if k == "_comment": + continue + out[k] = strip_params_comments_for_db(v) + return out + return obj + + +def _coerce_scalar_display(display: str) -> Any: + """Best-effort restore numbers/bools from merged string form (e.g. after stripping `` # ``).""" + s = display.strip() + if s == "": + return display + try: + return json.loads(s) + except (json.JSONDecodeError, TypeError, ValueError): + pass + if re.fullmatch(r"-?\d+", s): + return int(s) + if re.fullmatch(r"-?\d+\.\d+", s): + return float(s) + low = s.lower() + if low in ("true", "false"): + return low == "true" + return display + + +def _scalar_to_node_and_tip(v: Any) -> Tuple[Any, Optional[str]]: + """Return (typed value, optional comment text) for YAML emission.""" + if isinstance(v, str): + display, tip = split_string_inline_comment(v) + return _coerce_scalar_display(display), tip + return v, None + + +def _dict_to_commented_map(d: Dict[str, Any]) -> Any: + """Build ruamel ``CommentedMap``; only scalar ``value # tip`` strings become YAML block comments above keys.""" + from ruamel.yaml.comments import CommentedMap + + cm = CommentedMap() + for k, v in d.items(): + if k == "_comment": + continue + if isinstance(v, dict): + inner_clean = {kk: vv for kk, vv in v.items() if kk != "_comment"} + cm[k] = 
_dict_to_commented_map(inner_clean) + elif isinstance(v, list): + cm[k] = _list_to_commented_seq(v) + else: + val, tip = _scalar_to_node_and_tip(v) + cm[k] = val + if tip: + cm.yaml_set_comment_before_after_key(k, before=tip + "\n") + return cm + + +def _list_to_commented_seq(items: List[Any]) -> Any: + from ruamel.yaml.comments import CommentedSeq + + seq = CommentedSeq() + for item in items: + if isinstance(item, dict): + seq.append(_dict_to_commented_map(item)) + elif isinstance(item, list): + seq.append(_list_to_commented_seq(item)) + else: + val, _ = _scalar_to_node_and_tip(item) + seq.append(val) + return seq + + +def params_dict_to_roundtrip_yaml_text(params: Dict[str, Any]) -> str: + """Serialize params to YAML with comments restored (ruamel round-trip). Falls back to PyYAML.""" + try: + from ruamel.yaml import YAML + + cm = _dict_to_commented_map(params) + y = YAML(typ="rt") + y.indent(mapping=2, sequence=4, offset=2) + buf = StringIO() + y.dump(cm, buf) + return buf.getvalue() + except Exception as exc: + logger.warning( + "ruamel round-trip YAML failed (%s); falling back to plain yaml.dump", + exc, + ) + import yaml as pyyaml + + clean = strip_params_comments_for_db(params) + return pyyaml.dump( + clean, + allow_unicode=True, + sort_keys=False, + default_flow_style=False, + width=float("inf"), + ) diff --git a/backend/utils/tool_utils.py b/backend/utils/tool_utils.py new file mode 100644 index 000000000..f06f36bc3 --- /dev/null +++ b/backend/utils/tool_utils.py @@ -0,0 +1,73 @@ +import importlib +import inspect +from typing import List, Dict + + +def get_local_tools_classes() -> List[type]: + """ + Get all tool classes from the nexent.core.tools package + + Returns: + List of tool class objects + """ + tools_package = importlib.import_module('nexent.core.tools') + tools_classes = [] + for name in dir(tools_package): + obj = getattr(tools_package, name) + if inspect.isclass(obj): + tools_classes.append(obj) + return tools_classes + + +def 
get_local_tools_description_zh() -> Dict[str, Dict]: + """ + Get description_zh for all local tools from SDK (not persisted to DB). + + Returns: + Dict mapping tool name to {"description_zh": ..., "params": [...], "inputs": {...}} + """ + tools_classes = get_local_tools_classes() + result = {} + for tool_class in tools_classes: + tool_name = getattr(tool_class, 'name') + + description_zh = getattr(tool_class, 'description_zh', None) + + init_param_descriptions = getattr(tool_class, 'init_param_descriptions', {}) + + init_params_list = [] + sig = inspect.signature(tool_class.__init__) + for param_name, param in sig.parameters.items(): + if param_name == "self": + continue + + # Check if parameter has a default value and if it should be excluded + if param.default != inspect.Parameter.empty: + if hasattr(param.default, 'exclude') and param.default.exclude: + continue + + param_description_zh = param.default.description_zh if hasattr(param.default, 'description_zh') else None + + if param_description_zh is None and param_name in init_param_descriptions: + param_description_zh = init_param_descriptions[param_name].get('description_zh') + + init_params_list.append({ + "name": param_name, + "description_zh": param_description_zh + }) + + tool_inputs = getattr(tool_class, 'inputs', {}) + inputs_description_zh = {} + if isinstance(tool_inputs, dict): + for key, value in tool_inputs.items(): + if isinstance(value, dict) and value.get("description_zh"): + inputs_description_zh[key] = { + "description_zh": value.get("description_zh") + } + + result[tool_name] = { + "description_zh": description_zh, + "params": init_params_list, + "inputs": inputs_description_zh + } + return result diff --git a/doc/docs/.vitepress/config.mts b/doc/docs/.vitepress/config.mts index 884fa28bb..6855a63f7 100644 --- a/doc/docs/.vitepress/config.mts +++ b/doc/docs/.vitepress/config.mts @@ -1,15 +1,25 @@ -// https://vitepress.dev/reference/site-config +// https://vitepress.dev/reference/site-config 
import { defineConfig } from "vitepress"; export default defineConfig({ // Set base path for GitHub Pages deployment - base: (globalThis as any).process?.env?.GITHUB_PAGES ? '/nexent/' : '/', + base: (globalThis as any).process?.env?.GITHUB_PAGES ? "/nexent/" : "/", title: "Nexent Doc", description: - "A zero-code platform for auto-generating agents no orchestration, no complex drag-and-drop required.", + "A zero-code platform for auto-generating production-grade AI agents using Harness Engineering principles.", // Add favicon to head - head: [["link", { rel: "icon", href: (globalThis as any).process?.env?.GITHUB_PAGES ? "/nexent/favicon.ico" : "/doc/favicon.ico" }]], + head: [ + [ + "link", + { + rel: "icon", + href: (globalThis as any).process?.env?.GITHUB_PAGES + ? "/nexent/favicon.ico" + : "/doc/favicon.ico", + }, + ], + ], // Ignore localhost links as they are meant for local deployment access ignoreDeadLinks: [ @@ -99,16 +109,40 @@ export default defineConfig({ text: "Memory Management", link: "/en/user-guide/memory-management", }, - { text: "User Management", link: "/en/user-guide/user-management" }, + { + text: "User Management", + link: "/en/user-guide/user-management", + }, + { + text: "Third-party Platform Integrations", + items: [ + { text: "ModelEngine", link: "/en/user-guide/modelengine" }, + ], + }, { text: "Local Tools", items: [ { text: "Overview", link: "/en/user-guide/local-tools/" }, - { text: "File Tools", link: "/en/user-guide/local-tools/file-tools" }, - { text: "Email Tools", link: "/en/user-guide/local-tools/email-tools" }, - { text: "Search Tools", link: "/en/user-guide/local-tools/search-tools" }, - { text: "Multimodal Tools", link: "/en/user-guide/local-tools/multimodal-tools" }, - { text: "Terminal Tool", link: "/en/user-guide/local-tools/terminal-tool" }, + { + text: "File Tools", + link: "/en/user-guide/local-tools/file-tools", + }, + { + text: "Email Tools", + link: "/en/user-guide/local-tools/email-tools", + }, + { + text: "Search 
Tools", + link: "/en/user-guide/local-tools/search-tools", + }, + { + text: "Multimodal Tools", + link: "/en/user-guide/local-tools/multimodal-tools", + }, + { + text: "Terminal Tool", + link: "/en/user-guide/local-tools/terminal-tool", + }, ], }, ], @@ -134,9 +168,7 @@ export default defineConfig({ }, { text: "Frontend Development", - items: [ - { text: "Overview", link: "/en/frontend/overview" }, - ], + items: [{ text: "Overview", link: "/en/frontend/overview" }], }, { text: "Backend Development", @@ -184,7 +216,10 @@ export default defineConfig({ text: "MCP Ecosystem", items: [ { text: "Overview", link: "/en/mcp-ecosystem/overview" }, - { text: "MCP Recommendations", link: "/en/mcp-ecosystem/mcp-recommendations" }, + { + text: "MCP Recommendations", + link: "/en/mcp-ecosystem/mcp-recommendations", + }, { text: "Use Cases", link: "/en/mcp-ecosystem/use-cases" }, ], }, @@ -289,11 +324,32 @@ export default defineConfig({ text: "本地工具", items: [ { text: "概览", link: "/zh/user-guide/local-tools/" }, - { text: "文件工具", link: "/zh/user-guide/local-tools/file-tools" }, - { text: "邮件工具", link: "/zh/user-guide/local-tools/email-tools" }, - { text: "搜索工具", link: "/zh/user-guide/local-tools/search-tools" }, - { text: "多模态工具", link: "/zh/user-guide/local-tools/multimodal-tools" }, - { text: "终端工具", link: "/zh/user-guide/local-tools/terminal-tool" }, + { + text: "文件工具", + link: "/zh/user-guide/local-tools/file-tools", + }, + { + text: "邮件工具", + link: "/zh/user-guide/local-tools/email-tools", + }, + { + text: "搜索工具", + link: "/zh/user-guide/local-tools/search-tools", + }, + { + text: "多模态工具", + link: "/zh/user-guide/local-tools/multimodal-tools", + }, + { + text: "终端工具", + link: "/zh/user-guide/local-tools/terminal-tool", + }, + ], + }, + { + text: "对接第三方平台", + items: [ + { text: "ModelEngine", link: "/zh/user-guide/modelengine" }, ], }, ], @@ -359,7 +415,10 @@ export default defineConfig({ text: "MCP 生态系统", items: [ { text: "概览", link: "/zh/mcp-ecosystem/overview" }, - { text: 
"MCP 推荐", link: "/zh/mcp-ecosystem/mcp-recommendations" }, + { + text: "MCP 推荐", + link: "/zh/mcp-ecosystem/mcp-recommendations", + }, { text: "用例场景", link: "/zh/mcp-ecosystem/use-cases" }, ], }, diff --git a/doc/docs/en/getting-started/overview.md b/doc/docs/en/getting-started/overview.md index 560b53510..0f3936ed0 100644 --- a/doc/docs/en/getting-started/overview.md +++ b/doc/docs/en/getting-started/overview.md @@ -1,6 +1,6 @@ # Nexent -Nexent is a zero-code platform for auto-generating agents — no orchestration, no complex drag-and-drop required, using pure language to develop any agent you want. Built on the MCP ecosystem with rich tool integration, Nexent also provides various built-in agents to meet your intelligent service needs in different scenarios such as work, travel, and daily life. Nexent offers powerful capabilities for agent running control, multi-agent collaboration, data processing and knowledge tracing, multimodal dialogue, and batch scaling. +Nexent is a zero-code platform for auto-generating production-grade AI agents, built on **Harness Engineering** principles. It provides unified tools, skills, memory, and orchestration with built-in constraints, feedback loops, and control planes — no orchestration, no complex drag-and-drop required, using pure language to develop any agent you want. > One prompt. Endless reach. @@ -80,5 +80,4 @@ Join our [Discord community](https://discord.gg/tb5H3S3wyv) to chat with other d ## 📄 License -Nexent is licensed under the [MIT](../license) with additional conditions. Please read the [LICENSE](../license) file for details. - +Nexent is licensed under the [MIT License](../license). 
diff --git a/doc/docs/en/user-guide/agent-development.md b/doc/docs/en/user-guide/agent-development.md
index 0bffeca33..db2614f7d 100644
--- a/doc/docs/en/user-guide/agent-development.md
+++ b/doc/docs/en/user-guide/agent-development.md
@@ -144,7 +144,28 @@ Based on the selected collaborative agents and tools, you can now describe in si
 1. In the editor under "Describe how should this agent work", enter a brief description, such as "You are a professional knowledge Q&A assistant with local knowledge search and online search capabilities, synthesizing information to answer user questions"
 2. Select a model (choose a smarter model when generating prompts to optimize response logic), click the "Generate Agent" button, and Nexent will generate detailed agent content for you, including basic information and prompts (role, usage requirements, examples)
-3. You can edit and fine-tune the auto-generated content (especially the prompts) in the Agent Detail Content below
+3. You can edit and fine-tune the auto-generated content (including agent information and prompts) in the Agent Detail Content below
+
+#### 📋 Agent Basic Information Configuration
+
+In the basic information section, if you are not satisfied with the auto-generated content, you can configure the following fields on your own:
+
+| Field | Description |
+|-------|-------------|
+| **Agent Name** | The display name shown in the interface and recognized by users. |
+| **Agent Variable Name** | The internal identifier for the agent, used to reference it in code. Can only contain letters, numbers, and underscores, and must start with a letter or underscore. |
+| **Author** | The creator of the agent. Defaults to the current logged-in user's email. |
+| **User Group** | The user group the agent belongs to, used for permission management and organization. If empty, the agent has no assigned user group. |
+| **Group Permission** | Controls how users in the same group can access this agent:<br> - **Editable**: Group members can view and edit the agent<br> - **Read-only**: Group members can only view, not edit<br> - **Private**: Only the creator and administrators can access |
+| **Model** | The LLM used by the agent for reasoning and generating responses. |
+| **Max Steps of Agent Run** | The maximum number of think-act cycles the agent can execute in a single conversation. More steps allow the agent to handle more complex tasks, but also consume more resources. |
+| **Provide Run Summary** | Controls whether the agent provides run details to the main agent when used as a sub-agent:<br> - **Enabled (default)**: When used as a sub-agent, provides a detailed run summary to the main agent<br> - **Disabled**: When used as a sub-agent, only returns the final result without detailed run information |
+| **Description** | A description of the agent's functionality, explaining its purpose and capabilities. |
+
+> 💡 **Usage Suggestions**:
+> - Use meaningful English names for the agent variable name, such as `code_assistant`, `data_analyst`, etc.
+> - Set the max steps based on task complexity: 3-5 steps for simple Q&A, 10-20 steps for complex reasoning tasks
+> - Keep "Provide Run Summary" enabled if the sub-agent's run process is valuable for the main agent's decision-making. Disable it if you only need the final result to reduce context consumption.
diff --git a/doc/docs/en/user-guide/agent-space.md b/doc/docs/en/user-guide/agent-space.md index 56f77a3de..282a0c910 100644 --- a/doc/docs/en/user-guide/agent-space.md +++ b/doc/docs/en/user-guide/agent-space.md @@ -20,8 +20,8 @@ Each agent appears as a card showing: Click a card to open its details: -- **Basic info:** ID, name, description, and status. -- **Model configuration:** Model name, max tokens, business logic model, etc. +- **Basic info:** ID, name, description, status, max steps, and whether to provide run summary. +- **Model configuration:** Model name, business logic model, etc. - **Prompts:** Role, constraints, examples, and the original description. - **Tools:** Every tool the agent can use. - **Sub-agents:** Any collaborative agents that are configured. diff --git a/doc/docs/en/user-guide/modelengine.md b/doc/docs/en/user-guide/modelengine.md new file mode 100644 index 000000000..8c952f9cc --- /dev/null +++ b/doc/docs/en/user-guide/modelengine.md @@ -0,0 +1,131 @@ +# ModelEngine Data Engineering and Model Engineering Integration Guide + +This document provides a detailed guide on how to integrate ModelEngine's Data Engineering (DataMate) and Model Engineering (ModelLite) in the Nexent platform. + +## 1. ModelEngine Overview + +ModelEngine provides an end-to-end AI toolchain for data processing, knowledge generation, model fine-tuning and deployment, as well as RAG (Retrieval Augmented Generation) application development. It aims to shorten the cycle from data to model, and from data to AI application deployment. ModelEngine offers low-code orchestration, flexible execution scheduling, high-performance data bus and other technologies. 
Combined with built-in data processing operators, RAG framework and extensive ecosystem capabilities, it provides data development engineers, model development engineers, and application development engineers with an efficient, easy-to-use, open, flexible, out-of-the-box, and lightweight full-process AI development experience. + +## 2. Integrating Model Engineering (ModelLite) + +### 2.1 Model Engineering Overview + +ModelLite is a toolchain for model fine-tuning and model inference, hosting and providing access to various AI models. After integrating ModelLite model services in Nexent, you can: + +- Sync all models deployed on the ModelEngine platform +- Use Large Language Models (LLM) for conversation generation +- Use Embedding models for knowledge base processing +- Use Vision Language Models (VLM) for image processing + +### 2.2 Configuration Steps + +#### Step 1: Obtain ModelEngine Credentials + +1. Visit your ModelEngine platform +2. Create an API Key (for authentication) +3. Record the ModelEngine host address (format: `https://:`) + +> ⚠️ **Note**: Make sure you have deployed the required models on ModelEngine, otherwise you won't see any models after syncing. + +#### Step 2: Configure ModelEngine Models in Nexent + +1. Log in to Nexent platform +2. Go to **Model Management** page +3. Click **Sync ModelEngine Configuration** button in **Model Settings** (when deploying Nexent, need to change the value of MODEL_ENGINE_ENABLED variable to True in the .env file) +4. Fill in the following information in the popup: + - **Host Address**: ModelEngine service URL (e.g., `https://:`) + - **Model Type**: Select the model type to integrate + - **API Key**: ModelEngine API Key +5. After configuration, click **Get Models** button. The system will automatically fetch all available models deployed on ModelEngine. Enable the models as needed. +6. Successfully synced models will appear in the model list, marked with "ModelEngine" as the source. + +--- + +## 3. 
Integrating Data Engineering (DataMate) + +### 3.1 What is Datamate + +DataMate is an enterprise-level data processing platform for model fine-tuning and RAG retrieval. It supports core functions such as data collection, data management, operator marketplace, data cleaning, data synthesis, data annotation, data evaluation, and knowledge generation. By integrating Datamate, you can: + +- Reuse existing Datamate knowledge base resources +- Retrieve Datamate documents in Nexent agents + +### 3.2 Configuration Steps + +#### Step 1: Install and Start Datamate Service + +First, you need to deploy the Datamate service. For detailed installation instructions, refer to [Datamate Official Documentation](https://github.com/ModelEngine-Group/DataMate). + +After startup, record the Datamate service address (e.g., `https://:`). + +#### Step 2: Configure Datamate in Nexent + +1. Log in to Nexent platform +2. Go to **Knowledge Base** page +3. Click **DataMate Configuration** button +4. Fill in the Datamate server address: + - **Datamate URL**: Datamate service address (e.g., `https://:`) +5. After configuration, click **Sync** button. The system will automatically fetch all knowledge bases from Datamate +6. After successful sync, knowledge bases will appear in the knowledge base list, marked with source as "DataMate" + +#### Step 3: Create or Edit Knowledge Base Retrieval Agent + +1. Go to **Agent Development** page +2. Create a new agent or edit an existing one + +#### Step 4: Add Tools + +In the agent configuration page: + +1. Find the **Tool Configuration** section +2. Click **Local Tools > Search** button +3. Select `datamate_search` tool from the tool list: for retrieving Datamate knowledge bases +4. 
Configure `datamate_search` tool parameters: + + a) Fill in the Datamate server address (usually auto-filled from your previous configuration) + + b) Click **Select Knowledge Base** button + + c) Select Datamate knowledge bases to retrieve from the knowledge base list (multiple selection supported) + + d) Click **Confirm** to save configuration + +--- + +## 4. Comprehensive Usage Example + +### Scenario: Creating a Knowledge Base Retrieval Agent + +1. **Configure ModelEngine Models** + - Go to Model Management page + - Click ModelEngine Configuration, fill in API Key and host address + - After syncing models, select a Large Language Model as the agent's runtime model + +2. **Integrate Datamate Knowledge Base** + - Go to Knowledge Base page + - Click DataMate Configuration, fill in Datamate server address + - Click Sync DataMate Knowledge Bases to get available knowledge base list + +3. **Create Agent** + - Go to Agent Management, create a new agent + - Add `datamate_search` tool in tool configuration + - Select synced Datamate knowledge bases + - Write system prompt, for example: "You are a professional product assistant. You can answer user questions based on documents from the Datamate knowledge base." + +4. **Test Usage** + - Interact with the agent on the chat page + - Ask product-related questions, the agent will automatically retrieve relevant content from Datamate knowledge base and respond + +--- + +## 5. Related Resources + +- [Nexent Official Documentation](https://modelengine-group.github.io/nexent) +- [ModelEngine Official Documentation](https://support.huawei.com/enterprise/zh/fusioncube/modelengine-pid-261508006) +- [Datamate Official Documentation](https://github.com/ModelEngine-Group/DataMate) + +--- + +## 6. 
Technical Support + +If you encounter issues during usage, feel free to ask questions on [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions) diff --git a/doc/docs/zh/contributing.md b/doc/docs/zh/contributing.md index e11a5d37a..81d6ed77f 100644 --- a/doc/docs/zh/contributing.md +++ b/doc/docs/zh/contributing.md @@ -153,6 +153,7 @@ git checkout -b 您的分支名 | 工程优化 | 🔨 | 工程工具更新、配置调整 | | 文档更新 | 📝 | 只改动文档内容 | | 添加测试用例 | 🧪 | 添加测试用例或修改测试用例 | +| 依赖更新 | 📦 | 更新依赖版本,移除废弃API,清理过时用法 | 示例提交消息: ```bash diff --git a/doc/docs/zh/getting-started/overview.md b/doc/docs/zh/getting-started/overview.md index abbbdd4ba..e5bc95549 100644 --- a/doc/docs/zh/getting-started/overview.md +++ b/doc/docs/zh/getting-started/overview.md @@ -1,6 +1,6 @@ # Nexent -Nexent 是一个零代码智能体自动生成平台 —— 无需编排,无需复杂的拖拉拽操作,使用纯语言开发你想要的任何智能体。基于MCP生态,具备丰富的工具集成,同时提供多种自带智能体,满足你的工作、旅行、生活等不同场景的智能服务需要。Nexent 还提供强大的智能体运行控制、多智能体协作、数据处理和知识溯源、多模态对话、批量扩展能力。 +Nexent 是一个基于 **Harness Engineering** 原则打造的零代码智能体自动生成平台。集统一工具、技能、记忆和编排能力于一体,内置约束机制、反馈循环和控制平面。无需编排,无需复杂的拖拉拽操作,使用纯语言开发你想要的任何智能体。 > 一个提示词,无限种可能。 @@ -80,4 +80,4 @@ Nexent 采用现代化的分布式微服务架构,专为高性能、可扩展 ## 📄 许可证 -Nexent 采用 [MIT](../license) 许可证,并附有额外条件。请阅读 [LICENSE](../license) 文件了解详情。 \ No newline at end of file +Nexent 采用 [MIT 许可证](../license)。 diff --git a/doc/docs/zh/opensource-memorial-wall.md b/doc/docs/zh/opensource-memorial-wall.md index 9b438ab1b..adb25ed42 100644 --- a/doc/docs/zh/opensource-memorial-wall.md +++ b/doc/docs/zh/opensource-memorial-wall.md @@ -731,3 +731,15 @@ Happy to find a easy-to-use AI Agent Platform ::: info ichigoichie - 2026-03-10 被 Nexent 官网吸引,希望深入了解产品并应用于工作场景,提升工作效率。 ::: + +::: info whale0110 - 2026-03-12 +祝Nexent发展越来越好,小白第一次尝试! +::: + +::: info shen_e - 2026-03-12 +感谢这个平台为我的项目提供了丰富的智能体获取途径! +::: + +::: info BigBen0724 - 2026-03-16 +在体验一众AI工具后,被 Nexent 产品所吸引,希望这一智能体开发平台能赋能我的工作生活! 
+::: diff --git a/doc/docs/zh/user-guide/agent-development.md b/doc/docs/zh/user-guide/agent-development.md index eebed03cf..cb4b4055d 100644 --- a/doc/docs/zh/user-guide/agent-development.md +++ b/doc/docs/zh/user-guide/agent-development.md @@ -4,7 +4,7 @@ ## 🔧 创建智能体 -在 Agent 管理页签下,点击"创建 Agent"即可创建一个空白智能体,点击"退出创建"即可退出创建模式。 +在智能体管理页签下,点击"创建 Agent"即可创建一个空白智能体,点击"退出创建"即可退出创建模式。 如果您有现成的智能体配置,也可以导入使用: 1. 点击"导入 Agent" @@ -16,8 +16,8 @@
> ⚠️ **提示**:如果导入了重名的智能体,系统会弹出提示弹窗。您可以选择: -> - **直接导入**:保留重复名称,导入后的智能体会处于不可用状态,需手动修改 Agent 名称和变量名后才能使用 -> - **重新生成并导入**:系统将调用 LLM 对 Agent 进行重命名,会消耗一定的模型 token 数,可能耗时较长 +> - **直接导入**:保留重复名称,导入后的智能体会处于不可用状态,需手动修改智能体名称和变量名后才能使用 +> - **重新生成并导入**:系统将调用 LLM 对智能体进行重命名,会消耗一定的模型 token 数,可能耗时较长 > 📌 **重要说明**:通过导入创建的智能体,如果其工具中包含 `knowledge_base_search` 等知识库检索工具,这些工具只会检索**当前登录用户在本环境中有权限访问的知识库**。导入文件中原有的知识库配置不会自动继承,因此实际检索结果和回答效果,可能与智能体原作者环境下的表现存在差异。 @@ -40,11 +40,11 @@ -### 🛠️ 选择 Agent 的工具 +### 🛠️ 选择智能体的工具 智能体可以使用各种工具来完成任务,如知识库检索、文件解析、图片解析、收发邮件、文件管理等本地工具,也可接入第三方 MCP 工具,或自定义工具。 -1. 在"选择 Agent 的工具"页签右侧,点击"刷新工具"来刷新可用工具列表 +1. 在"选择智能体的工具"页签右侧,点击"刷新工具"来刷新可用工具列表 2. 选择想要添加工具所在的分组 3. 查看分组下可选用的所有工具,可点击 ⚙️ 查看工具描述,进行工具参数配置 4. 点击工具名即可选中该工具,再次点击可取消选择 @@ -64,7 +64,7 @@ ### 🔌 添加 MCP 工具 -在"选择 Agent 的工具"页签右侧,点击"MCP 配置",可在弹窗中进行 MCP 服务器的配置,查看已配置的 MCP 服务器 +在"选择智能体的工具"页签右侧,点击"MCP 配置",可在弹窗中进行 MCP 服务器的配置,查看已配置的 MCP 服务器 您可以通过以下两种方式在 Nexent 中添加 MCP 服务 @@ -110,7 +110,7 @@ ### ⚙️ 自定义工具 -您可参考以下指导文档,开发自己的工具,并接入 Nexent 使用,丰富 Agent 能力。 +您可参考以下指导文档,开发自己的工具,并接入 Nexent 使用,丰富智能体能力。 - [LangChain 工具指南](../backend/tools/langchain) - [MCP 工具开发](../backend/tools/mcp) @@ -118,7 +118,7 @@ ### 🧪 工具测试 -无论是什么类型的工具(内置工具、外部接入的 MCP 工具,还是自定义开发工具),Nexent 都提供了"工具测试"能力。如果您在创建 Agent 时不确定某个工具的效果,可以使用测试功能来验证工具是否按预期工作。 +无论是什么类型的工具(内置工具、外部接入的 MCP 工具,还是自定义开发工具),Nexent 都提供了"工具测试"能力。如果您在创建智能体时不确定某个工具的效果,可以使用测试功能来验证工具是否按预期工作。 1. 点击工具的小齿轮按钮 ⚙️,进入工具的详细配置弹窗 2. 首先确保已经配置了工具的必备参数(带红色星号的参数) @@ -138,13 +138,34 @@ ## 📝 描述业务逻辑 -### ✍️ 描述 Agent 应该如何工作 +### ✍️ 描述智能体应该如何工作 -根据选择的协作 Agent 和工具,您现在可以用简洁的语言来描述,您希望这个 Agent 应该如何工作。Nexent 会根据您的配置和描述,自动为您生成 Agent 名称、描述以及提示词等信息。 +根据选择的协作智能体和工具,您现在可以用简洁的语言来描述,您希望这个智能体应该如何工作。Nexent 会根据您的描述,自动为您生成智能体配置以及提示词等信息。 -1. 在"描述 Agent 应该如何工作"下的编辑框中,输入简洁描述,如"你是一个专业的知识问答小助手,具备本地知识检索和联网检索能力,综合信息以回答用户问题" -2. 选择模型(生成提示词时选择更聪明的模型以优化回复逻辑),点击"生成智能体"按钮,Nexent 会为您生成 Agent 详细内容,包括基础信息以及提示词(角色、使用要求、示例) -3. 您可在下方 Agent 详细内容中,针对自动生成的内容(特别是提示词)进行编辑微调 +1. 在"描述智能体应该如何工作"下的编辑框中,输入简洁描述,如"你是一个专业的知识问答小助手,具备本地知识检索和联网检索能力,综合信息以回答用户问题" +2. 
选择模型(生成提示词时选择更聪明的模型以优化回复逻辑),点击"生成智能体"按钮,Nexent 会为您生成智能体详细内容,包括基础信息以及提示词(角色、使用要求、示例)
+3. 您可在下方智能体详细内容中,针对自动生成的内容(包括基础信息和提示词)进行编辑微调
+
+#### 📋 智能体基础信息配置
+
+在基础信息区域,若您对自动生成的内容不满意,您可以手工修改以下各项:
+
+| 配置项 | 说明 |
+|--------|------|
+| **智能体名称** | 智能体的展示名称,用于界面显示和用户识别。 |
+| **智能体变量名** | 智能体的内部标识名称,用于代码中引用该智能体。只能包含字母、数字和下划线,且必须以字母或下划线开头。 |
+| **作者** | 智能体的创建者名称,默认值为当前登录用户的邮箱。 |
+| **用户组** | 智能体所属的用户组,用于权限管理和组织管理。若为空,则表示无所属用户组。 |
+| **组内权限** | 控制同组用户对该智能体的访问权限:<br> - **同组可编辑**:同组用户可以查看和编辑该智能体<br> - **同组只读**:同组用户只能查看,不能编辑<br> - **私有**:只有创建者和管理员可以访问 |
+| **大语言模型** | 智能体运行时使用的大语言模型,用于处理推理和生成回复。 |
+| **智能体运行最大步骤数** | 智能体在单次对话中最多可以执行的思考-行动循环次数。步数越多,智能体可以处理更复杂的任务,但也会消耗更多资源。 |
+| **提供运行摘要** | 控制智能体在被用作子智能体时,是否向主智能体提供运行细节:<br> - **开启(默认)**:当此智能体被用作子智能体时,会向主智能体提供详细的运行过程摘要<br> - **关闭**:当此智能体被用作子智能体时,只返回最终结果,不提供详细的运行过程 |
+| **智能体描述** | 智能体的功能描述,用于说明智能体的用途和能力。 |
+
+> 💡 **使用建议**:
+> - 智能体变量名应使用有意义的英文命名,如 `code_assistant`、`data_analyst` 等
+> - 智能体运行最大步骤数建议根据任务复杂度设置,简单的问答任务可设为 3-5 步,复杂的推理任务可设为 10-20 步
+> - 如果子智能体的运行过程对主智能体的决策有参考价值,建议开启"提供运行摘要"选项。如果只需要子智能体的最终结果以减少上下文消耗,建议关闭此选项
@@ -152,7 +173,7 @@ ### 🐛 调试与保存 -在完成初步 Agent 配置后,您可以对 Agent 进行调试,根据调试结果微调提示词,持续提升 Agent 表现。 +在完成初步智能体配置后,您可以对智能体进行调试,根据调试结果微调提示词,持续提升智能体表现。 1. 在页面右下角点击"调试"按钮,弹出智能体调试页面 2. 与智能体进行测试对话,观察智能体的响应和行为 @@ -187,7 +208,7 @@ Nexent 支持智能体的版本管理,您可以在调试过程中,保存不 ### 📤 导出 -可将调试成功的智能体导出为 JSON 配置文件,在创建 Agent 时可以使用此 JSON 文件以导入的方式创建副本。 +可将调试成功的智能体导出为 JSON 配置文件,在创建智能体时可以使用此 JSON 文件以导入的方式创建副本。 ### 📋 复制 diff --git a/doc/docs/zh/user-guide/agent-space.md b/doc/docs/zh/user-guide/agent-space.md index ff9cc7219..c6a76df6b 100644 --- a/doc/docs/zh/user-guide/agent-space.md +++ b/doc/docs/zh/user-guide/agent-space.md @@ -22,8 +22,8 @@ 点击智能体卡片,即可查看智能体详细信息: -- **基础信息**:智能体ID、名称、描述、状态等 -- **模型配置**:模型名称、最大部署、业务逻辑模型名称等 +- **基础信息**:智能体ID、名称、描述、状态、最大步数、提供运行摘要等 +- **模型配置**:模型名称、业务逻辑模型名称等 - **提示词**:包含角色提示词、约束提示词、示例提示词、以及原始业务描述 - **工具**:配置的工具 - **子智能体**:配置的子智能体 diff --git a/doc/docs/zh/user-guide/modelengine.md b/doc/docs/zh/user-guide/modelengine.md new file mode 100644 index 000000000..45fe74cac --- /dev/null +++ b/doc/docs/zh/user-guide/modelengine.md @@ -0,0 +1,131 @@ +# ModelEngine 数据工程和模型工程对接指南 + +本文档详细介绍如何在 Nexent 平台中对接 ModelEngine 的数据工程(DataMate)和模型工程(ModelLite)。 + +## 1. ModelEngine介绍 + +ModelEngine提供从数据处理、知识生成,到模型微调和部署,以及RAG(Retrieval Augmented Generation)应用开发的AI训推全流程工具链,用于缩短从数据到模型、 数据到AI应用的落地周期。ModelEngine提供低代码编排、灵活的执行调度、高性能 数据总线等技术,结合内置的数据处理算子、RAG框架以及广泛的生态能力,为数据 开发工程师、模型开发工程师、应用开发工程师提供高效易用、开放灵活、开箱即用、轻量的全流程AI开发体验。 + +## 2. 对接模型工程(ModelLite) + +### 2.1 模型工程介绍 + +ModelLite是一个面向模型微调和模型推理的工具链,托管并提供多种 AI 模型的访问服务。在 Nexent 中对接 ModelLite 模型服务后,您可以: + +- 同步在 ModelEngine 平台上部署的所有模型 +- 使用大语言模型 (LLM) 进行对话生成 +- 使用向量化模型 (Embedding) 进行知识库处理 +- 使用视觉语言模型 (VLM) 处理图片 + +### 2.2 配置步骤 + +#### 步骤 1:获取 ModelEngine 访问凭证 + +1. 访问您的 ModelEngine 平台 +2. 创建 API Key(用于身份验证) +3. 记录 ModelEngine 的主机地址(格式:`https://:`) + +> ⚠️ **注意**:确保您已在 ModelEngine 平台上部署了需要的模型,否则同步后将无法看到模型列表。 + +#### 步骤 2:在 Nexent 中配置 ModelEngine模型 + +1. 登录 Nexent 平台 +2. 进入 **模型管理** 页面 +3. 
点击**模型设置中**的 **同步ModelEngine 配置** 按钮 (部署Nexent时,需将.env文件中 MODEL_ENGINE_ENABLED变量值改为 True) +4. 在弹窗中填写以下信息: + - **主机地址**:ModelEngine 服务的 URL(如 `https://:`) + - **模型类型**:选择对接的模型类型 + - **API Key**:ModelEngine API Key +5. 配置完成后,点击 **获取模型** 按钮,系统将自动获取 ModelEngine 上部署的所有可用模型,根据需要启用对应的模型。 +6. 同步成功的模型将显示在模型列表中,并标记为 "ModelEngine" 来源。 + +--- + +## 3. 对接数据工程(DataMate) + +### 3.1 什么是 Datamate + +DataMate是面向模型微调与RAG检索的企业级数据处理平台,支持数据归集、数据管理、算子市场、数据清洗、数据合成、数据标注、数据评估、知识生成等核心功能。通过对接 Datamate,您可以: + +- 复用已有的 Datamate 知识库资源 +- 在 Nexent 智能体中检索 Datamate 中的文档 + +### 3.2 配置步骤 + +#### 步骤 1:安装和启动 Datamate 服务 + +首先,您需要部署 Datamate 服务。详细安装步骤请参考 [Datamate 官方文档](https://github.com/ModelEngine-Group/DataMate)。 + +启动后,记录 Datamate 的服务地址(如`https://:`)。 + +#### 步骤 2:在 Nexent 中配置 Datamate + +1. 登录 Nexent 平台 +2. 进入 **知识库** 页面 +3. 点击 **DataMate 配置** 按钮 +4. 填写 Datamate 服务器地址: + - **Datamate URL**:Datamate 服务的地址(如 `https://:`) +5. 配置完成后,点击 **同步** 按钮,系统将自动获取 Datamate 中的所有知识库 +6. 同步成功后,知识库将显示在知识库列表中,标记来源为 "DataMate" + +#### 步骤 3:创建或编辑知识库检索智能体 + +1. 进入 **智能体开发** 页面 +2. 创建新智能体或编辑现有智能体 + +#### 步骤 4:添加工具 + +在智能体配置页面: + +1. 找到 **工具配置** 部分 +2. 点击 **本地工具 > search** 按钮 +3. 从工具列表中选择`datamate_search`工具:用于检索 Datamate 知识库 +4. 配置`datamate_search`工具参数: + + a) 填写 Datamate 服务器地址(通常会自动填充您之前配置的地址) + + b) 点击 **选择知识库** 按钮 + + c) 从知识库列表中选择要检索的 Datamate 知识库(可多选) + + d) 点击 **确定** 保存配置 + +--- + +## 4. 综合使用示例 + +### 场景:创建一个知识库检索智能体 + +1. **配置 ModelEngine 模型** + - 进入模型管理页面 + - 点击 ModelEngine 配置,填写 API Key 和主机地址 + - 同步模型后,选择一个大语言模型作为智能体的运行模型 + +2. **对接 Datamate 知识库** + - 进入知识库页面 + - 点击 DataMate 配置,填写 Datamate 服务器地址 + - 点击同步 DataMate 知识库,获取可用的知识库列表 + +3. **创建智能体** + - 进入智能体管理,创建新智能体 + - 在工具配置中添加 `datamate_search` 工具 + - 选择已同步的 Datamate 知识库 + - 编写系统提示词,例如:"你是一个专业的产品助手,可以根据 Datamate 知识库中的文档回答用户问题。" + +4. **测试使用** + - 在对话页面与智能体交互 + - 询问产品相关问题,智能体将自动从 Datamate 知识库检索相关内容并回答 + +--- + +## 5. 
相关资源 + +- [Nexent 官方文档](https://modelengine-group.github.io/nexent) +- [ModelEngine 官方文档](https://support.huawei.com/enterprise/zh/fusioncube/modelengine-pid-261508006) +- [Datamate 官方文档](https://github.com/ModelEngine-Group/DataMate) + +--- + +## 6. 技术支持 + +如果在使用过程中遇到问题,欢迎在 [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions) 提问 diff --git a/docker/.env.example b/docker/.env.example index 677ccb7c7..d03cf6113 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -142,6 +142,8 @@ QUEUES=process_q,forward_q WORKER_NAME= WORKER_CONCURRENCY=4 +# Skills Configuration +SKILLS_PATH=/mnt/nexent/skills # Telemetry and Monitoring Configuration ENABLE_TELEMETRY=false diff --git a/docker/init.sql b/docker/init.sql index 02e99632c..75e9a818f 100644 --- a/docker/init.sql +++ b/docker/init.sql @@ -651,47 +651,6 @@ BEFORE UPDATE ON "nexent"."memory_user_config_t" FOR EACH ROW EXECUTE FUNCTION "update_memory_user_config_update_time"(); --- Create partner mapping id table -CREATE TABLE IF NOT EXISTS "nexent"."partner_mapping_id_t" ( - "mapping_id" serial PRIMARY KEY NOT NULL, - "external_id" varchar(100) COLLATE "pg_catalog"."default", - "internal_id" int4, - "mapping_type" varchar(30) COLLATE "pg_catalog"."default", - "tenant_id" varchar(100) COLLATE "pg_catalog"."default", - "user_id" varchar(100) COLLATE "pg_catalog"."default", - "create_time" timestamp(6) DEFAULT CURRENT_TIMESTAMP, - "update_time" timestamp(6) DEFAULT CURRENT_TIMESTAMP, - "created_by" varchar(100) COLLATE "pg_catalog"."default", - "updated_by" varchar(100) COLLATE "pg_catalog"."default", - "delete_flag" varchar(1) COLLATE "pg_catalog"."default" DEFAULT 'N'::character varying -); - -ALTER TABLE "nexent"."partner_mapping_id_t" OWNER TO "root"; - -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."mapping_id" IS 'ID'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."external_id" IS 'The external id given by the outer partner'; -COMMENT ON COLUMN 
"nexent"."partner_mapping_id_t"."internal_id" IS 'The internal id of the other database table'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."mapping_type" IS 'Type of the external - internal mapping, value set: CONVERSATION'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."tenant_id" IS 'Tenant ID'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."user_id" IS 'User ID'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."create_time" IS 'Creation time'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."update_time" IS 'Update time'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."created_by" IS 'Creator'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."updated_by" IS 'Updater'; -COMMENT ON COLUMN "nexent"."partner_mapping_id_t"."delete_flag" IS 'Whether it is deleted. Optional values: Y/N'; - -CREATE OR REPLACE FUNCTION "update_partner_mapping_update_time"() -RETURNS TRIGGER AS $$ -BEGIN - NEW.update_time = CURRENT_TIMESTAMP; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER "update_partner_mapping_update_time_trigger" -BEFORE UPDATE ON "nexent"."partner_mapping_id_t" -FOR EACH ROW -EXECUTE FUNCTION "update_partner_mapping_update_time"(); -- 1. 
Create tenant_invitation_code_t table for invitation codes CREATE TABLE IF NOT EXISTS nexent.tenant_invitation_code_t ( @@ -1049,3 +1008,162 @@ COMMENT ON COLUMN nexent.ag_tenant_agent_version_t.create_time IS 'Version creat COMMENT ON COLUMN nexent.ag_tenant_agent_version_t.updated_by IS 'Last user who updated this version'; COMMENT ON COLUMN nexent.ag_tenant_agent_version_t.update_time IS 'Last update timestamp'; COMMENT ON COLUMN nexent.ag_tenant_agent_version_t.delete_flag IS 'Soft delete flag: Y/N'; + +-- Create the user_token_info_t table in the nexent schema +CREATE TABLE IF NOT EXISTS nexent.user_token_info_t ( + token_id SERIAL4 PRIMARY KEY NOT NULL, + access_key VARCHAR(100) NOT NULL, + user_id VARCHAR(100) NOT NULL, + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + created_by VARCHAR(100), + updated_by VARCHAR(100), + delete_flag VARCHAR(1) DEFAULT 'N' +); + +ALTER TABLE "user_token_info_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.user_token_info_t IS 'User token (AK/SK) information table'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.user_token_info_t.token_id IS 'Token ID, unique primary key'; +COMMENT ON COLUMN nexent.user_token_info_t.access_key IS 'Access Key (AK)'; +COMMENT ON COLUMN nexent.user_token_info_t.user_id IS 'User ID who owns this token'; +COMMENT ON COLUMN nexent.user_token_info_t.create_time IS 'Creation time, audit field'; +COMMENT ON COLUMN nexent.user_token_info_t.update_time IS 'Update time, audit field'; +COMMENT ON COLUMN nexent.user_token_info_t.created_by IS 'Creator ID, audit field'; +COMMENT ON COLUMN nexent.user_token_info_t.updated_by IS 'Last updater ID, audit field'; +COMMENT ON COLUMN nexent.user_token_info_t.delete_flag IS 'Soft delete flag, Y means deleted'; + + +-- Create the user_token_usage_log_t table in the nexent schema +CREATE TABLE IF NOT EXISTS nexent.user_token_usage_log_t ( 
+ token_usage_id SERIAL4 PRIMARY KEY NOT NULL, + token_id INT4 NOT NULL, + call_function_name VARCHAR(100), + related_id INT4, + meta_data JSONB, + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + created_by VARCHAR(100), + updated_by VARCHAR(100), + delete_flag VARCHAR(1) DEFAULT 'N' +); + +ALTER TABLE "user_token_usage_log_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.user_token_usage_log_t IS 'User token usage log table'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.user_token_usage_log_t.token_usage_id IS 'Token usage log ID, unique primary key'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.token_id IS 'Foreign key to user_token_info_t.token_id'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.call_function_name IS 'API function name being called'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.related_id IS 'Related resource ID (e.g., conversation_id)'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.meta_data IS 'Additional metadata for this usage log entry, stored as JSON'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.create_time IS 'Creation time, audit field'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.update_time IS 'Update time, audit field'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.created_by IS 'Creator ID, audit field'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.updated_by IS 'Last updater ID, audit field'; +COMMENT ON COLUMN nexent.user_token_usage_log_t.delete_flag IS 'Soft delete flag, Y means deleted'; + +-- Create the ag_skill_info_t table in the nexent schema +CREATE TABLE IF NOT EXISTS nexent.ag_skill_info_t ( + skill_id SERIAL4 PRIMARY KEY NOT NULL, + skill_name VARCHAR(100) NOT NULL, + skill_description VARCHAR(1000), + skill_tags JSON, + skill_content TEXT, + params JSON, + source VARCHAR(30) DEFAULT 'official', + created_by VARCHAR(100), + create_time TIMESTAMP WITHOUT TIME 
ZONE DEFAULT CURRENT_TIMESTAMP, + updated_by VARCHAR(100), + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + delete_flag VARCHAR(1) DEFAULT 'N' +); + +ALTER TABLE "ag_skill_info_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.ag_skill_info_t IS 'Skill information table for managing custom skills'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_id IS 'Skill ID, unique primary key'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_name IS 'Skill name, globally unique'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_description IS 'Skill description text'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_tags IS 'Skill tags stored as JSON array'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_content IS 'Skill content or prompt text'; +COMMENT ON COLUMN nexent.ag_skill_info_t.params IS 'Skill configuration parameters stored as JSON object'; +COMMENT ON COLUMN nexent.ag_skill_info_t.source IS 'Skill source: official, custom, or partner'; +COMMENT ON COLUMN nexent.ag_skill_info_t.created_by IS 'Creator ID'; +COMMENT ON COLUMN nexent.ag_skill_info_t.create_time IS 'Creation timestamp'; +COMMENT ON COLUMN nexent.ag_skill_info_t.updated_by IS 'Last updater ID'; +COMMENT ON COLUMN nexent.ag_skill_info_t.update_time IS 'Last update timestamp'; +COMMENT ON COLUMN nexent.ag_skill_info_t.delete_flag IS 'Whether it is deleted. 
Optional values: Y/N'; + +-- Create the ag_skill_tools_rel_t table in the nexent schema +CREATE TABLE IF NOT EXISTS nexent.ag_skill_tools_rel_t ( + rel_id SERIAL4 PRIMARY KEY NOT NULL, + skill_id INTEGER, + tool_id INTEGER, + created_by VARCHAR(100), + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_by VARCHAR(100), + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + delete_flag VARCHAR(1) DEFAULT 'N' +); + +ALTER TABLE "ag_skill_tools_rel_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.ag_skill_tools_rel_t IS 'Skill-tool relationship table for many-to-many mapping'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.rel_id IS 'Relationship ID, unique primary key'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.skill_id IS 'Foreign key to ag_skill_info_t.skill_id'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.tool_id IS 'Tool ID from ag_tool_info_t'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.created_by IS 'Creator ID'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.create_time IS 'Creation timestamp'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.updated_by IS 'Last updater ID'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.update_time IS 'Last update timestamp'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.delete_flag IS 'Whether it is deleted. 
Optional values: Y/N'; + +-- Create the ag_skill_instance_t table in the nexent schema +-- Stores skill instance configuration per agent version +-- Note: skill_description and skill_content fields removed, now retrieved from ag_skill_info_t +CREATE TABLE IF NOT EXISTS nexent.ag_skill_instance_t ( + skill_instance_id SERIAL4 NOT NULL, + skill_id INTEGER NOT NULL, + agent_id INTEGER NOT NULL, + user_id VARCHAR(100), + tenant_id VARCHAR(100), + enabled BOOLEAN DEFAULT TRUE, + version_no INTEGER DEFAULT 0 NOT NULL, + created_by VARCHAR(100), + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_by VARCHAR(100), + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + delete_flag VARCHAR(1) DEFAULT 'N', + CONSTRAINT ag_skill_instance_t_pkey PRIMARY KEY (skill_instance_id, version_no) +); + +ALTER TABLE "ag_skill_instance_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.ag_skill_instance_t IS 'Skill instance configuration table - stores per-agent skill settings'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.ag_skill_instance_t.skill_instance_id IS 'Skill instance ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.skill_id IS 'Foreign key to ag_skill_info_t.skill_id'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.agent_id IS 'Agent ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.user_id IS 'User ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.tenant_id IS 'Tenant ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.enabled IS 'Whether this skill is enabled for the agent'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.version_no IS 'Version number. 
0 = draft/editing state, >=1 = published snapshot'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.created_by IS 'Creator ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.create_time IS 'Creation timestamp'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.updated_by IS 'Last updater ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.update_time IS 'Last update timestamp'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.delete_flag IS 'Whether it is deleted. Optional values: Y/N'; diff --git a/docker/sql/v1.8.1_0306_add_user_token_info.sql b/docker/sql/v1.8.1_0306_add_user_token_info.sql index 040530334..402cf4bab 100644 --- a/docker/sql/v1.8.1_0306_add_user_token_info.sql +++ b/docker/sql/v1.8.1_0306_add_user_token_info.sql @@ -32,34 +32,6 @@ COMMENT ON COLUMN nexent.user_token_info_t.created_by IS 'Creator ID, audit fiel COMMENT ON COLUMN nexent.user_token_info_t.updated_by IS 'Last updater ID, audit field'; COMMENT ON COLUMN nexent.user_token_info_t.delete_flag IS 'Soft delete flag, Y means deleted'; --- Create unique index on access_key to ensure uniqueness -CREATE UNIQUE INDEX IF NOT EXISTS idx_user_token_info_access_key ON nexent.user_token_info_t(access_key) WHERE delete_flag = 'N'; - --- Create index on user_id for query performance -CREATE INDEX IF NOT EXISTS idx_user_token_info_user_id ON nexent.user_token_info_t(user_id) WHERE delete_flag = 'N'; - --- Create a function to update the update_time column -CREATE OR REPLACE FUNCTION update_user_token_info_update_time() -RETURNS TRIGGER AS $$ -BEGIN - NEW.update_time = CURRENT_TIMESTAMP; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - --- Add comment to the function -COMMENT ON FUNCTION update_user_token_info_update_time() IS 'Function to update the update_time column when a record in user_token_info_t is updated'; - --- Create a trigger to call the function before each update -DROP TRIGGER IF EXISTS update_user_token_info_update_time_trigger ON nexent.user_token_info_t; -CREATE TRIGGER 
update_user_token_info_update_time_trigger -BEFORE UPDATE ON nexent.user_token_info_t -FOR EACH ROW -EXECUTE FUNCTION update_user_token_info_update_time(); - --- Add comment to the trigger -COMMENT ON TRIGGER update_user_token_info_update_time_trigger ON nexent.user_token_info_t IS 'Trigger to call update_user_token_info_update_time function before each update on user_token_info_t table'; - -- Create the user_token_usage_log_t table in the nexent schema CREATE TABLE IF NOT EXISTS nexent.user_token_usage_log_t ( @@ -92,20 +64,6 @@ COMMENT ON COLUMN nexent.user_token_usage_log_t.created_by IS 'Creator ID, audit COMMENT ON COLUMN nexent.user_token_usage_log_t.updated_by IS 'Last updater ID, audit field'; COMMENT ON COLUMN nexent.user_token_usage_log_t.delete_flag IS 'Soft delete flag, Y means deleted'; --- Create index on token_id for query performance -CREATE INDEX IF NOT EXISTS idx_user_token_usage_log_token_id ON nexent.user_token_usage_log_t(token_id); - --- Create index on call_function_name for query performance -CREATE INDEX IF NOT EXISTS idx_user_token_usage_log_function_name ON nexent.user_token_usage_log_t(call_function_name); - --- Add foreign key constraint -ALTER TABLE nexent.user_token_usage_log_t -ADD CONSTRAINT fk_user_token_usage_log_token_id -FOREIGN KEY (token_id) -REFERENCES nexent.user_token_info_t(token_id) -ON DELETE CASCADE; - - -- Migration: Remove partner_mapping_id_t table for northbound conversation ID mapping -- Date: 2026-03-10 -- Description: Remove the external-internal conversation ID mapping table as northbound APIs now use internal conversation IDs directly diff --git a/docker/sql/v2.0.0_0314_add_context_skill_t.sql b/docker/sql/v2.0.0_0314_add_context_skill_t.sql new file mode 100644 index 000000000..5fd23c97e --- /dev/null +++ b/docker/sql/v2.0.0_0314_add_context_skill_t.sql @@ -0,0 +1,105 @@ +-- Migration: Add ag_skill_info_t, ag_skill_tools_rel_t, and ag_skill_instance_t tables +-- Date: 2026-03-14 +-- Description: Create skill 
management tables with skill content, tags, and tool relationships + +SET search_path TO nexent; + +-- Create the ag_skill_info_t table in the nexent schema +CREATE TABLE IF NOT EXISTS nexent.ag_skill_info_t ( + skill_id SERIAL4 PRIMARY KEY NOT NULL, + skill_name VARCHAR(100) NOT NULL, + skill_description VARCHAR(1000), + skill_tags JSON, + skill_content TEXT, + params JSON, + source VARCHAR(30) DEFAULT 'official', + created_by VARCHAR(100), + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_by VARCHAR(100), + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + delete_flag VARCHAR(1) DEFAULT 'N' +); + +ALTER TABLE "ag_skill_info_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.ag_skill_info_t IS 'Skill information table for managing custom skills'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_id IS 'Skill ID, unique primary key'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_name IS 'Skill name, globally unique'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_description IS 'Skill description text'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_tags IS 'Skill tags stored as JSON array'; +COMMENT ON COLUMN nexent.ag_skill_info_t.skill_content IS 'Skill content or prompt text'; +COMMENT ON COLUMN nexent.ag_skill_info_t.params IS 'Skill configuration parameters stored as JSON object'; +COMMENT ON COLUMN nexent.ag_skill_info_t.source IS 'Skill source: official, custom, or partner'; +COMMENT ON COLUMN nexent.ag_skill_info_t.created_by IS 'Creator ID'; +COMMENT ON COLUMN nexent.ag_skill_info_t.create_time IS 'Creation timestamp'; +COMMENT ON COLUMN nexent.ag_skill_info_t.updated_by IS 'Last updater ID'; +COMMENT ON COLUMN nexent.ag_skill_info_t.update_time IS 'Last update timestamp'; +COMMENT ON COLUMN nexent.ag_skill_info_t.delete_flag IS 'Whether it is deleted. 
Optional values: Y/N'; + +-- Create the ag_skill_tools_rel_t table in the nexent schema +CREATE TABLE IF NOT EXISTS nexent.ag_skill_tools_rel_t ( + rel_id SERIAL4 PRIMARY KEY NOT NULL, + skill_id INTEGER, + tool_id INTEGER, + created_by VARCHAR(100), + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_by VARCHAR(100), + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + delete_flag VARCHAR(1) DEFAULT 'N' +); + +ALTER TABLE "ag_skill_tools_rel_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.ag_skill_tools_rel_t IS 'Skill-tool relationship table for many-to-many mapping'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.rel_id IS 'Relationship ID, unique primary key'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.skill_id IS 'Foreign key to ag_skill_info_t.skill_id'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.tool_id IS 'Tool ID from ag_tool_info_t'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.created_by IS 'Creator ID'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.create_time IS 'Creation timestamp'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.updated_by IS 'Last updater ID'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.update_time IS 'Last update timestamp'; +COMMENT ON COLUMN nexent.ag_skill_tools_rel_t.delete_flag IS 'Whether it is deleted. 
Optional values: Y/N'; + +-- Create the ag_skill_instance_t table in the nexent schema +-- Stores skill instance configuration per agent version +-- Note: skill_description and skill_content fields removed, now retrieved from ag_skill_info_t +CREATE TABLE IF NOT EXISTS nexent.ag_skill_instance_t ( + skill_instance_id SERIAL4 NOT NULL, + skill_id INTEGER NOT NULL, + agent_id INTEGER NOT NULL, + user_id VARCHAR(100), + tenant_id VARCHAR(100), + enabled BOOLEAN DEFAULT TRUE, + version_no INTEGER DEFAULT 0 NOT NULL, + created_by VARCHAR(100), + create_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_by VARCHAR(100), + update_time TIMESTAMP WITHOUT TIME ZONE DEFAULT CURRENT_TIMESTAMP, + delete_flag VARCHAR(1) DEFAULT 'N', + CONSTRAINT ag_skill_instance_t_pkey PRIMARY KEY (skill_instance_id, version_no) +); + +ALTER TABLE "ag_skill_instance_t" OWNER TO "root"; + +-- Add comment to the table +COMMENT ON TABLE nexent.ag_skill_instance_t IS 'Skill instance configuration table - stores per-agent skill settings'; + +-- Add comments to the columns +COMMENT ON COLUMN nexent.ag_skill_instance_t.skill_instance_id IS 'Skill instance ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.skill_id IS 'Foreign key to ag_skill_info_t.skill_id'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.agent_id IS 'Agent ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.user_id IS 'User ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.tenant_id IS 'Tenant ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.enabled IS 'Whether this skill is enabled for the agent'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.version_no IS 'Version number. 
0 = draft/editing state, >=1 = published snapshot'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.created_by IS 'Creator ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.create_time IS 'Creation timestamp'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.updated_by IS 'Last updater ID'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.update_time IS 'Last update timestamp'; +COMMENT ON COLUMN nexent.ag_skill_instance_t.delete_flag IS 'Whether it is deleted. Optional values: Y/N'; diff --git a/frontend/app/[locale]/agents/components/AgentConfigComp.tsx b/frontend/app/[locale]/agents/components/AgentConfigComp.tsx index cb321f32c..aac48a09c 100644 --- a/frontend/app/[locale]/agents/components/AgentConfigComp.tsx +++ b/frontend/app/[locale]/agents/components/AgentConfigComp.tsx @@ -1,17 +1,21 @@ "use client"; -import { useState, useCallback } from "react"; +import { useState, useCallback, useEffect } from "react"; import { useTranslation } from "react-i18next"; import { App, Button, Row, Col, Flex, Tooltip, Badge, Divider } from "antd"; import CollaborativeAgent from "./agentConfig/CollaborativeAgent"; import ToolManagement from "./agentConfig/ToolManagement"; +import SkillManagement from "./agentConfig/SkillManagement"; +import SkillBuildModal from "./agentConfig/SkillBuildModal"; import { updateToolList } from "@/services/mcpService"; import { useAgentConfigStore } from "@/stores/agentConfigStore"; import { useToolList } from "@/hooks/agent/useToolList"; +import { useSkillList } from "@/hooks/agent/useSkillList"; +import { useAgentSkillInstances } from "@/hooks/agent/useAgentSkillInstances"; import McpConfigModal from "./agentConfig/McpConfigModal"; -import { RefreshCw, Lightbulb, Plug } from "lucide-react"; +import { RefreshCw, Lightbulb, Plug, BlocksIcon } from "lucide-react"; interface AgentConfigCompProps {} @@ -21,14 +25,26 @@ export default function AgentConfigComp({}: AgentConfigCompProps) { // Get state from store const currentAgentId = 
useAgentConfigStore((state) => state.currentAgentId); - const isCreatingMode = useAgentConfigStore((state) => state.isCreatingMode); const [isMcpModalOpen, setIsMcpModalOpen] = useState(false); + const [isSkillModalOpen, setIsSkillModalOpen] = useState(false); const [isRefreshing, setIsRefreshing] = useState(false); + const [isRefreshingSkill, setIsRefreshingSkill] = useState(false); - // Use tool list hook for data management const { groupedTools, invalidate } = useToolList(); + const { groupedSkills, invalidate: invalidateSkills } = useSkillList(); + const { skillInstances, invalidate: invalidateSkillInstances } = useAgentSkillInstances( + currentAgentId ?? null + ); + const setInitialSkills = useAgentConfigStore((state) => state.setInitialSkills); + + // Load skill instances when agent changes + useEffect(() => { + if (currentAgentId && skillInstances.length > 0) { + setInitialSkills(skillInstances); + } + }, [currentAgentId, skillInstances, setInitialSkills]); const handleRefreshTools = useCallback(async () => { setIsRefreshing(true); @@ -49,52 +65,58 @@ export default function AgentConfigComp({}: AgentConfigCompProps) { } }, [invalidate]); + const handleRefreshSkills = useCallback(async () => { + setIsRefreshingSkill(true); + try { + invalidateSkills(); + invalidateSkillInstances(); + message.success(t("skillManagement.message.refreshSuccess")); + } catch (error) { + message.error(t("skillManagement.message.refreshFailed")); + } finally { + setIsRefreshingSkill(false); + } + }, [invalidateSkills, invalidateSkillInstances]); + + const handleSkillBuildSuccess = useCallback(() => { + invalidateSkills(); + if (currentAgentId) { + invalidateSkillInstances(); + } + }, [invalidateSkills, invalidateSkillInstances, currentAgentId]); + return ( <> {/* Import handled by Ant Design Upload (no hidden input required) */} - + -

- {t("businessLogic.config.title")} -

+

{t("businessLogic.config.title")}

- + -

- {t("toolPool.title")} -

+

{t("toolPool.title")}

- {t("toolPool.tooltip.functionGuide")} -
- } + title={
{t("toolPool.tooltip.functionGuide")}
} color="#ffffff" styles={{ root: { backgroundColor: "#ffffff", border: "1px solid #e5e7eb", borderRadius: "6px", - boxShadow: - "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)", + boxShadow: "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)", maxWidth: "800px", minWidth: "700px", width: "fit-content", @@ -134,7 +156,7 @@ export default function AgentConfigComp({}: AgentConfigCompProps) { - + + + + + +

{t("skillPool.title")}

+
+ + + + + + + +
+ + + + + + + + - setIsMcpModalOpen(false)} + setIsMcpModalOpen(false)} /> + + setIsSkillModalOpen(false)} + onSuccess={handleSkillBuildSuccess} /> ); diff --git a/frontend/app/[locale]/agents/components/agentConfig/McpConfigModal.tsx b/frontend/app/[locale]/agents/components/agentConfig/McpConfigModal.tsx index 69c9193af..ebf3c99b5 100644 --- a/frontend/app/[locale]/agents/components/agentConfig/McpConfigModal.tsx +++ b/frontend/app/[locale]/agents/components/agentConfig/McpConfigModal.tsx @@ -597,7 +597,7 @@ export default function McpConfigModal({ onCancel={actionsLocked ? undefined : onCancel} width={1200} closable={!actionsLocked} - maskClosable={!actionsLocked} + mask={{ closable: !actionsLocked }} footer={[ + )} + + + {/* Chat messages area */} +
+ {chatMessages.length === 0 && ( +
+ {t("skillManagement.form.chatPlaceholder")} +
+ )} + {chatMessages.map((msg) => ( +
+
+ {msg.role === "assistant" && isThinkingVisible && msg.content === "" ? ( +
+ + {thinkingDescription && ( + + {thinkingDescription} + + )} +
+ ) : msg.role === "assistant" ? ( +
+ + {extractSkillGenerationResult(msg.content)} + +
+ ) : ( +
{msg.content}
+ )} +
+
+ ))} +
+ + {/* Chat input area */} +
+ +