Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,24 @@ User Input → ChatContext → WebSocket → Backend ChatService
- **MCP Servers**: `config/defaults/mcp.json` and `config/overrides/mcp.json`
- **Environment**: `.env` (copy from `.env.example`)

### Prompt System (Updated 2025-11-24)
The application uses a prompt system to manage various LLM prompts:

- **System Prompt**: `prompts/system_prompt.md` - Default system prompt prepended to all conversations
- Configurable via `system_prompt_filename` in AppSettings (default: `system_prompt.md`)
- Supports `{user_email}` template variable
- Can be overridden by MCP-provided prompts
- Loaded by `PromptProvider.get_system_prompt()`
- Automatically injected by `MessageBuilder` at conversation start

- **Agent Prompts**: Used in agent loop strategies
- `prompts/agent_reason_prompt.md` - Reasoning phase
- `prompts/agent_observe_prompt.md` - Observation phase

- **Tool Synthesis**: `prompts/tool_synthesis_prompt.md` - Tool selection guidance

All prompts are loaded from the directory specified by `prompt_base_path` (default: `prompts/`). The system caches loaded prompts for performance.

### WebSocket Communication
Backend serves WebSocket at `/ws` with message types:
- `chat` - User sends message
Expand Down
2 changes: 1 addition & 1 deletion backend/application/chat/orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def __init__(
# Initialize services
self.tool_authorization = ToolAuthorizationService(tool_manager=tool_manager)
self.prompt_override = PromptOverrideService(tool_manager=tool_manager)
self.message_builder = MessageBuilder()
self.message_builder = MessageBuilder(prompt_provider=prompt_provider)

# Initialize or use provided mode runners
self.plain_mode = plain_mode or PlainModeRunner(
Expand Down
36 changes: 30 additions & 6 deletions backend/application/chat/preprocessors/message_builder.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
"""Message builder - constructs messages with history and files manifest."""

import logging
from typing import List, Dict, Any
from typing import List, Dict, Any, Optional

from domain.sessions.models import Session
from modules.prompts.prompt_provider import PromptProvider
from ..utilities import file_utils

logger = logging.getLogger(__name__)
Expand All @@ -12,10 +13,10 @@
def build_session_context(session: Session) -> Dict[str, Any]:
"""
Build session context dictionary from session.

Args:
session: Chat session

Returns:
Session context dictionary
"""
Expand All @@ -30,27 +31,50 @@ def build_session_context(session: Session) -> Dict[str, Any]:
class MessageBuilder:
"""
Service that builds complete message arrays for LLM calls.
Combines conversation history with files manifest and other context.

Combines conversation history with files manifest and system prompt.
"""

def __init__(self, prompt_provider: Optional[PromptProvider] = None):
    """
    Initialize the message builder.

    Args:
        prompt_provider: Optional provider used to load the system prompt;
            when None, no system prompt is ever injected into the messages.
    """
    self.prompt_provider = prompt_provider

async def build_messages(
self,
session: Session,
include_files_manifest: bool = True,
include_system_prompt: bool = True,
) -> List[Dict[str, Any]]:
"""
Build messages array from session history and context.

Args:
session: Current chat session
include_files_manifest: Whether to append files manifest
include_system_prompt: Whether to prepend system prompt

Returns:
List of messages ready for LLM call
"""
messages = []

# Optionally add system prompt at the beginning
if include_system_prompt and self.prompt_provider:
system_prompt = self.prompt_provider.get_system_prompt(
user_email=session.user_email
)
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
logger.debug(f"Added system prompt (len={len(system_prompt)})")

# Get conversation history from session
messages = session.history.get_messages_for_llm()
history_messages = session.history.get_messages_for_llm()
messages.extend(history_messages)

# Optionally add files manifest
if include_files_manifest:
Expand Down
1 change: 1 addition & 0 deletions backend/modules/config/config_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -309,6 +309,7 @@ def agent_mode_available(self) -> bool:

# Prompt / template settings
prompt_base_path: str = "prompts" # Relative or absolute path to directory containing prompt templates
system_prompt_filename: str = "system_prompt.md" # Filename for system prompt template
tool_synthesis_prompt_filename: str = "tool_synthesis_prompt.md" # Filename for tool synthesis prompt template
# Agent prompts
agent_reason_prompt_filename: str = "agent_reason_prompt.md" # Filename for agent reason phase
Expand Down
18 changes: 18 additions & 0 deletions backend/modules/prompts/prompt_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,24 @@ def get_agent_observe_prompt(
logger.warning("Formatting agent observe prompt failed: %s", e)
return None

def get_system_prompt(self, user_email: Optional[str] = None) -> Optional[str]:
    """Load and render the system prompt template.

    The template may reference the ``{user_email}`` placeholder; a missing
    value is rendered as an empty string.

    Returns:
        The formatted prompt text, or None when the template is missing,
        empty, or formatting fails.
    """
    filename = self.config_manager.app_settings.system_prompt_filename
    template = self._load_template(filename)
    # Treat a missing OR empty template as "no system prompt".
    if not template:
        return None
    try:
        return template.format(user_email=(user_email or ""))
    except Exception as e:  # pragma: no cover
        logger.warning("Formatting system prompt failed: %s", e)
        return None

def clear_cache(self) -> None:
    """Drop every cached prompt template (e.g. after a config reload)."""
    self._cache.clear()
181 changes: 181 additions & 0 deletions backend/tests/test_system_prompt_loading.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
import tempfile
Copy link

Copilot AI Nov 25, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

NOTE(review): this suggestion is incorrect — `tempfile` IS used by `test_system_prompt_sent_to_llm` (`with tempfile.TemporaryDirectory() as tmp_dir:`), so the `import tempfile` line must be kept. Do not apply the suggested removal.

Copilot uses AI. Check for mistakes.
import uuid
from pathlib import Path

import pytest

from modules.config import ConfigManager
from modules.prompts.prompt_provider import PromptProvider
from application.chat.preprocessors.message_builder import MessageBuilder
from domain.sessions.models import Session
from domain.messages.models import Message, MessageRole


@pytest.mark.asyncio
async def test_prompt_provider_loads_system_prompt(tmp_path):
    """PromptProvider should load system_prompt.md and substitute {user_email}."""
    # Write a throwaway system prompt template into a temp directory.
    template_dir = tmp_path / "prompts"
    template_dir.mkdir()
    (template_dir / "system_prompt.md").write_text(
        "You are a helpful assistant for user {user_email}."
    )

    # Point the app settings at the temporary template directory.
    cfg = ConfigManager()
    cfg.app_settings.prompt_base_path = str(template_dir)
    cfg.app_settings.system_prompt_filename = "system_prompt.md"

    provider = PromptProvider(cfg)
    rendered = provider.get_system_prompt(user_email="[email protected]")

    assert rendered is not None
    assert "[email protected]" in rendered
    assert "helpful assistant" in rendered


@pytest.mark.asyncio
async def test_prompt_provider_handles_missing_system_prompt():
    """get_system_prompt must return None when the template file is absent."""
    # Aim the provider at a directory that cannot exist.
    cfg = ConfigManager()
    cfg.app_settings.prompt_base_path = "/nonexistent/path"
    cfg.app_settings.system_prompt_filename = "system_prompt.md"

    provider = PromptProvider(cfg)

    assert provider.get_system_prompt(user_email="[email protected]") is None


@pytest.mark.asyncio
async def test_message_builder_includes_system_prompt(tmp_path):
    """MessageBuilder must prepend the rendered system prompt to the history."""
    # Write a throwaway system prompt template.
    template_dir = tmp_path / "prompts"
    template_dir.mkdir()
    (template_dir / "system_prompt.md").write_text(
        "You are a helpful assistant for user {user_email}."
    )

    cfg = ConfigManager()
    cfg.app_settings.prompt_base_path = str(template_dir)
    cfg.app_settings.system_prompt_filename = "system_prompt.md"

    builder = MessageBuilder(prompt_provider=PromptProvider(cfg))

    # One user turn in history.
    chat_session = Session(user_email="[email protected]")
    chat_session.history.add_message(Message(role=MessageRole.USER, content="Hello"))

    built = await builder.build_messages(
        session=chat_session,
        include_files_manifest=False,
        include_system_prompt=True,
    )

    # System prompt first, user turn second.
    assert len(built) >= 2
    assert built[0]["role"] == "system"
    assert "helpful assistant" in built[0]["content"]
    assert "[email protected]" in built[0]["content"]
    assert built[1]["role"] == "user"
    assert built[1]["content"] == "Hello"


@pytest.mark.asyncio
async def test_message_builder_without_system_prompt(tmp_path):
    """With include_system_prompt=False only the history must be returned."""
    # Provider that cannot find a template; the flag should skip it anyway.
    cfg = ConfigManager()
    cfg.app_settings.prompt_base_path = "/nonexistent"
    builder = MessageBuilder(prompt_provider=PromptProvider(cfg))

    chat_session = Session(user_email="[email protected]")
    chat_session.history.add_message(Message(role=MessageRole.USER, content="Hello"))

    built = await builder.build_messages(
        session=chat_session,
        include_files_manifest=False,
        include_system_prompt=False,
    )

    # Only the single user message survives.
    assert len(built) == 1
    assert built[0]["role"] == "user"
    assert built[0]["content"] == "Hello"


@pytest.mark.asyncio
async def test_system_prompt_sent_to_llm():
    """End-to-end: ChatService should hand the system prompt to the LLM."""
    # NOTE: tempfile (not the tmp_path fixture) is used here intentionally.
    with tempfile.TemporaryDirectory() as workdir:
        template_dir = Path(workdir) / "prompts"
        template_dir.mkdir()
        (template_dir / "system_prompt.md").write_text(
            "You are a helpful AI assistant for user {user_email}."
        )

        cfg = ConfigManager()
        cfg.app_settings.prompt_base_path = str(template_dir)
        cfg.app_settings.system_prompt_filename = "system_prompt.md"

        # Fake LLM that records the message array it receives.
        captured = {}

        class DummyLLM:
            async def call_plain(self, model_name, messages, temperature=0.7):
                captured["messages"] = messages
                return "Hello! I'm here to help."

        from application.chat.service import ChatService

        service = ChatService(
            llm=DummyLLM(),
            tool_manager=None,
            connection=None,
            config_manager=cfg,
            file_manager=None,
        )

        # Drive a single chat turn through the service.
        await service.handle_chat_message(
            session_id=uuid.uuid4(),
            content="Hello",
            model="test-model",
            user_email="[email protected]",
            selected_tools=None,
            selected_prompts=None,
            selected_data_sources=None,
            only_rag=False,
            tool_choice_required=False,
            agent_mode=False,
            temperature=0.7,
        )

        # The LLM must have seen the system prompt first, then the user turn.
        msgs = captured.get("messages")
        assert msgs, "LLM was not called or messages not captured"
        assert len(msgs) >= 2
        assert msgs[0]["role"] == "system", f"Expected first message to be system, got: {msgs[0]}"
        assert "helpful AI assistant" in msgs[0]["content"]
        assert "[email protected]" in msgs[0]["content"]
Loading