core.py
import service_dot_parser
import service_chat

# DEV note: NOT using langchain.
# Tried langchain, but its validation gets in the way of more complex prompts,
# and it seems simpler to code directly rather than go via a complicated framework.

def create_command_messages(expert_commands):
    # Build one system message per expert command from its prompt template.
    messages = []
    for command in expert_commands:
        messages.append({'role': 'system', 'content': command.expert_template})
    return messages

def process_response(rsp, prompt_id):
    # If the LLM replied in Graphviz DOT notation, convert it to a human-friendly
    # result; otherwise return the raw response unchanged.
    if service_dot_parser.is_dot_response(rsp):
        return service_dot_parser.parse_dot_and_return_human(rsp, prompt_id)
    return rsp

def execute_prompt(user_prompt, previous_messages, command_messages, prompt_id):
    # TODO: Route to the right 'expert' chain.
    # For now this falls back to the default chain, i.e. the plain user prompt is sent to the LLM.
    user_message = {'role': 'user', 'content': user_prompt}
    messages = command_messages + previous_messages + [user_message]
    rsp = service_chat.send_prompt_messages(messages)
    # Record the exchange in the conversation history (previous_messages is mutated in place).
    previous_messages.append(user_message)
    previous_messages.append({'role': 'assistant', 'content': rsp})
    return process_response(rsp, prompt_id)
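

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module. It assumes an
# ExpertCommand-style object exposing an 'expert_template' attribute and that
# service_chat / service_dot_parser are configured elsewhere in the project;
# the ExpertCommand stand-in and prompt_id value below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from collections import namedtuple

    # Hypothetical stand-in for however expert commands are defined elsewhere.
    ExpertCommand = namedtuple('ExpertCommand', ['expert_template'])
    expert_commands = [
        ExpertCommand(expert_template='When asked for a diagram, answer in Graphviz DOT notation.'),
    ]

    command_messages = create_command_messages(expert_commands)
    previous_messages = []  # chat history; execute_prompt appends to it in place

    answer = execute_prompt(
        user_prompt='Draw a diagram of a simple login flow',
        previous_messages=previous_messages,
        command_messages=command_messages,
        prompt_id='example-1',  # hypothetical id, only forwarded to the DOT parser
    )
    print(answer)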