Skip to content

Commit 36a83eb — browse files

Committed: "Updated for new agent tutorial"
1 parent: 6b1e887 · commit 36a83eb

12 files changed: +384 / −196 lines changed

1_basic_agent.py

+14-9
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,29 @@
"""Basic agent tutorial: a math-capable agent built on AgentWorkflow.

Requires OPENAI_API_KEY in the environment (loaded from .env).
"""
from dotenv import load_dotenv

load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow


def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    return a * b


def add(a: float, b: float) -> float:
    """Add two numbers and returns the sum"""
    return a + b


# The LLM that drives the agent's reasoning and tool selection.
llm = OpenAI(model="gpt-4o-mini")

# Plain Python functions are converted into tools automatically;
# their docstrings become the tool descriptions shown to the LLM.
workflow = AgentWorkflow.from_tools_or_functions(
    [multiply, add],
    llm=llm,
    system_prompt="You are an agent that can perform basic mathematical operations using tools."
)


async def main():
    """Run a single math question through the agent and print the reply."""
    answer = await workflow.run(user_msg="What is 20+(2*4)?")
    print(answer)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

2_local_agent.py

-24
This file was deleted.

2_tools.py

+33
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
"""Tools tutorial: an agent combining Yahoo Finance tools with math helpers.

Requires OPENAI_API_KEY in the environment (loaded from .env).
"""
from dotenv import load_dotenv

load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec


def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    return a * b


def add(a: float, b: float) -> float:
    """Add two numbers and returns the sum"""
    return a + b


llm = OpenAI(model="gpt-4o-mini")

# The Yahoo Finance tool spec contributes stock-lookup tools; the plain
# functions appended here are converted to tools by the workflow factory.
finance_tools = YahooFinanceToolSpec().to_tool_list()
finance_tools.extend([multiply, add])

workflow = AgentWorkflow.from_tools_or_functions(
    finance_tools,
    llm=llm,
    # Fix: the previous prompt was copied unchanged from the math-only agent
    # and never mentioned the finance tools this agent is built around.
    system_prompt=(
        "You are an agent that can look up stock market information and "
        "perform basic mathematical operations using tools."
    ),
)


async def main():
    """Ask the agent a finance question and print the reply."""
    response = await workflow.run(user_msg="What's the current stock price of NVIDIA?")
    print(response)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

3_rag_agent.py

-44
This file was deleted.

3_state.py

+52
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
"""State tutorial: keep conversation memory in a Context and serialize it.

Requires OPENAI_API_KEY in the environment (loaded from .env).
"""
from dotenv import load_dotenv

load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec
from llama_index.core.workflow import Context
# NOTE(review): JsonPickleSerializer is imported but not used below —
# presumably kept as the alternative for non-JSON-serializable state; confirm.
from llama_index.core.workflow import JsonPickleSerializer, JsonSerializer


def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    return a * b


def add(a: float, b: float) -> float:
    """Add two numbers and returns the sum"""
    return a + b


llm = OpenAI(model="gpt-4o-mini")

finance_tools = YahooFinanceToolSpec().to_tool_list()
finance_tools.extend([multiply, add])

workflow = AgentWorkflow.from_tools_or_functions(
    finance_tools,
    llm=llm,
    system_prompt="You are an agent that can perform basic mathematical operations using tools."
)

# configure a context to work with our workflow; it carries chat state
# across successive workflow.run() calls.
ctx = Context(workflow)


async def main():
    """Show that conversation state survives a round-trip through a dict."""
    first = await workflow.run(user_msg="Hi, my name is Laurie!", ctx=ctx)
    print(first)

    second = await workflow.run(user_msg="What's my name?", ctx=ctx)
    print(second)

    # convert our Context to a dictionary object
    ctx_dict = ctx.to_dict(serializer=JsonSerializer())

    # create a new Context from the dictionary
    restored_ctx = Context.from_dict(
        workflow, ctx_dict, serializer=JsonSerializer()
    )

    # The restored context should still remember the name.
    third = await workflow.run(user_msg="What's my name?", ctx=restored_ctx)
    print(third)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

3a_tools_and_state.py

+41
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
"""Tools-and-state tutorial: a tool reads and writes the shared "state" dict.

Requires OPENAI_API_KEY in the environment (loaded from .env).
"""
from dotenv import load_dotenv

load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec
from llama_index.core.workflow import Context

llm = OpenAI(model="gpt-4o-mini")


async def set_name(ctx: Context, name: str) -> str:
    # Read-modify-write the shared state dict held by the workflow context.
    state = await ctx.get("state")
    state["name"] = name
    await ctx.set("state", state)
    return f"Name set to {name}"


workflow = AgentWorkflow.from_tools_or_functions(
    [set_name],
    llm=llm,
    system_prompt="You are a helpful assistant that can set a name.",
    initial_state={"name": "unset"},
)


async def main():
    """Exercise the state tool and then inspect the state directly."""
    ctx = Context(workflow)

    # check if it knows a name before setting it
    before = await workflow.run(user_msg="What's my name?", ctx=ctx)
    print(str(before))

    # set the name using a tool
    after = await workflow.run(user_msg="My name is Laurie", ctx=ctx)
    print(str(after))

    # retrieve the value from the state directly
    state = await ctx.get("state")
    print("Name as stored in state: ", state["name"])


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

4_llamaparse.py

-38
This file was deleted.

4_streaming.py

+51
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
"""Streaming tutorial: observe agent events while a web-search query runs.

Requires OPENAI_API_KEY and TAVILY_API_KEY in the environment (.env).
"""
from dotenv import load_dotenv

load_dotenv()

import os

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import Context
from llama_index.tools.tavily_research import TavilyToolSpec
from llama_index.core.agent.workflow import (
    AgentInput,
    AgentOutput,
    ToolCall,
    ToolCallResult,
    AgentStream,
)

llm = OpenAI(model="gpt-4o-mini")

tavily_tool = TavilyToolSpec(api_key=os.getenv("TAVILY_API_KEY"))

workflow = AgentWorkflow.from_tools_or_functions(
    tavily_tool.to_tool_list(),
    llm=llm,
    system_prompt="You're a helpful assistant that can search the web for information."
)


async def main():
    """Stream intermediate agent events, then print the final answer."""
    handler = workflow.run(user_msg="What's the weather like in San Francisco?")

    # handle streaming output
    async for event in handler.stream_events():
        if isinstance(event, AgentStream):
            # Incremental token delta of the response being generated.
            print(event.delta, end="", flush=True)
        elif isinstance(event, AgentInput):
            print("Agent input: ", event.input)  # the current input messages
            print("Agent name:", event.current_agent_name)  # the current agent name
        elif isinstance(event, AgentOutput):
            print("Agent output: ", event.response)  # the current full response
            print("Tool calls made: ", event.tool_calls)  # the selected tool calls, if any
            print("Raw LLM response: ", event.raw)  # the raw llm api response
        elif isinstance(event, ToolCallResult):
            print("Tool called: ", event.tool_name)  # the tool name
            print("Arguments to the tool: ", event.tool_kwargs)  # the tool kwargs
            print("Tool output: ", event.tool_output)  # the tool output

    # print final output
    print(str(await handler))


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

5_human_in_the_loop.py

+64
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
"""Human-in-the-loop tutorial: a tool that pauses for user confirmation.

Requires OPENAI_API_KEY in the environment (loaded from .env).
"""
from dotenv import load_dotenv

load_dotenv()

from llama_index.llms.openai import OpenAI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.workflow import Context
from llama_index.core.workflow import (
    InputRequiredEvent,
    HumanResponseEvent,
)

llm = OpenAI(model="gpt-4o-mini")


# a tool that performs a dangerous task
async def dangerous_task(ctx: Context) -> str:
    """A dangerous task that requires human confirmation."""

    # emit an event to the external stream to be captured
    ctx.write_event_to_stream(
        InputRequiredEvent(
            prefix="Are you sure you want to proceed? ",
            user_name="Laurie",
        )
    )

    # wait until we see a HumanResponseEvent matching this user
    confirmation = await ctx.wait_for_event(
        HumanResponseEvent, requirements={"user_name": "Laurie"}
    )

    # act on the input from the event
    if confirmation.response.strip().lower() == "yes":
        return "Dangerous task completed successfully."
    return "Dangerous task aborted."


workflow = AgentWorkflow.from_tools_or_functions(
    [dangerous_task],
    llm=llm,
    system_prompt="You are a helpful assistant that can perform dangerous tasks.",
)


async def main():
    """Drive the agent, answering its confirmation request from the keyboard."""
    handler = workflow.run(user_msg="I want to proceed with the dangerous task.")

    async for event in handler.stream_events():
        # capture InputRequiredEvent
        if isinstance(event, InputRequiredEvent):
            # capture keyboard input (blocks the event loop; fine for a CLI demo)
            answer = input(event.prefix)
            # send our response back
            handler.ctx.send_event(
                HumanResponseEvent(
                    response=answer,
                    user_name=event.user_name,
                )
            )

    result = await handler
    print(str(result))


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

Commit comments (0)