Commit 6b1e887
Initial commit (0 parents)

9 files changed: +227 -0 lines

.gitignore (+2)

.env
.DS_Store
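
The ignored .env file is where the API keys loaded by load_dotenv() in each script live. A minimal sketch of its contents, assuming the standard variable names read by the OpenAI client and LlamaParse (substitute your own keys):

OPENAI_API_KEY=sk-...
LLAMA_CLOUD_API_KEY=llx-...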

1_basic_agent.py (+24)

from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool

# function tools the agent can call
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

# a ReAct agent over the two tools
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)

response = agent.chat("What is 20+(2*4)? Use a tool to calculate every step.")

print(response)
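
The same pattern scales to more operations: write a plain Python function with type hints and a docstring, wrap it in FunctionTool.from_defaults, and include it in the list passed to ReActAgent.from_tools. A small sketch adding a hypothetical subtract tool (not part of the original commit):

def subtract(a: float, b: float) -> float:
    """Subtract b from a and return the difference."""
    return a - b

subtract_tool = FunctionTool.from_defaults(fn=subtract)

# illustrative only: rebuild the agent with the extra tool
agent = ReActAgent.from_tools([multiply_tool, add_tool, subtract_tool], llm=llm, verbose=True)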

2_local_agent.py (+24)

from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.ollama import Ollama
from llama_index.core.tools import FunctionTool

# function tools the agent can call
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

# a ReAct agent backed by a local model served by Ollama
llm = Ollama(model="mixtral:8x7b", request_timeout=120.0)
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)

response = agent.chat("What is 20+(2*4)? Calculate step by step.")

print(response)
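
Only the LLM changes relative to 1_basic_agent.py: Ollama serves the model locally, so no OpenAI key is required and the tool and agent code stays the same. Any model the local Ollama server has pulled can be swapped in; a sketch assuming a smaller llama3 model is available locally:

# substitute any locally pulled model; smaller models trade accuracy for speed
llm = Ollama(model="llama3", request_timeout=120.0)
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)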

3_rag_agent.py (+44)

from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.core.tools import QueryEngineTool

# settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0)

# function tools
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

# rag pipeline
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

# response = query_engine.query("What was the total amount of the 2023 Canadian federal budget?")
# print(response)

# rag pipeline as a tool
budget_tool = QueryEngineTool.from_defaults(
    query_engine,
    name="canadian_budget_2023",
    description="A RAG engine with some basic facts about the 2023 Canadian federal budget."
)

agent = ReActAgent.from_tools([multiply_tool, add_tool, budget_tool], verbose=True)

response = agent.chat("What is the total amount of the 2023 Canadian federal budget multiplied by 3? Go step by step, using a tool to do any math.")

print(response)
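
Because the index is rebuilt from ./data on every run, the PDF is re-embedded each time. A common refinement is to persist the index and reload it on later runs; a sketch using LlamaIndex's storage helpers (StorageContext and load_index_from_storage; check them against the llama-index version you have installed):

import os
from llama_index.core import StorageContext, load_index_from_storage

PERSIST_DIR = "./storage"
if os.path.exists(PERSIST_DIR):
    # reuse the index built on a previous run
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
else:
    # build once, then save for next time
    documents = SimpleDirectoryReader("./data").load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
query_engine = index.as_query_engine()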

4_llamaparse.py (+38)

from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_parse import LlamaParse

# settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0)

# function tools
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

# rag pipeline using the default PDF reader
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

response = query_engine.query("How much exactly was allocated to a tax credit to promote investment in green technologies in the 2023 Canadian federal budget?")
print(response)

# the same question against an index built from LlamaParse markdown output
documents2 = LlamaParse(result_type="markdown").load_data("./data/2023_canadian_budget.pdf")
index2 = VectorStoreIndex.from_documents(documents2)
query_engine2 = index2.as_query_engine()

response2 = query_engine2.query("How much exactly was allocated to a tax credit to promote investment in green technologies in the 2023 Canadian federal budget?")
print(response2)
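
LlamaParse can also be wired into SimpleDirectoryReader rather than being called directly, so any other files in ./data still go through the default readers while PDFs get the richer parse. A sketch of that pattern, assuming the file_extractor hook behaves as described in the LlamaParse docs and that LLAMA_CLOUD_API_KEY is set in .env:

parser = LlamaParse(result_type="markdown")
documents3 = SimpleDirectoryReader(
    "./data",
    file_extractor={".pdf": parser},  # route PDFs through LlamaParse
).load_data()
index3 = VectorStoreIndex.from_documents(documents3)
query_engine3 = index3.as_query_engine()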

5_memory.py (+49)

from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_parse import LlamaParse
from llama_index.core.tools import QueryEngineTool

# settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0)

# function tools
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

# rag pipeline
documents = LlamaParse(result_type="markdown").load_data("./data/2023_canadian_budget.pdf")
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

budget_tool = QueryEngineTool.from_defaults(
    query_engine,
    name="canadian_budget_2023",
    description="A RAG engine with some basic facts about the 2023 Canadian federal budget. Ask natural-language questions about the budget."
)

agent = ReActAgent.from_tools([multiply_tool, add_tool, budget_tool], verbose=True)

response = agent.chat("How much exactly was allocated to a tax credit to promote investment in green technologies in the 2023 Canadian federal budget?")

print(response)

response = agent.chat("How much was allocated to implement a means-tested dental care program in the 2023 Canadian federal budget?")

print(response)

response = agent.chat("How much was the total of those two allocations added together? Use a tool to answer any questions.")

print(response)
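
The three chat() calls share state because ReActAgent keeps a chat history between turns; that is what lets the last question refer back to "those two allocations". To control that history explicitly, a sketch assuming ChatMemoryBuffer and the memory keyword work as in current llama-index releases:

from llama_index.core.memory import ChatMemoryBuffer

# cap how much conversation history the agent carries between turns
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
agent = ReActAgent.from_tools(
    [multiply_tool, add_tool, budget_tool],
    memory=memory,
    verbose=True,
)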

6_tools.py (+32)

from dotenv import load_dotenv
load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import FunctionTool
from llama_index.core import Settings
from llama_index.tools.yahoo_finance import YahooFinanceToolSpec

# settings
Settings.llm = OpenAI(model="gpt-4o", temperature=0)

# function tools
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

# combine the prebuilt Yahoo Finance tools with the function tools
finance_tools = YahooFinanceToolSpec().to_tool_list()
finance_tools.extend([multiply_tool, add_tool])

agent = ReActAgent.from_tools(finance_tools, verbose=True)

response = agent.chat("What is the current price of NVDA?")

print(response)
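
YahooFinanceToolSpec().to_tool_list() returns several ready-made tools, and the hand-written function tools are simply appended to the same list before the agent is built. A quick sketch for inspecting what the agent ends up with (each tool exposes its name and description via metadata):

# list the combined toolset before handing it to the agent
for tool in finance_tools:
    print(tool.metadata.name, "-", tool.metadata.description)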

data/2023_canadian_budget.pdf (binary file, 367 KB; not shown)

pyproject.toml (+14)

[tool.poetry]
name = "agents-tutorial"
version = "0.1.0"
description = ""
authors = ["Laurie Voss <[email protected]>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
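
As committed, [tool.poetry.dependencies] only pins Python; the scripts above also import python-dotenv, llama-index, llama-parse, plus the Ollama LLM and Yahoo Finance tool integrations. A sketch of what the table might look like once those are added (package names and open version ranges are assumptions; confirm against PyPI before pinning):

[tool.poetry.dependencies]
python = "^3.11"
python-dotenv = "*"
llama-index = "*"
llama-parse = "*"
llama-index-llms-ollama = "*"
llama-index-tools-yahoo-finance = "*"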
