Commit c69e4e5

move importing llm models inside their respective functions
1 parent 9788d43 commit c69e4e5
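
The change defers each third-party import from module load time into the function that needs it, so every LLM backend becomes an optional dependency: importing `llama_utils.utils.models` succeeds even when a backend package is missing, and a clear ImportError is raised only when the corresponding function is actually called. A minimal sketch of the pattern, using a hypothetical `some_backend` package purely for illustration:

    def get_some_llm(model_id: str = "example-model"):
        """Return an LLM client, importing its backend only when the function is called."""
        try:
            # Deferred import: the dependency is only required if this function is used.
            from some_backend import SomeLLM  # hypothetical package, not part of the repo
        except ImportError:
            raise ImportError(
                "Please install the `some-backend` package to use this model."
            )
        return SomeLLM(model=model_id)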


src/llama_utils/utils/models.py

Lines changed: 82 additions & 6 deletions
@@ -3,13 +3,44 @@
 import os
 from warnings import warn
 
-from llama_index.embeddings.huggingface import HuggingFaceEmbedding
-from llama_index.llms.azure_openai import AzureOpenAI
-from llama_index.llms.ollama import Ollama
-
 
 def azure_open_ai(model_id: str = "gpt-4o", engine: str = "4o"):
-    """Get the Azure OpenAI model."""
+    """Get the Azure OpenAI model.
+
+    Parameters
+    ----------
+    model_id: str, optional, default is "gpt-4o"
+        The model ID.
+    engine: str, optional, default is "4o"
+        The engine.
+
+    Returns
+    -------
+    AzureOpenAI
+        The Azure OpenAI model.
+
+    Raises
+    ------
+    ImportError
+        If the `llama-index-llms-azure-openai` package is not installed.
+
+    Examples
+    --------
+    >>> from llama_utils.utils.models import azure_open_ai
+    >>> from dotenv import load_dotenv
+    >>> load_dotenv()  # doctest: +SKIP
+    >>> llm = azure_open_ai()  # doctest: +SKIP
+    >>> print(llm.model)  # doctest: +SKIP
+    gpt-4o
+    >>> print(llm.metadata)  # doctest: +SKIP
+    context_window=128000 num_output=-1 is_chat_model=True is_function_calling_model=True model_name='gpt-4o' system_role=<MessageRole.SYSTEM: 'system'>
+    """
+    try:
+        from llama_index.llms.azure_openai import AzureOpenAI
+    except ImportError:
+        raise ImportError(
+            "Please install the `llama-index-llms-azure-openai` package to use the Azure OpenAI model."
+        )
     endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
     api_key = os.environ.get("AZURE_OPENAI_API_KEY")
     api_version = os.environ.get("AZURE_OPENAI_API_VERSION")
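
The refactored `azure_open_ai` still reads its connection settings from the `AZURE_OPENAI_*` environment variables shown above. A sketch of how a caller might configure and invoke it, with placeholder values standing in for a real Azure deployment:

    import os

    # Placeholder values for illustration only; use your own deployment's settings.
    os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://<your-resource>.openai.azure.com/")
    os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-api-key>")
    os.environ.setdefault("AZURE_OPENAI_API_VERSION", "<api-version>")

    from llama_utils.utils.models import azure_open_ai

    try:
        llm = azure_open_ai(model_id="gpt-4o", engine="4o")
    except ImportError:
        # Raised when llama-index-llms-azure-openai is not installed.
        llm = None
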
@@ -41,14 +72,36 @@ def get_ollama_llm(model_id: str = "llama3"):
     -------
     Ollama
         The Ollama LLM.
+
+    Raises
+    ------
+    ImportError
+        If the `llama-index-llms-ollama` package is not installed.
+
+    Examples
+    --------
+    >>> from llama_utils.utils.models import get_ollama_llm
+    >>> llm = get_ollama_llm()
+    >>> print(llm.model)
+    llama3
+    >>> print(llm.base_url)
+    http://localhost:11434
+    >>> print(llm.metadata)
+    context_window=3900 num_output=256 is_chat_model=True is_function_calling_model=True model_name='llama3' system_role=<MessageRole.SYSTEM: 'system'>
     """
+    try:
+        from llama_index.llms.ollama import Ollama
+    except ImportError:
+        raise ImportError(
+            "Please install the `llama-index-llms-ollama` package to use the Ollama model."
+        )
     llm = Ollama(model=model_id, request_timeout=360.0)
     return llm
 
 
 def get_hugging_face_embedding(
     model_name: str = "BAAI/bge-base-en-v1.5", cache_folder: str = None
-) -> HuggingFaceEmbedding:
+):
     """Get the hugging face embedding model.
 
     Parameters
@@ -62,6 +115,29 @@ def get_hugging_face_embedding(
     -------
     HuggingFaceEmbedding
         The hugging face embedding model.
+
+    Raises
+    ------
+    ImportError
+        If the `llama-index-embeddings-huggingface` package is not installed.
+
+    Examples
+    --------
+    >>> from llama_utils.utils.models import get_hugging_face_embedding
+    >>> embedding = get_hugging_face_embedding()
+    >>> print(embedding.model_name)
+    BAAI/bge-base-en-v1.5
+    >>> print(embedding.max_length)
+    512
+    >>> print(embedding.embed_batch_size)
+    10
     """
+    try:
+        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+    except ImportError:
+        raise ImportError(
+            "Please install the `llama-index-embeddings-huggingface` package to use the Hugging Face embedding model."
+        )
+
     embedding = HuggingFaceEmbedding(model_name=model_name, cache_folder=cache_folder)
     return embedding
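
With the imports moved inside each function, only the backend a caller actually uses has to be installed. A short usage sketch (assuming the function names shown in this diff) that degrades gracefully when an optional backend is missing:

    from llama_utils.utils.models import get_hugging_face_embedding, get_ollama_llm

    try:
        llm = get_ollama_llm("llama3")  # requires llama-index-llms-ollama
    except ImportError as err:
        print(f"Ollama backend unavailable: {err}")
        llm = None

    try:
        embedding = get_hugging_face_embedding()  # requires llama-index-embeddings-huggingface
    except ImportError as err:
        print(f"HuggingFace embedding unavailable: {err}")
        embedding = None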
