3
3
import os
4
4
from warnings import warn
5
5
6
- from llama_index .embeddings .huggingface import HuggingFaceEmbedding
7
- from llama_index .llms .azure_openai import AzureOpenAI
8
- from llama_index .llms .ollama import Ollama
9
-
10
6
11
7
def azure_open_ai (model_id : str = "gpt-4o" , engine : str = "4o" ):
12
- """Get the Azure OpenAI model."""
8
+ """Get the Azure OpenAI model.
9
+
10
+ Parameters
11
+ ----------
12
+ model_id: str, optional, default is "gpt-4o"
13
+ The model ID.
14
+ engine: str, optional, default is "4o"
15
+ The engine.
16
+
17
+ Returns
18
+ -------
19
+ AzureOpenAI
20
+ The Azure OpenAI model.
21
+
22
+ Raises
23
+ ------
24
+ ImportError
25
+ If the `llama-index-llms-azure-openai` package is not installed.
26
+
27
+ Examples
28
+ --------
29
+ >>> from llama_utils.utils.models import azure_open_ai
30
+ >>> from dotenv import load_dotenv
31
+ >>> load_dotenv() # doctest: +SKIP
32
+ >>> llm = azure_open_ai() # doctest: +SKIP
33
+ >>> print(llm.model) # doctest: +SKIP
34
+ gpt-4o
35
+ >>> print(llm.metadata) # doctest: +SKIP
36
+ context_window=128000 num_output=-1 is_chat_model=True is_function_calling_model=True model_name='gpt-4o' system_role=<MessageRole.SYSTEM: 'system'>
37
+ """
38
+ try :
39
+ from llama_index .llms .azure_openai import AzureOpenAI
40
+ except ImportError :
41
+ raise ImportError (
42
+ "Please install the `llama-index-llms-azure-openai` package to use the Azure OpenAI model."
43
+ )
13
44
endpoint = os .environ .get ("AZURE_OPENAI_ENDPOINT" )
14
45
api_key = os .environ .get ("AZURE_OPENAI_API_KEY" )
15
46
api_version = os .environ .get ("AZURE_OPENAI_API_VERSION" )
def get_ollama_llm(model_id: str = "llama3"):
    """Get the Ollama LLM.

    Parameters
    ----------
    model_id: str, optional, default is "llama3"
        The model ID.

    Returns
    -------
    Ollama
        The Ollama LLM.

    Raises
    ------
    ImportError
        If the `llama-index-llms-ollama` package is not installed.

    Examples
    --------
    >>> from llama_utils.utils.models import get_ollama_llm
    >>> llm = get_ollama_llm()
    >>> print(llm.model)
    llama3
    >>> print(llm.base_url)
    http://localhost:11434
    >>> print(llm.metadata)
    context_window=3900 num_output=256 is_chat_model=True is_function_calling_model=True model_name='llama3' system_role=<MessageRole.SYSTEM: 'system'>
    """
    try:
        # Lazy import so the Ollama integration stays an optional dependency.
        from llama_index.llms.ollama import Ollama
    except ImportError as e:
        # Chain with `from e` so the original import failure stays in the traceback.
        raise ImportError(
            "Please install the `llama-index-llms-ollama` package to use the Ollama model."
        ) from e

    # Generous 360 s timeout: local Ollama models can be slow on first load.
    llm = Ollama(model=model_id, request_timeout=360.0)
    return llm
47
100
48
101
49
102
def get_hugging_face_embedding(
    model_name: str = "BAAI/bge-base-en-v1.5", cache_folder: str = None
):
    """Get the hugging face embedding model.

    Parameters
    ----------
    model_name: str, optional, default is "BAAI/bge-base-en-v1.5"
        The Hugging Face model name.
    cache_folder: str, optional, default is None
        Directory where downloaded model weights are cached; when None the
        library default cache location is used.

    Returns
    -------
    HuggingFaceEmbedding
        The hugging face embedding model.

    Raises
    ------
    ImportError
        If the `llama-index-embeddings-huggingface` package is not installed.

    Examples
    --------
    >>> from llama_utils.utils.models import get_hugging_face_embedding
    >>> embedding = get_hugging_face_embedding()
    >>> print(embedding.model_name)
    BAAI/bge-base-en-v1.5
    >>> print(embedding.max_length)
    512
    >>> print(embedding.embed_batch_size)
    10
    """
    try:
        # Lazy import keeps the heavy embedding dependency optional.
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
    except ImportError as e:
        # Chain with `from e` so the original import failure stays in the traceback.
        raise ImportError(
            "Please install the `llama-index-embeddings-huggingface` package to use the Hugging Face embedding model."
        ) from e

    embedding = HuggingFaceEmbedding(model_name=model_name, cache_folder=cache_folder)
    return embedding
0 commit comments