Skip to content

Commit d11ed32

Browse files
tweak config docs
1 parent f316e39 commit d11ed32

File tree

8 files changed: +22 additions, −25 deletions

docs/api-reference/openapi.json

Lines changed: 1 addition & 1 deletion
Large diffs are not rendered by default.

py/core/main/api/ingestion_router.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,7 @@ async def ingest_files_app(
9292
description=ingest_files_descriptions.get("ingestion_config"),
9393
),
9494
auth_user=Depends(self.service.providers.auth.auth_wrapper),
95-
response_model=WrappedIngestionResponse,
96-
):
95+
) -> WrappedIngestionResponse: # type: ignore
9796
"""
9897
Ingest files into the system.
9998
@@ -189,8 +188,7 @@ async def update_files_app(
189188
description=ingest_files_descriptions.get("ingestion_config"),
190189
),
191190
auth_user=Depends(self.service.providers.auth.auth_wrapper),
192-
response_model=WrappedUpdateResponse,
193-
):
191+
) -> WrappedUpdateResponse: # type: ignore
194192
"""
195193
Update existing files in the system.
196194
@@ -280,8 +278,7 @@ async def ingest_chunks_app(
280278
None, description=ingest_files_descriptions.get("metadata")
281279
),
282280
auth_user=Depends(self.service.providers.auth.auth_wrapper),
283-
response_model=WrappedIngestionResponse,
284-
):
281+
) -> WrappedIngestionResponse: # type: ignore
285282
"""
286283
Ingest text chunks into the system.
287284

py/core/main/api/kg_router.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ async def create_graph(
7272
description="Settings for the graph creation process.",
7373
),
7474
auth_user=Depends(self.service.providers.auth.auth_wrapper),
75-
) -> WrappedKGCreationResponse:
75+
) -> WrappedKGCreationResponse: # type: ignore
7676
"""
7777
Creating a graph on your documents. This endpoint takes input a list of document ids and KGCreationSettings. If document IDs are not provided, the graph will be created on all documents in the system.
7878
This step extracts the relevant entities and relationships from the documents and creates a graph based on the extracted information.

py/core/main/api/retrieval_router.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ async def search_app(
112112
description=search_descriptions.get("kg_search_settings"),
113113
),
114114
auth_user=Depends(self.service.providers.auth.auth_wrapper),
115-
) -> WrappedSearchResponse:
115+
) -> WrappedSearchResponse: # type: ignore
116116
"""
117117
Perform a search query on the vector database and knowledge graph.
118118
@@ -168,7 +168,7 @@ async def rag_app(
168168
description=rag_descriptions.get("include_title_if_available"),
169169
),
170170
auth_user=Depends(self.service.providers.auth.auth_wrapper),
171-
) -> WrappedRAGResponse:
171+
) -> WrappedRAGResponse: # type: ignore
172172
"""
173173
Execute a RAG (Retrieval-Augmented Generation) query.
174174
@@ -240,7 +240,7 @@ async def agent_app(
240240
),
241241
),
242242
auth_user=Depends(self.service.providers.auth.auth_wrapper),
243-
) -> WrappedRAGAgentResponse:
243+
) -> WrappedRAGAgentResponse: # type: ignore
244244
"""
245245
Implement an agent-based interaction for complex query processing.
246246

py/core/main/orchestration/hatchet/kg_workflow.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import math
55
import uuid
66

7-
from hatchet_sdk import Context, ConcurrencyLimitStrategy
7+
from hatchet_sdk import ConcurrencyLimitStrategy, Context
88

99
from core import GenerationConfig
1010
from core.base import OrchestrationProvider
@@ -41,8 +41,8 @@ class KGExtractDescribeEmbedWorkflow:
4141
def __init__(self, kg_service: KgService):
4242
self.kg_service = kg_service
4343

44-
@orchestration_provider.concurrency(
45-
max_runs=orchestration_provider.config.kg_creation_concurrency_limit,
44+
@orchestration_provider.concurrency( # type: ignore
45+
max_runs=orchestration_provider.config.kg_creation_concurrency_limit, # type: ignore
4646
limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
4747
)
4848
def concurrency(self, context) -> str:

py/tests/conftest.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ def generate_random_vector_entry(
6060
generate_random_vector_entry(i, dimension) for i in range(num_entries)
6161
]
6262

63+
6364
@pytest.fixture(scope="function")
6465
def app_config():
6566
collection_id = uuid.uuid4()
@@ -68,6 +69,7 @@ def app_config():
6869

6970
return AppConfig(project_name=random_project_name)
7071

72+
7173
# Crypto
7274
@pytest.fixture(scope="function")
7375
def crypto_config(app_config):
@@ -82,9 +84,7 @@ def crypto_provider(crypto_config, app_config):
8284
# Postgres
8385
@pytest.fixture(scope="function")
8486
def db_config(app_config):
85-
return DatabaseConfig.create(
86-
provider="postgres", app=app_config
87-
)
87+
return DatabaseConfig.create(provider="postgres", app=app_config)
8888

8989

9090
@pytest.fixture(scope="function")
@@ -138,7 +138,7 @@ def auth_config(app_config):
138138
access_token_lifetime_in_minutes=15,
139139
refresh_token_lifetime_in_days=1,
140140
require_email_verification=False,
141-
app=app_config
141+
app=app_config,
142142
)
143143

144144

@@ -160,7 +160,7 @@ def litellm_provider(app_config):
160160
provider="litellm",
161161
base_model="text-embedding-3-small",
162162
base_dimension=1536,
163-
app=app_config
163+
app=app_config,
164164
)
165165
return LiteLLMEmbeddingProvider(config)
166166

py/tests/core/providers/embedding/conftest.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ def openai_provider(app_config):
2525
provider="openai",
2626
base_model="text-embedding-ada-002",
2727
base_dimension=1536,
28-
app=app_config
28+
app=app_config,
2929
)
3030
return OpenAIEmbeddingProvider(config)
3131

@@ -36,6 +36,6 @@ def ollama_provider(app_config):
3636
provider="ollama",
3737
base_model="mxbai-embed-large",
3838
base_dimension=1024,
39-
app=app_config
39+
app=app_config,
4040
)
4141
return OllamaEmbeddingProvider(config)

py/tests/core/providers/embedding/test_litellm_embedding_provider.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ def litellm_provider(app_config):
2626
provider="litellm",
2727
base_model="openai/text-embedding-3-small",
2828
base_dimension=1536,
29-
app=app_config
29+
app=app_config,
3030
)
3131

3232
return LiteLLMEmbeddingProvider(config)
@@ -80,7 +80,7 @@ def test_litellm_rerank_model_not_supported(app_config):
8080
base_model="openai/text-embedding-3-small",
8181
base_dimension=1536,
8282
rerank_model="some-model",
83-
app=app_config
83+
app=app_config,
8484
)
8585
with pytest.raises(
8686
ValueError, match="does not support separate reranking"
@@ -93,7 +93,7 @@ def test_litellm_unsupported_stage(app_config):
9393
provider="litellm",
9494
base_model="openai/text-embedding-3-small",
9595
base_dimension=1536,
96-
app=app_config
96+
app=app_config,
9797
)
9898
provider = LiteLLMEmbeddingProvider(config)
9999
with pytest.raises(
@@ -110,7 +110,7 @@ async def test_litellm_async_unsupported_stage(app_config):
110110
provider="litellm",
111111
base_model="openai/text-embedding-3-small",
112112
base_dimension=1536,
113-
app=app_config
113+
app=app_config,
114114
)
115115
provider = LiteLLMEmbeddingProvider(config)
116116
with pytest.raises(

Commit comments (0)