
Commit d3b39b7

mathislucka authored and julian-risch committed
CI: fix format after newly introduced formatting rules from ruff release (#8696)
1 parent 35334f1 commit d3b39b7

File tree: 18 files changed (+44 −54 lines changed)

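Note on the changes below: the commit message only says the format was fixed after a new ruff release, so the rule set is inferred from the hunks themselves (likely the 2025 style changes stabilized around ruff 0.9, though the commit does not pin a version). Three patterns recur: implicitly concatenated string literals are joined into a single literal when the result fits on one line; assert statements keep their condition on one line and parenthesize only the failure message; and quotes inside f-string replacement fields are normalized so the outer quote style can flip instead of escaping inner quotes. Short illustrative sketches of each pattern follow the first hunk where it appears.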

haystack/components/audio/whisper_local.py (+1 -1)

@@ -72,7 +72,7 @@ def __init__(
         whisper_import.check()
         if model not in get_args(WhisperLocalModel):
             raise ValueError(
-                f"Model name '{model}' not recognized. Choose one among: " f"{', '.join(get_args(WhisperLocalModel))}."
+                f"Model name '{model}' not recognized. Choose one among: {', '.join(get_args(WhisperLocalModel))}."
             )
         self.model = model
         self.whisper_params = whisper_params or {}
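A minimal sketch of the string-joining pattern, using stand-in values rather than the real WhisperLocalModel definitions from haystack: Python implicitly concatenates adjacent literals, so merging them into one literal changes nothing at runtime, which is why this commit only touches formatting.

# Hypothetical stand-ins; the real values live in haystack.components.audio.whisper_local.
supported_models = ("tiny", "base", "small")
model = "gigantic"

# Old layout: two adjacent f-string literals, implicitly concatenated by Python.
old_message = f"Model name '{model}' not recognized. Choose one among: " f"{', '.join(supported_models)}."

# New layout after the ruff release: a single literal with the same text.
new_message = f"Model name '{model}' not recognized. Choose one among: {', '.join(supported_models)}."

assert old_message == new_message  # the change is purely cosmetic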

haystack/components/converters/openapi_functions.py (+1 -2)

@@ -249,8 +249,7 @@ def _parse_openapi_spec(self, content: str) -> Dict[str, Any]:
             open_api_spec_content = yaml.safe_load(content)
         except yaml.YAMLError:
             error_message = (
-                "Failed to parse the OpenAPI specification. "
-                "The content does not appear to be valid JSON or YAML.\n\n"
+                "Failed to parse the OpenAPI specification. The content does not appear to be valid JSON or YAML.\n\n"
             )
             raise RuntimeError(error_message, content)

haystack/components/generators/chat/hugging_face_local.py (+1 -1)

@@ -148,7 +148,7 @@ def __init__(

         if task not in PIPELINE_SUPPORTED_TASKS:
             raise ValueError(
-                f"Task '{task}' is not supported. " f"The supported tasks are: {', '.join(PIPELINE_SUPPORTED_TASKS)}."
+                f"Task '{task}' is not supported. The supported tasks are: {', '.join(PIPELINE_SUPPORTED_TASKS)}."
             )
         huggingface_pipeline_kwargs["task"] = task

haystack/components/rankers/lost_in_the_middle.py (+2 -2)

@@ -51,7 +51,7 @@ def __init__(self, word_count_threshold: Optional[int] = None, top_k: Optional[i
         """
         if isinstance(word_count_threshold, int) and word_count_threshold <= 0:
             raise ValueError(
-                f"Invalid value for word_count_threshold: {word_count_threshold}. " f"word_count_threshold must be > 0."
+                f"Invalid value for word_count_threshold: {word_count_threshold}. word_count_threshold must be > 0."
             )
         if isinstance(top_k, int) and top_k <= 0:
             raise ValueError(f"top_k must be > 0, but got {top_k}")

@@ -78,7 +78,7 @@ def run(
         """
         if isinstance(word_count_threshold, int) and word_count_threshold <= 0:
             raise ValueError(
-                f"Invalid value for word_count_threshold: {word_count_threshold}. " f"word_count_threshold must be > 0."
+                f"Invalid value for word_count_threshold: {word_count_threshold}. word_count_threshold must be > 0."
             )
         if isinstance(top_k, int) and top_k <= 0:
             raise ValueError(f"top_k must be > 0, but got {top_k}")

haystack/core/component/component.py (+5 -5)

@@ -268,9 +268,9 @@ def __call__(cls, *args, **kwargs):
             try:
                 pre_init_hook.in_progress = True
                 named_positional_args = ComponentMeta._positional_to_kwargs(cls, args)
-                assert (
-                    set(named_positional_args.keys()).intersection(kwargs.keys()) == set()
-                ), "positional and keyword arguments overlap"
+                assert set(named_positional_args.keys()).intersection(kwargs.keys()) == set(), (
+                    "positional and keyword arguments overlap"
+                )
                 kwargs.update(named_positional_args)
                 pre_init_hook.callback(cls, kwargs)
                 instance = super().__call__(**kwargs)

@@ -309,8 +309,8 @@ def _component_repr(component: Component) -> str:
     # We're explicitly ignoring the type here because we're sure that the component
     # has the __haystack_input__ and __haystack_output__ attributes at this point
     return (
-        f'{result}\n{getattr(component, "__haystack_input__", "<invalid_input_sockets>")}'
-        f'\n{getattr(component, "__haystack_output__", "<invalid_output_sockets>")}'
+        f"{result}\n{getattr(component, '__haystack_input__', '<invalid_input_sockets>')}"
+        f"\n{getattr(component, '__haystack_output__', '<invalid_output_sockets>')}"
     )
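A minimal sketch of the assert pattern, with toy dictionaries standing in for the real ComponentMeta arguments: instead of wrapping the condition in parentheses and hanging the message off the closing parenthesis, the condition stays on one line and only the message is parenthesized. Both spellings raise the same AssertionError when the condition fails.

# Toy stand-ins for the named_positional_args / kwargs seen in ComponentMeta.__call__.
named_positional_args = {"a": 1}
kwargs = {"b": 2}

# Old layout: parenthesized condition, message after the closing parenthesis.
assert (
    set(named_positional_args.keys()).intersection(kwargs.keys()) == set()
), "positional and keyword arguments overlap"

# New layout: condition on one line, only the message is parenthesized.
assert set(named_positional_args.keys()).intersection(kwargs.keys()) == set(), (
    "positional and keyword arguments overlap"
)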
haystack/core/pipeline/draw.py (+3 -3)

@@ -124,7 +124,7 @@ def _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:
     }

     states = {
-        comp: f"{comp}[\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\"]:::component"  # noqa
+        comp: f'{comp}["<b>{comp}</b><br><small><i>{type(data["instance"]).__name__}{optional_inputs[comp]}</i></small>"]:::component'  # noqa
        for comp, data in graph.nodes(data=True)
        if comp not in ["input", "output"]
     }

@@ -139,11 +139,11 @@ def _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:
             connections_list.append(conn_string)

     input_connections = [
-        f"i{{&ast;}}--\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\"--> {states[to_comp]}"
+        f'i{{&ast;}}--"{conn_data["label"]}<br><small><i>{conn_data["conn_type"]}</i></small>"--> {states[to_comp]}'
        for _, to_comp, conn_data in graph.out_edges("input", data=True)
     ]
     output_connections = [
-        f"{states[from_comp]}--\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\"--> o{{&ast;}}"
+        f'{states[from_comp]}--"{conn_data["label"]}<br><small><i>{conn_data["conn_type"]}</i></small>"--> o{{&ast;}}'
        for from_comp, _, conn_data in graph.in_edges("output", data=True)
     ]
     connections = "\n".join(connections_list + input_connections + output_connections)
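A minimal sketch of the f-string quote pattern, with a throwaway dictionary instead of the real networkx graph data from draw.py: once the formatter may normalize quotes inside replacement fields, the outer string can switch quote style and drop the backslash escapes while the rendered Mermaid text stays identical.

# Throwaway example data; the real code iterates over graph.nodes(data=True).
data = {"instance": object()}
comp = "retriever"

# Old layout: double-quoted f-string, escaped inner quotes, single quotes in the lookup.
old_node = f"{comp}[\"<b>{type(data['instance']).__name__}</b>\"]:::component"

# New layout: single-quoted f-string, double quotes inside the replacement field.
new_node = f'{comp}["<b>{type(data["instance"]).__name__}</b>"]:::component'

assert old_node == new_node  # only the quoting changed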

haystack/document_stores/in_memory/document_store.py (+3 -6)

@@ -396,8 +396,7 @@ def filter_documents(self, filters: Optional[Dict[str, Any]] = None) -> List[Doc
         if filters:
             if "operator" not in filters and "conditions" not in filters:
                 raise ValueError(
-                    "Invalid filter syntax. See https://docs.haystack.deepset.ai/docs/metadata-filtering "
-                    "for details."
+                    "Invalid filter syntax. See https://docs.haystack.deepset.ai/docs/metadata-filtering for details."
                 )
             return [doc for doc in self.storage.values() if document_matches_filter(filters=filters, document=doc)]
         return list(self.storage.values())

@@ -506,8 +505,7 @@ def bm25_retrieval(
         if filters:
             if "operator" not in filters:
                 raise ValueError(
-                    "Invalid filter syntax. See https://docs.haystack.deepset.ai/docs/metadata-filtering "
-                    "for details."
+                    "Invalid filter syntax. See https://docs.haystack.deepset.ai/docs/metadata-filtering for details."
                 )
             filters = {"operator": "AND", "conditions": [content_type_filter, filters]}
         else:

@@ -574,8 +572,7 @@ def embedding_retrieval(
             return []
         elif len(documents_with_embeddings) < len(all_documents):
             logger.info(
-                "Skipping some Documents that don't have an embedding. "
-                "To generate embeddings, use a DocumentEmbedder."
+                "Skipping some Documents that don't have an embedding. To generate embeddings, use a DocumentEmbedder."
             )

         scores = self._compute_query_embedding_similarity_scores(

haystack/marshal/yaml.py (+1 -2)

@@ -31,8 +31,7 @@ def marshal(self, dict_: Dict[str, Any]) -> str:
             return yaml.dump(dict_, Dumper=YamlDumper)
         except yaml.representer.RepresenterError as e:
             raise TypeError(
-                "Error dumping pipeline to YAML - Ensure that all pipeline "
-                "components only serialize basic Python types"
+                "Error dumping pipeline to YAML - Ensure that all pipeline components only serialize basic Python types"
             ) from e

     def unmarshal(self, data_: Union[str, bytes, bytearray]) -> Dict[str, Any]:

haystack/utils/filters.py (+1 -2)

@@ -112,8 +112,7 @@ def _less_than_equal(document_value: Any, filter_value: Any) -> bool:
 def _in(document_value: Any, filter_value: Any) -> bool:
     if not isinstance(filter_value, list):
         msg = (
-            f"Filter value must be a `list` when using operator 'in' or 'not in', "
-            f"received type '{type(filter_value)}'"
+            f"Filter value must be a `list` when using operator 'in' or 'not in', received type '{type(filter_value)}'"
         )
         raise FilterError(msg)
     return any(_equal(e, document_value) for e in filter_value)

haystack/utils/hf.py (+1 -1)

@@ -205,7 +205,7 @@ def resolve_hf_pipeline_kwargs(
     task = model_info(huggingface_pipeline_kwargs["model"], token=huggingface_pipeline_kwargs["token"]).pipeline_tag

     if task not in supported_tasks:
-        raise ValueError(f"Task '{task}' is not supported. " f"The supported tasks are: {', '.join(supported_tasks)}.")
+        raise ValueError(f"Task '{task}' is not supported. The supported tasks are: {', '.join(supported_tasks)}.")
     huggingface_pipeline_kwargs["task"] = task
     return huggingface_pipeline_kwargs

test/components/audio/test_whisper_local.py (+6 -6)

@@ -190,14 +190,14 @@ def test_whisper_local_transcriber(self, test_files_path):
         docs = output["documents"]
         assert len(docs) == 3

-        assert all(
-            word in docs[0].content.strip().lower() for word in {"content", "the", "document"}
-        ), f"Expected words not found in: {docs[0].content.strip().lower()}"
+        assert all(word in docs[0].content.strip().lower() for word in {"content", "the", "document"}), (
+            f"Expected words not found in: {docs[0].content.strip().lower()}"
+        )
         assert test_files_path / "audio" / "this is the content of the document.wav" == docs[0].meta["audio_file"]

-        assert all(
-            word in docs[1].content.strip().lower() for word in {"context", "answer"}
-        ), f"Expected words not found in: {docs[1].content.strip().lower()}"
+        assert all(word in docs[1].content.strip().lower() for word in {"context", "answer"}), (
+            f"Expected words not found in: {docs[1].content.strip().lower()}"
+        )
         path = test_files_path / "audio" / "the context for this answer is here.wav"
         assert path.absolute() == docs[1].meta["audio_file"]

test/components/converters/test_docx_file_to_document.py (+3 -3)

@@ -175,9 +175,9 @@ def test_run_with_table(self, test_files_path):
         table_index = next(i for i, part in enumerate(content_parts) if "| This | Is | Just a |" in part)
         # check that natural order of the document is preserved
         assert any("Donald Trump" in part for part in content_parts[:table_index]), "Text before table not found"
-        assert any(
-            "Now we are in Page 2" in part for part in content_parts[table_index + 1 :]
-        ), "Text after table not found"
+        assert any("Now we are in Page 2" in part for part in content_parts[table_index + 1 :]), (
+            "Text after table not found"
+        )

     def test_run_with_store_full_path_false(self, test_files_path):
         """

test/components/embedders/test_openai_document_embedder.py (+3 -3)

@@ -251,8 +251,8 @@ def test_run(self):
             assert len(doc.embedding) == 1536
             assert all(isinstance(x, float) for x in doc.embedding)

-        assert (
-            "text" in result["meta"]["model"] and "ada" in result["meta"]["model"]
-        ), "The model name does not contain 'text' and 'ada'"
+        assert "text" in result["meta"]["model"] and "ada" in result["meta"]["model"], (
+            "The model name does not contain 'text' and 'ada'"
+        )

         assert result["meta"]["usage"] == {"prompt_tokens": 15, "total_tokens": 15}, "Usage information does not match"

test/components/embedders/test_openai_text_embedder.py (+3 -3)

@@ -130,8 +130,8 @@ def test_run(self):
         assert len(result["embedding"]) == 1536
         assert all(isinstance(x, float) for x in result["embedding"])

-        assert (
-            "text" in result["meta"]["model"] and "ada" in result["meta"]["model"]
-        ), "The model name does not contain 'text' and 'ada'"
+        assert "text" in result["meta"]["model"] and "ada" in result["meta"]["model"], (
+            "The model name does not contain 'text' and 'ada'"
+        )

         assert result["meta"]["usage"] == {"prompt_tokens": 6, "total_tokens": 6}, "Usage information does not match"

test/components/joiners/test_document_joiner.py (+3 -3)

@@ -302,6 +302,6 @@ def test_test_score_norm_with_rrf(self):
             for i in range(len(join_results["documents"]) - 1)
         )

-        assert (
-            is_sorted
-        ), "Documents are not sorted in descending order by score, there is an issue with rff ranking"
+        assert is_sorted, (
+            "Documents are not sorted in descending order by score, there is an issue with rff ranking"
+        )

test/components/preprocessors/test_document_cleaner.py (+2 -6)

@@ -71,7 +71,7 @@ def test_remove_whitespaces(self):
         )
         assert len(result["documents"]) == 1
         assert result["documents"][0].content == (
-            "This is a text with some words. " "" "There is a second sentence. " "" "And there is a third sentence.\f"
+            "This is a text with some words. There is a second sentence. And there is a third sentence.\f"
         )

     def test_remove_substrings(self):

@@ -210,11 +210,7 @@ def test_ascii_only(self):
     def test_other_document_fields_are_not_lost(self):
         cleaner = DocumentCleaner(keep_id=True)
         document = Document(
-            content="This is a text with some words. \n"
-            ""
-            "There is a second sentence. \n"
-            ""
-            "And there is a third sentence.\n",
+            content="This is a text with some words. \nThere is a second sentence. \nAnd there is a third sentence.\n",
             dataframe=DataFrame({"col1": [1], "col2": [2]}),
             blob=ByteStream.from_string("some_data"),
             meta={"data": 1},

test/components/routers/test_conditional_router.py (+3 -3)

@@ -436,9 +436,9 @@ def test_router_with_optional_parameters(self):

         # Test pipeline without path parameter
         result = pipe.run(data={"router": {"question": "What?"}})
-        assert result["router"] == {
-            "fallback": "What?"
-        }, "Default route should work in pipeline when 'path' is not provided"
+        assert result["router"] == {"fallback": "What?"}, (
+            "Default route should work in pipeline when 'path' is not provided"
+        )

         # Test pipeline with path parameter
         result = pipe.run(data={"router": {"question": "What?", "path": "followup_short"}})

test/core/pipeline/features/test_run.py (+2 -2)

@@ -823,7 +823,7 @@ def pipeline_that_has_a_component_with_only_default_inputs():
                 "answers": [
                     GeneratedAnswer(
                         data="Paris",
-                        query="What " "is " "the " "capital " "of " "France?",
+                        query="What is the capital of France?",
                         documents=[
                             Document(
                                 id="413dccdf51a54cca75b7ed2eddac04e6e58560bd2f0caf4106a3efc023fe3651",

@@ -916,7 +916,7 @@ def fake_generator_run(self, generation_kwargs: Optional[Dict[str, Any]] = None,
         pipe,
         [
             PipelineRunData(
-                inputs={"prompt_builder": {"query": "What is the capital of " "Italy?"}},
+                inputs={"prompt_builder": {"query": "What is the capital of Italy?"}},
                 expected_outputs={"router": {"correct_replies": ["Rome"]}},
                 expected_run_order=["prompt_builder", "generator", "router", "prompt_builder", "generator", "router"],
             )
