chore: Fix Typing Issues #27

Merged · 10 commits · Aug 23, 2024
Changes from 3 commits
3 changes: 2 additions & 1 deletion core/graphiti.py
@@ -1,6 +1,7 @@
import asyncio
import logging
import os
+ import typing
from datetime import datetime
from time import time
from typing import Callable
@@ -83,7 +84,7 @@
episode: EpisodicNode,
new_nodes: list[EntityNode],
new_edges: list[EntityEdge],
- relevant_schema: dict[str, any],
+ relevant_schema: dict[str, typing.Any],
previous_episodes: list[EpisodicNode],
): ...
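Context for the recurring fix in this PR: lowercase `any` is the builtin function, not a type, so mypy rejects annotations like `dict[str, any]`; `typing.Any` is the intended unconstrained type. A minimal illustrative sketch (not code from the PR):

```python
import typing

# Builtin `any` reduces an iterable to a bool:
print(any([False, True]))  # True

# In an annotation, `any` would refer to that function object, which
# mypy rejects; `typing.Any` is the unconstrained type instead:
def describe(schema: dict[str, typing.Any]) -> None:
    for key, value in schema.items():  # values are unchecked by mypy
        print(key, value)

describe({'nodes': 3, 'labels': ['Entity']})
```

Note that the same find-and-replace also capitalized "any" inside prompt prose below (e.g. "If Any edge ..." in core/prompts/dedupe_edges.py), altering user-facing prompt text rather than annotations.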

@@ -276,8 +277,8 @@
await asyncio.gather(*[node.save(self.driver) for node in nodes])

# re-map edge pointers so that they don't point to discard dupe nodes
extracted_edges: list[EntityEdge] = resolve_edge_pointers(extracted_edges, uuid_map)

Check notice on line 280 in core/graphiti.py (GitHub Actions / mypy): "List" is invariant -- see https://mypy.readthedocs.io/en/stable/common_issues.html#variance
Check notice on line 280 in core/graphiti.py (GitHub Actions / mypy): Consider using "Sequence" instead, which is covariant
episodic_edges: list[EpisodicEdge] = resolve_edge_pointers(episodic_edges, uuid_map)

Check notice on line 281 in core/graphiti.py (GitHub Actions / mypy): "List" is invariant -- see https://mypy.readthedocs.io/en/stable/common_issues.html#variance
Check notice on line 281 in core/graphiti.py (GitHub Actions / mypy): Consider using "Sequence" instead, which is covariant

# save episodic edges to KG
await asyncio.gather(*[edge.save(self.driver) for edge in episodic_edges])
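The two pairs of notices above come down to `list` being invariant: a `list[EntityEdge]` or `list[EpisodicEdge]` is not accepted where a `list[Edge]`-style parameter is declared, even though the element types are subclasses. A minimal sketch of why mypy suggests `Sequence` (hypothetical names mirroring the `resolve_edge_pointers` situation):

```python
from collections.abc import Sequence


class Edge: ...
class EpisodicEdge(Edge): ...


def takes_list(edges: list[Edge]) -> None: ...
def takes_sequence(edges: Sequence[Edge]) -> None: ...


edges: list[EpisodicEdge] = [EpisodicEdge()]

# takes_list(edges)   # mypy error: through the list[Edge] view a plain
#                     # Edge could be appended into a list[EpisodicEdge],
#                     # so invariance forbids the call.
takes_sequence(edges)  # OK: Sequence is read-only, hence covariant.
```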
4 changes: 3 additions & 1 deletion core/llm_client/client.py
@@ -1,5 +1,7 @@
+ import typing
from abc import ABC, abstractmethod

+ from ..prompts.models import Message
from .config import LLMConfig


@@ -9,5 +11,5 @@ def __init__(self, config: LLMConfig):
pass

@abstractmethod
- async def generate_response(self, messages: list[dict[str, str]]) -> dict[str, any]:
+ async def generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
pass
4 changes: 3 additions & 1 deletion core/llm_client/openai_client.py
@@ -1,8 +1,10 @@
import json
import logging
+ import typing

from openai import AsyncOpenAI

+ from ..prompts.models import Message
from .client import LLMClient
from .config import LLMConfig

@@ -14,9 +16,9 @@
self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
self.model = config.model

- async def generate_response(self, messages: list[dict[str, str]]) -> dict[str, any]:
+ async def generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
try:
response = await self.client.chat.completions.create(

Check failure on line 21 in core/llm_client/openai_client.py (GitHub Actions / mypy): call-overload: No overload variant of "create" of "AsyncCompletions" matches argument types "str", "list[Message]", "float", "int", "dict[str, str]"
Check notice on line 21 in core/llm_client/openai_client.py (GitHub Actions / mypy): Possible overload variants (signatures abridged; all expect messages: Iterable[ChatCompletion...MessageParam] and model: str | Literal[...]):
  def create(..., stream: Literal[False] | NotGiven | None = ...) -> Coroutine[Any, Any, ChatCompletion]
  def create(..., stream: Literal[True], ...) -> Coroutine[Any, Any, AsyncStream[ChatCompletionChunk]]
  def create(..., stream: bool, ...) -> Coroutine[Any, Any, ChatCompletion | AsyncStream[ChatCompletionChunk]]
model=self.model,
messages=messages,
temperature=0.1,
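The call-overload failure above arises because the SDK's `create` overloads expect an iterable of role/content param dicts, while this change passes pydantic `Message` models straight through. One possible resolution, sketched under the assumption that `Message` exposes `role` and `content` fields as in core/prompts/models.py (not necessarily the fix the PR eventually adopts):

```python
# Sketch: serialize pydantic Message models into the plain
# {'role': ..., 'content': ...} dicts that the OpenAI overloads accept.
openai_messages = [{'role': m.role, 'content': m.content} for m in messages]

response = await self.client.chat.completions.create(
    model=self.model,
    # A cast or type: ignore may still be needed, since `role` is typed
    # as str rather than the Literal roles the SDK's TypedDicts declare.
    messages=openai_messages,
    temperature=0.1,
)
```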
3 changes: 2 additions & 1 deletion core/nodes.py
@@ -35,8 +35,9 @@ class EpisodicNode(Node):
source: str = Field(description='source type')
source_description: str = Field(description='description of the data source')
content: str = Field(description='raw episode data')
- valid_at: datetime = Field(
+ valid_at: datetime | None = Field(
description='datetime of when the original document was created',
+ default=None,
)
entity_edges: list[str] = Field(
description='list of entity edges referenced in this episode',
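The `valid_at` change above makes the timestamp optional, so episodes without a known creation time still validate. A minimal pydantic sketch of the pattern (field subset only, for illustration):

```python
from datetime import datetime

from pydantic import BaseModel, Field


class EpisodicNode(BaseModel):
    content: str = Field(description='raw episode data')
    valid_at: datetime | None = Field(
        description='datetime of when the original document was created',
        default=None,
    )


node = EpisodicNode(content='hello')  # valid: valid_at defaults to None
assert node.valid_at is None
```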
14 changes: 7 additions & 7 deletions core/prompts/dedupe_edges.py
@@ -1,5 +1,5 @@
import json
- from typing import Protocol, TypedDict
+ from typing import Any, Protocol, TypedDict

from .models import Message, PromptFunction, PromptVersion

@@ -15,7 +15,7 @@ class Versions(TypedDict):
edge_list: PromptFunction


- def v1(context: dict[str, any]) -> list[Message]:
+ def v1(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -34,7 +34,7 @@ def v1(context: dict[str, any]) -> list[Message]:

Task:
1. start with the list of edges from New Edges
- 2. If any edge in New Edges is a duplicate of an edge in Existing Edges, replace the new edge with the existing
+ 2. If Any edge in New Edges is a duplicate of an edge in Existing Edges, replace the new edge with the existing
edge in the list
3. Respond with the resulting list of edges

@@ -55,7 +55,7 @@ def v1(context: dict[str, any]) -> list[Message]:
]


- def v2(context: dict[str, any]) -> list[Message]:
+ def v2(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -74,7 +74,7 @@ def v2(context: dict[str, any]) -> list[Message]:

Task:
1. start with the list of edges from New Edges
- 2. If any edge in New Edges is a duplicate of an edge in Existing Edges, replace the new edge with the existing
+ 2. If Any edge in New Edges is a duplicate of an edge in Existing Edges, replace the new edge with the existing
edge in the list
3. Respond with the resulting list of edges

@@ -97,7 +97,7 @@ def v2(context: dict[str, any]) -> list[Message]:
]


- def edge_list(context: dict[str, any]) -> list[Message]:
+ def edge_list(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -112,7 +112,7 @@ def edge_list(context: dict[str, any]) -> list[Message]:
{json.dumps(context['edges'], indent=2)}

Task:
- If any edge in Edges is a duplicate of another edge, return the fact of only one of the duplicate edges
+ If Any edge in Edges is a duplicate of another edge, return the fact of only one of the duplicate edges

Guidelines:
1. Use both the name and fact of edges to determine if they are duplicates,
12 changes: 6 additions & 6 deletions core/prompts/dedupe_nodes.py
@@ -1,5 +1,5 @@
import json
- from typing import Protocol, TypedDict
+ from typing import Any, Protocol, TypedDict

from .models import Message, PromptFunction, PromptVersion

@@ -16,7 +16,7 @@ class Versions(TypedDict):
node_list: PromptVersion


- def v1(context: dict[str, any]) -> list[Message]:
+ def v1(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -35,7 +35,7 @@ def v1(context: dict[str, any]) -> list[Message]:

Task:
1. start with the list of nodes from New Nodes
- 2. If any node in New Nodes is a duplicate of a node in Existing Nodes, replace the new node with the existing
+ 2. If Any node in New Nodes is a duplicate of a node in Existing Nodes, replace the new node with the existing
node in the list
3. Respond with the resulting list of nodes

@@ -56,7 +56,7 @@ def v1(context: dict[str, any]) -> list[Message]:
]


- def v2(context: dict[str, any]) -> list[Message]:
+ def v2(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -74,7 +74,7 @@ def v2(context: dict[str, any]) -> list[Message]:
{json.dumps(context['extracted_nodes'], indent=2)}

Task:
- If any node in New Nodes is a duplicate of a node in Existing Nodes, add their names to the output list
+ If Any node in New Nodes is a duplicate of a node in Existing Nodes, add their names to the output list

Guidelines:
1. Use both the name and summary of nodes to determine if they are duplicates,
@@ -96,7 +96,7 @@ def v2(context: dict[str, any]) -> list[Message]:
]


- def node_list(context: dict[str, any]) -> list[Message]:
+ def node_list(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
6 changes: 3 additions & 3 deletions core/prompts/extract_edges.py
@@ -1,5 +1,5 @@
import json
- from typing import Protocol, TypedDict
+ from typing import Any, Protocol, TypedDict

from .models import Message, PromptFunction, PromptVersion

@@ -14,7 +14,7 @@ class Versions(TypedDict):
v2: PromptFunction


- def v1(context: dict[str, any]) -> list[Message]:
+ def v1(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -70,7 +70,7 @@ def v1(context: dict[str, any]) -> list[Message]:
]


- def v2(context: dict[str, any]) -> list[Message]:
+ def v2(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
8 changes: 4 additions & 4 deletions core/prompts/extract_nodes.py
@@ -1,5 +1,5 @@
import json
- from typing import Protocol, TypedDict
+ from typing import Any, Protocol, TypedDict

from .models import Message, PromptFunction, PromptVersion

@@ -16,7 +16,7 @@ class Versions(TypedDict):
v3: PromptFunction


- def v1(context: dict[str, any]) -> list[Message]:
+ def v1(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -64,7 +64,7 @@ def v1(context: dict[str, any]) -> list[Message]:
]


- def v2(context: dict[str, any]) -> list[Message]:
+ def v2(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -105,7 +105,7 @@ def v2(context: dict[str, any]) -> list[Message]:
]


- def v3(context: dict[str, any]) -> list[Message]:
+ def v3(context: dict[str, Any]) -> list[Message]:
sys_prompt = """You are an AI assistant that extracts entity nodes from conversational text. Your primary task is to identify and extract the speaker and other significant entities mentioned in the conversation."""

user_prompt = f"""
6 changes: 3 additions & 3 deletions core/prompts/invalidate_edges.py
@@ -1,4 +1,4 @@
- from typing import Protocol, TypedDict
+ from typing import Any, Protocol, TypedDict

from .models import Message, PromptFunction, PromptVersion

@@ -11,7 +11,7 @@ class Versions(TypedDict):
v1: PromptFunction


- def v1(context: dict[str, any]) -> list[Message]:
+ def v1(context: dict[str, Any]) -> list[Message]:
return [
Message(
role='system',
@@ -20,7 +20,7 @@ def v1(context: dict[str, any]) -> list[Message]:
Message(
role='user',
content=f"""
- Based on the provided existing edges and new edges with their timestamps, determine which existing relationships, if any, should be invalidated due to contradictions or updates in the new edges.
+ Based on the provided existing edges and new edges with their timestamps, determine which existing relationships, if Any, should be invalidated due to contradictions or updates in the new edges.
Only mark a relationship as invalid if there is clear evidence from new edges that the relationship is no longer true.
Do not invalidate relationships merely because they weren't mentioned in new edges. You may use the current episode and previous episodes as well as the facts of each edge to understand the context of the relationships.

4 changes: 2 additions & 2 deletions core/prompts/lib.py
@@ -1,4 +1,4 @@
- from typing import Protocol, TypedDict
+ from typing import Any, Protocol, TypedDict

from .dedupe_edges import (
Prompt as DedupeEdgesPrompt,
@@ -68,7 +68,7 @@
def __init__(self, func: PromptFunction):
self.func = func

- def __call__(self, context: dict[str, any]) -> list[Message]:
+ def __call__(self, context: dict[str, Any]) -> list[Message]:
return self.func(context)


@@ -81,7 +81,7 @@
class PromptLibraryWrapper:
def __init__(self, library: PromptLibraryImpl):
for prompt_type, versions in library.items():
setattr(self, prompt_type, PromptTypeWrapper(versions))

Check failure on line 84 in core/prompts/lib.py (GitHub Actions / mypy): arg-type: Argument 1 to "PromptTypeWrapper" has incompatible type "object"; expected "dict[str, Callable[[dict[str, Any]], list[Message]]]"


PROMPT_LIBRARY_IMPL: PromptLibraryImpl = {
Expand All @@ -92,4 +92,4 @@
'invalidate_edges': invalidate_edges_versions,
}

prompt_library: PromptLibrary = PromptLibraryWrapper(PROMPT_LIBRARY_IMPL)

Check failure on line 95 in core/prompts/lib.py (GitHub Actions / mypy): assignment: Incompatible types in assignment (expression has type "PromptLibraryWrapper", variable has type "PromptLibrary")
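Both lib.py failures stem from the dynamic wiring: iterating a TypedDict with `.items()` types the values as `object`, and `PromptLibraryWrapper` is never declared to implement the `PromptLibrary` protocol. One way to satisfy mypy, sketched as explicit casts inside lib.py (an assumption, not necessarily the maintainers' eventual fix):

```python
from typing import cast

from .models import PromptFunction

# Inside PromptLibraryWrapper.__init__: tell mypy what the TypedDict
# values actually are before constructing the per-type wrapper.
setattr(self, prompt_type, PromptTypeWrapper(cast(dict[str, PromptFunction], versions)))

# At module scope: assert that the wrapper structurally satisfies the
# PromptLibrary protocol.
prompt_library: PromptLibrary = cast(PromptLibrary, PromptLibraryWrapper(PROMPT_LIBRARY_IMPL))
```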
6 changes: 3 additions & 3 deletions core/prompts/models.py
@@ -1,4 +1,4 @@
- from typing import Callable, Protocol
+ from typing import Any, Callable, Protocol

from pydantic import BaseModel

@@ -9,7 +9,7 @@ class Message(BaseModel):


class PromptVersion(Protocol):
- def __call__(self, context: dict[str, any]) -> list[Message]: ...
+ def __call__(self, context: dict[str, Any]) -> list[Message]: ...


- PromptFunction = Callable[[dict[str, any]], list[Message]]
+ PromptFunction = Callable[[dict[str, Any]], list[Message]]
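For readers wondering why models.py defines both a `PromptVersion` protocol and a `PromptFunction` alias: a Protocol with `__call__` and a `Callable` alias describe the same shape, and any plain function with a matching signature satisfies both structurally. A small self-contained sketch:

```python
from typing import Any, Callable, Protocol

from pydantic import BaseModel


class Message(BaseModel):
    role: str
    content: str


class PromptVersion(Protocol):
    def __call__(self, context: dict[str, Any]) -> list[Message]: ...


PromptFunction = Callable[[dict[str, Any]], list[Message]]


def v1(context: dict[str, Any]) -> list[Message]:
    return [Message(role='system', content=f'context keys: {list(context)}')]


version: PromptVersion = v1    # OK: structural match against the Protocol
function: PromptFunction = v1  # OK: identical Callable signature
```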
4 changes: 2 additions & 2 deletions core/utils/maintenance/edge_operations.py
@@ -2,7 +2,7 @@
import logging
from datetime import datetime
from time import time
- from typing import List
+ from typing import Any, List

from core.edges import EntityEdge, EpisodicEdge
from core.llm_client import LLMClient
@@ -35,7 +35,7 @@ async def extract_new_edges(
llm_client: LLMClient,
Collaborator (review comment): We should actually just remove this function instead; I deprecated it with the architecture change that split this into two prompts.

episode: EpisodicNode,
new_nodes: list[EntityNode],
- relevant_schema: dict[str, any],
+ relevant_schema: dict[str, Any],
previous_episodes: list[EpisodicNode],
) -> tuple[list[EntityEdge], list[EntityNode]]:
# Prepare context for LLM
5 changes: 1 addition & 4 deletions core/utils/maintenance/graph_data_operations.py
@@ -52,9 +52,7 @@ async def build_indices_and_constraints(driver: AsyncDriver):
}}
""",
]
- index_queries: list[LiteralString] = (
-     range_indices + fulltext_indices + vector_indices
- )
+ index_queries: list[LiteralString] = range_indices + fulltext_indices + vector_indices

await asyncio.gather(*[driver.execute_query(query) for query in index_queries])

@@ -72,7 +70,6 @@ async def retrieve_episodes(
driver: AsyncDriver,
reference_time: datetime,
last_n: int = EPISODE_WINDOW_LEN,
- sources: list[str] | None = 'messages',
) -> list[EpisodicNode]:
"""Retrieve the last n episodic nodes from the graph"""
result = await driver.execute_query(
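Dropping the `sources` parameter above also removes what mypy would flag as an incompatible default: `'messages'` is a str, which does not satisfy `list[str] | None`. A minimal illustration (hypothetical signature, not repo code):

```python
# mypy: Incompatible default for argument "sources"
# (default has type "str", argument has type "list[str] | None")
def retrieve(sources: list[str] | None = 'messages') -> None:  # type: ignore[assignment]
    ...


# A type-correct signature would default to None instead:
def retrieve_fixed(sources: list[str] | None = None) -> None:
    ...
```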
3 changes: 2 additions & 1 deletion core/utils/maintenance/node_operations.py
@@ -1,4 +1,5 @@
import logging
+ import typing
from datetime import datetime
from time import time

@@ -12,7 +13,7 @@
async def extract_new_nodes(
llm_client: LLMClient,
episode: EpisodicNode,
- relevant_schema: dict[str, any],
+ relevant_schema: dict[str, typing.Any],
previous_episodes: list[EpisodicNode],
) -> list[EntityNode]:
# Prepare context for LLM
2 changes: 1 addition & 1 deletion tests/utils/maintenance/test_temporal_operations_int.py
@@ -267,7 +267,7 @@ async def test_invalidate_edges_multiple_invalidations():
async def test_invalidate_edges_no_effect():
existing_edges, nodes = create_complex_test_data()

- # Create a new edge that doesn't invalidate any existing edges
+ # Create a new edge that doesn't invalidate Any existing edges
new_edge = (
nodes[2],
EntityEdge(