
Commit

fix tests
lievan committed Feb 3, 2025
1 parent a8ab88e commit 8d4b9a4
Showing 2 changed files with 7 additions and 7 deletions.
2 changes: 1 addition & 1 deletion ddtrace/llmobs/_evaluators/runner.py
@@ -71,7 +71,7 @@ def start(self, *args, **kwargs):
             logger.debug("no evaluators configured, not starting %r", self.__class__.__name__)
             return
         super(EvaluatorRunner, self).start()
-        logger.debug("started %r to %r", self.__class__.__name__)
+        logger.debug("started %r", self.__class__.__name__)
 
     def _stop_service(self) -> None:
         """
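The runner.py change drops an extra %r placeholder that had no matching argument. A minimal standalone sketch of why that matters (standard-library logging behavior, not code from this repository): with a placeholder/argument mismatch, logging catches the formatting error at emit time and prints a "--- Logging error ---" traceback to stderr instead of the intended message.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Two placeholders but only one argument: the record cannot be formatted,
# so logging prints "--- Logging error ---" to stderr instead of the message.
logger.debug("started %r to %r", "EvaluatorRunner")

# One placeholder, one argument: the message is formatted and logged normally.
logger.debug("started %r", "EvaluatorRunner")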
12 changes: 6 additions & 6 deletions tests/llmobs/test_llmobs_evaluator_runner.py
@@ -19,9 +19,9 @@
 
 
 @pytest.fixture
-def active_evaluator_runner(llmobs):
-    evaluator_runner = EvaluatorRunner(interval=0.01, llmobs_service=llmobs)
-    evaluator_runner.evaluators.append(DummyEvaluator(llmobs_service=llmobs))
+def active_evaluator_runner(LLMObs):
+    evaluator_runner = EvaluatorRunner(interval=0.01, llmobs_service=LLMObs)
+    evaluator_runner.evaluators.append(DummyEvaluator(llmobs_service=LLMObs))
     evaluator_runner.start()
     yield evaluator_runner
 
@@ -46,15 +46,15 @@ def test_evaluator_runner_periodic_enqueues_eval_metric(mock_llmobs_eval_metric_
     )
 
 
-def test_evaluator_runner_stopped_does_not_enqueue_metric(llmobs, mock_llmobs_eval_metric_writer):
-    evaluator_runner = EvaluatorRunner(interval=0.1, llmobs_service=llmobs)
+def test_evaluator_runner_stopped_does_not_enqueue_metric(LLMObs, mock_llmobs_eval_metric_writer):
+    evaluator_runner = EvaluatorRunner(interval=0.1, llmobs_service=LLMObs)
     evaluator_runner.start()
     evaluator_runner.enqueue({"span_id": "123", "trace_id": "1234"}, DUMMY_SPAN)
     assert not evaluator_runner._buffer
     assert mock_llmobs_eval_metric_writer.enqueue.call_count == 0
 
 
-def test_evaluator_runner_timed_enqueues_eval_metric(llmobs, mock_llmobs_eval_metric_writer, active_evaluator_runner):
+def test_evaluator_runner_timed_enqueues_eval_metric(LLMObs, mock_llmobs_eval_metric_writer, active_evaluator_runner):
     active_evaluator_runner.enqueue({"span_id": "123", "trace_id": "1234"}, DUMMY_SPAN)
 
     time.sleep(0.1)
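The test changes only rename the fixture parameter from llmobs to LLMObs. pytest injects fixtures by matching parameter names against registered fixture names, so the tests must request whatever name the project's conftest actually defines. The sketch below is a self-contained illustration of that mechanism with a hypothetical stand-in fixture, not the ddtrace conftest.

# test_fixture_name_demo.py -- standalone illustration, not part of ddtrace.
import pytest


@pytest.fixture
def LLMObs():
    # Hypothetical stand-in for the project's conftest fixture; the real one
    # would enable the LLMObs service and yield it to the test.
    return object()


def test_requests_fixture_by_name(LLMObs):
    # pytest matches the parameter name "LLMObs" against registered fixtures;
    # requesting a name that is not registered fails with "fixture not found".
    assert LLMObs is not None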
