Commit

Add as_dict to object detection (#774)
czaloom authored Oct 2, 2024
1 parent 9474a92 commit ecd6fa4
Showing 9 changed files with 200 additions and 103 deletions.
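
For orientation, a minimal sketch of the call pattern this commit changes: evaluate() gains an as_dict flag, so the tests below receive plain dictionaries directly instead of calling .to_dict() on metric objects. The import path and the detections fixture are assumptions based on the repository layout, not something this page confirms.

# Hedged sketch; import path assumed from the repo layout (lite/ -> valor_lite),
# and `detections` is a placeholder for the ground-truth/prediction fixtures used in the tests.
from valor_lite.detection import DataLoader, MetricType

loader = DataLoader()
loader.add_bounding_boxes(detections)
evaluator = loader.finalize()

# New pattern introduced by this commit: request dictionaries directly.
metrics = evaluator.evaluate(
    iou_thresholds=[0.5, 0.75],
    as_dict=True,
)
ap_dicts = [m for m in metrics[MetricType.AP]]  # elements are already dicts

# Previous pattern used throughout these tests: convert metric objects by hand.
# metrics = evaluator.evaluate(iou_thresholds=[0.5, 0.75])
# ap_dicts = [m.to_dict() for m in metrics[MetricType.AP]]
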
80 changes: 48 additions & 32 deletions lite/tests/detection/test_average_precision.py
@@ -98,6 +98,7 @@ def test_ap_metrics(

metrics = evaluator.evaluate(
iou_thresholds=[0.1, 0.6],
as_dict=True,
)

assert evaluator.ignored_prediction_labels == []
@@ -108,7 +109,7 @@ def test_ap_metrics(
assert evaluator.n_predictions == 2

# test AP
actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -149,7 +150,7 @@ def test_ap_metrics(
assert m in actual_metrics

# test mAP
actual_metrics = [m.to_dict() for m in metrics[MetricType.mAP]]
actual_metrics = [m for m in metrics[MetricType.mAP]]
expected_metrics = [
{
"type": "mAP",
@@ -190,9 +191,7 @@ def test_ap_metrics(
assert m in actual_metrics

# test AP Averaged Over IoUs
actual_metrics = [
m.to_dict() for m in metrics[MetricType.APAveragedOverIOUs]
]
actual_metrics = [m for m in metrics[MetricType.APAveragedOverIOUs]]
expected_metrics = [
{
"type": "APAveragedOverIOUs",
@@ -217,9 +216,7 @@ def test_ap_metrics(
assert m in actual_metrics

# test mAP Averaged Over IoUs
actual_metrics = [
m.to_dict() for m in metrics[MetricType.mAPAveragedOverIOUs]
]
actual_metrics = [m for m in metrics[MetricType.mAPAveragedOverIOUs]]
expected_metrics = [
{
"type": "mAPAveragedOverIOUs",
@@ -265,10 +262,11 @@ def test_ap_using_torch_metrics_example(

metrics = evaluator.evaluate(
iou_thresholds=[0.5, 0.75],
as_dict=True,
)

# test AP
actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -357,7 +355,7 @@ def test_ap_using_torch_metrics_example(
assert m in actual_metrics

# test mAP
actual_metrics = [m.to_dict() for m in metrics[MetricType.mAP]]
actual_metrics = [m for m in metrics[MetricType.mAP]]
expected_metrics = [
{
"type": "mAP",
@@ -393,9 +391,12 @@ def test_ap_false_negatives_single_datum_baseline(
loader = DataLoader()
loader.add_bounding_boxes(false_negatives_single_datum_baseline_detections)
evaluator = loader.finalize()
metrics = evaluator.evaluate(iou_thresholds=[0.5])
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -426,9 +427,12 @@ def test_ap_false_negatives_single_datum(
loader = DataLoader()
loader.add_bounding_boxes(false_negatives_single_datum_detections)
evaluator = loader.finalize()
metrics = evaluator.evaluate(iou_thresholds=[0.5])
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -467,9 +471,12 @@ def test_ap_false_negatives_two_datums_one_empty_low_confidence_of_fp(
false_negatives_two_datums_one_empty_low_confidence_of_fp_detections
)
evaluator = loader.finalize()
metrics = evaluator.evaluate(iou_thresholds=[0.5])
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -507,9 +514,12 @@ def test_ap_false_negatives_two_datums_one_empty_high_confidence_of_fp(
false_negatives_two_datums_one_empty_high_confidence_of_fp_detections
)
evaluator = loader.finalize()
metrics = evaluator.evaluate(iou_thresholds=[0.5])
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -547,9 +557,12 @@ def test_ap_false_negatives_two_datums_one_only_with_different_class_low_confide
false_negatives_two_datums_one_only_with_different_class_low_confidence_of_fp_detections
)
evaluator = loader.finalize()
metrics = evaluator.evaluate(iou_thresholds=[0.5])
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -598,9 +611,12 @@ def test_ap_false_negatives_two_datums_one_only_with_different_class_high_confid
false_negatives_two_images_one_only_with_different_class_high_confidence_of_fp_detections
)
evaluator = loader.finalize()
metrics = evaluator.evaluate(iou_thresholds=[0.5])
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
@@ -662,9 +678,12 @@ def test_ap_ranked_pair_ordering(
"n_predictions": 4,
}

metrics = evaluator.evaluate(iou_thresholds=[0.5, 0.75])
metrics = evaluator.evaluate(
iou_thresholds=[0.5, 0.75],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"parameters": {
@@ -720,7 +739,7 @@ def test_ap_ranked_pair_ordering(
for m in expected_metrics:
assert m in actual_metrics

actual_metrics = [m.to_dict() for m in metrics[MetricType.mAP]]
actual_metrics = [m for m in metrics[MetricType.mAP]]
expected_metrics = [
{
"parameters": {"label_key": "class", "iou_threshold": 0.5},
@@ -738,9 +757,7 @@ def test_ap_ranked_pair_ordering(
for m in expected_metrics:
assert m in actual_metrics

actual_metrics = [
m.to_dict() for m in metrics[MetricType.APAveragedOverIOUs]
]
actual_metrics = [m for m in metrics[MetricType.APAveragedOverIOUs]]
expected_metrics = [
{
"parameters": {
@@ -772,9 +789,7 @@ def test_ap_ranked_pair_ordering(
for m in expected_metrics:
assert m in actual_metrics

actual_metrics = [
m.to_dict() for m in metrics[MetricType.mAPAveragedOverIOUs]
]
actual_metrics = [m for m in metrics[MetricType.mAPAveragedOverIOUs]]
expected_metrics = [
{
"parameters": {
@@ -812,12 +827,13 @@ def test_ap_true_positive_deassignment(
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
score_thresholds=[0.5],
as_dict=True,
)

assert len(metrics) == 14

# test AP
actual_metrics = [m.to_dict() for m in metrics[MetricType.AP]]
actual_metrics = [m for m in metrics[MetricType.AP]]
expected_metrics = [
{
"type": "AP",
37 changes: 17 additions & 20 deletions lite/tests/detection/test_average_recall.py
@@ -102,6 +102,7 @@ def test_ar_metrics(
metrics = evaluator.evaluate(
iou_thresholds=[0.1, 0.6],
score_thresholds=[0.0],
as_dict=True,
)

assert evaluator.ignored_prediction_labels == []
@@ -112,7 +113,7 @@ def test_ar_metrics(
assert evaluator.n_predictions == 2

# test AR
actual_metrics = [m.to_dict() for m in metrics[MetricType.AR]]
actual_metrics = [m for m in metrics[MetricType.AR]]
expected_metrics = [
{
"type": "AR",
@@ -139,7 +140,7 @@ def test_ar_metrics(
assert m in actual_metrics

# test mAR
actual_metrics = [m.to_dict() for m in metrics[MetricType.mAR]]
actual_metrics = [m for m in metrics[MetricType.mAR]]
expected_metrics = [
{
"type": "mAR",
@@ -166,9 +167,7 @@ def test_ar_metrics(
assert m in actual_metrics

# test AR Averaged Over IoUs
actual_metrics = [
m.to_dict() for m in metrics[MetricType.ARAveragedOverScores]
]
actual_metrics = [m for m in metrics[MetricType.ARAveragedOverScores]]
expected_metrics = [
{
"type": "ARAveragedOverScores",
@@ -195,9 +194,7 @@ def test_ar_metrics(
assert m in actual_metrics

# test mAR Averaged Over IoUs
actual_metrics = [
m.to_dict() for m in metrics[MetricType.mARAveragedOverScores]
]
actual_metrics = [m for m in metrics[MetricType.mARAveragedOverScores]]
expected_metrics = [
{
"type": "mARAveragedOverScores",
@@ -249,10 +246,11 @@ def test_ar_using_torch_metrics_example(
metrics = evaluator.evaluate(
iou_thresholds=iou_thresholds,
score_thresholds=score_thresholds,
as_dict=True,
)

# test AR
actual_metrics = [m.to_dict() for m in metrics[MetricType.AR]]
actual_metrics = [m for m in metrics[MetricType.AR]]
expected_metrics = [
{
"type": "AR",
@@ -306,7 +304,7 @@ def test_ar_using_torch_metrics_example(
assert m in actual_metrics

# test mAR
actual_metrics = [m.to_dict() for m in metrics[MetricType.mAR]]
actual_metrics = [m for m in metrics[MetricType.mAR]]
expected_metrics = [
{
"type": "mAR",
@@ -324,9 +322,7 @@ def test_ar_using_torch_metrics_example(
assert m in actual_metrics

# test ARAveragedOverScores
actual_metrics = [
m.to_dict() for m in metrics[MetricType.ARAveragedOverScores]
]
actual_metrics = [m for m in metrics[MetricType.ARAveragedOverScores]]
expected_metrics = [
{
"type": "ARAveragedOverScores",
@@ -380,9 +376,7 @@ def test_ar_using_torch_metrics_example(
assert m in actual_metrics

# test mARAveragedOverScores
actual_metrics = [
m.to_dict() for m in metrics[MetricType.mARAveragedOverScores]
]
actual_metrics = [m for m in metrics[MetricType.mARAveragedOverScores]]
expected_metrics = [
{
"type": "mARAveragedOverScores",
@@ -418,12 +412,13 @@ def test_ar_true_positive_deassignment(
metrics = evaluator.evaluate(
iou_thresholds=[0.5],
score_thresholds=[0.5],
as_dict=True,
)

assert len(metrics) == 14

# test AR
actual_metrics = [m.to_dict() for m in metrics[MetricType.AR]]
actual_metrics = [m for m in metrics[MetricType.AR]]
expected_metrics = [
{
"type": "AR",
@@ -474,10 +469,12 @@ def test_ar_ranked_pair_ordering(
}

metrics = evaluator.evaluate(
iou_thresholds=[0.5, 0.75], score_thresholds=[0.0]
iou_thresholds=[0.5, 0.75],
score_thresholds=[0.0],
as_dict=True,
)

actual_metrics = [m.to_dict() for m in metrics[MetricType.AR]]
actual_metrics = [m for m in metrics[MetricType.AR]]
expected_metrics = expected_metrics = [
{
"type": "AR",
@@ -512,7 +509,7 @@ def test_ar_ranked_pair_ordering(
for m in expected_metrics:
assert m in actual_metrics

actual_metrics = [m.to_dict() for m in metrics[MetricType.mAR]]
actual_metrics = [m for m in metrics[MetricType.mAR]]
expected_metrics = expected_metrics = [
{
"type": "mAR",
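
The average-recall tests follow the same pattern but also sweep score thresholds. A short hedged sketch, reusing the assumed import path and evaluator setup from the sketch above:

# Same caveats as above: the valor_lite import path and evaluator setup are assumed.
metrics = evaluator.evaluate(
    iou_thresholds=[0.5, 0.75],
    score_thresholds=[0.0],
    as_dict=True,
)
ar_dicts = [m for m in metrics[MetricType.AR]]    # per-label AR as dicts
mar_dicts = [m for m in metrics[MetricType.mAR]]  # mean AR, also dicts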