    save_numpy_class_arrays_to_zarr,
)

- ERROR_TOLERANCE = 1e-4
+ ERROR_TOLERANCE = 0.1


# %%
@pytest.fixture(scope="session")
def setup_temp_path(tmp_path_factory):
-     # temp_dir = tmp_path_factory.mktemp("shared_test_dir")
-     temp_dir = (REPO_ROOT / "tests" / "tmp").absolute() # For debugging
+     temp_dir = tmp_path_factory.mktemp("shared_test_dir")
+     # temp_dir = (REPO_ROOT / "tests" / "tmp").absolute() # For debugging

    os.environ["TEST_TMP_DIR"] = str(temp_dir)
    yield temp_dir
    # Cleanup: Unset the environment variable after tests are done
@@ -57,7 +57,7 @@ def test_fetch_data(setup_temp_path):

    os.makedirs(setup_temp_path / "data", exist_ok=True)
    fetch_data_cli.callback(
-         crops="116,234 ",
+         crops="116,118 ",
        raw_padding=0,
        dest=setup_temp_path / "data",
        access_mode="append",
@@ -197,8 +197,6 @@ def test_evaluate(setup_temp_path, scale, iou, accuracy):
        for crop in truth_zarr.keys():
            crop_zarr = truth_zarr[crop]
            submission_zarr.create_group(crop)
-             labels = []
-             preds = []
            for label in crop_zarr.keys():
                label_zarr = crop_zarr[label]
                attrs = label_zarr.attrs.asdict()
@@ -212,19 +210,24 @@ def test_evaluate(setup_temp_path, scale, iou, accuracy):

                if scale:
                    pred = rescale(pred, scale, order=0, preserve_range=True)
-                     attrs["voxel_size"] = [s / scale for s in attrs["voxel_size"]]
-
-                 labels.append(label)
-                 preds.append(pred)
-
-             save_numpy_class_arrays_to_zarr(
-                 SUBMISSION_PATH,
-                 crop,
-                 labels,
-                 preds,
-                 overwrite=True,
-                 attrs=attrs,
-             )
+                     old_voxel_size = attrs["voxel_size"]
+                     new_voxel_size = [s / scale for s in attrs["voxel_size"]]
+                     attrs["voxel_size"] = new_voxel_size
+                     # Adjust the translation
+                     attrs["translation"] = [
+                         t + (n - o) / 2
+                         for t, o, n in zip(
+                             attrs["translation"], old_voxel_size, new_voxel_size
+                         )
+                     ]
+
+                 save_numpy_class_arrays_to_zarr(
+                     SUBMISSION_PATH,
+                     crop,
+                     [label],
+                     [pred],
+                     attrs=attrs,
+                 )
    else:
        SUBMISSION_PATH = TRUTH_PATH
    zip_submission(SUBMISSION_PATH)
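For reference, the voxel-size and translation bookkeeping above works out as in this sketch (illustrative numbers only, and assuming the translation attribute refers to the center of the first voxel):

    scale = 2
    old_voxel_size = [8.0, 8.0, 8.0]
    new_voxel_size = [s / scale for s in old_voxel_size]  # [4.0, 4.0, 4.0]
    translation = [0.0, 0.0, 0.0]
    # Offsetting by half the voxel-size change keeps the array corner
    # (translation - voxel_size / 2) at the same physical position.
    translation = [
        t + (n - o) / 2
        for t, o, n in zip(translation, old_voxel_size, new_voxel_size)
    ]  # [-2.0, -2.0, -2.0]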
@@ -245,20 +248,24 @@ def test_evaluate(setup_temp_path, scale, iou, accuracy):
            1 - results["overall_score"] < ERROR_TOLERANCE
        ), f"Overall score should be 1 but is: {results['overall_score']}"
    else:
-         assert (
-             np.abs((iou or 1) - results["overall_semantic_score"]) < ERROR_TOLERANCE
-         ), f"Semantic score should be {(iou or 1)} but is: {results['overall_semantic_score']}"
-         # Check all accuracy scores
+         # Check all accuracy scores and ious
        for label, scores in results["label_scores"].items():
            if label in INSTANCE_CLASSES:
                assert (
                    np.abs((accuracy or 1) - scores["accuracy"]) < ERROR_TOLERANCE
                ), f"Accuracy score for {label} should be {(accuracy or 1)} but is: {scores['accuracy']}"
+             else:
+                 assert (
+                     np.abs((iou or 1) - scores["iou"]) < ERROR_TOLERANCE
+                 ), f"IoU score for {label} should be {(iou or 1)} but is: {scores['iou']}"


# %%


+ def get_scaled_test_label(): ...
+
+
def simulate_predictions_iou(true_labels, iou):
    # TODO: Add false positives (only makes false negatives currently)
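The body of simulate_predictions_iou is not shown in this excerpt. As a rough illustration of the idea behind the TODO: if the simulated prediction is a strict subset of the truth (false negatives only), the IoU equals the fraction of true voxels that are kept. A minimal sketch under that assumption, not the repository's actual implementation:

    import numpy as np

    def simulate_predictions_iou_sketch(true_labels, iou):
        # Keep a random fraction `iou` of each instance's voxels. With no false
        # positives, intersection = kept voxels and union = all true voxels,
        # so the per-instance IoU is ~`iou` (up to rounding).
        rng = np.random.default_rng(0)
        pred = np.zeros_like(true_labels)
        for instance_id in np.unique(true_labels):
            if instance_id == 0:  # background
                continue
            voxels = np.flatnonzero(true_labels == instance_id)
            keep = rng.choice(voxels, size=int(round(iou * voxels.size)), replace=False)
            np.put(pred, keep, instance_id)
        return pred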