Commit 69cd97c

feat: sv_lookup endpoint, remesh query param, reuse initial meshes for cloned PCGs
1 parent 0a3232a commit 69cd97c

File tree: 4 files changed, +66 −21 lines

pychunkedgraph/app/segmentation/common.py

Lines changed: 20 additions & 8 deletions
@@ -189,11 +189,13 @@ def handle_find_minimal_covering_nodes(table_id, is_binary=True):
     ):  # Process from higher layers to lower layers
         if len(node_queue[layer]) == 0:
             continue
-
+
         current_nodes = list(node_queue[layer])
 
         # Call handle_roots to find parents
-        parents = cg.get_roots(current_nodes, stop_layer=layer + 1, time_stamp=timestamp)
+        parents = cg.get_roots(
+            current_nodes, stop_layer=layer + 1, time_stamp=timestamp
+        )
         unique_parents = np.unique(parents)
         parent_layers = np.array(
             [cg.get_chunk_layer(parent) for parent in unique_parents]
@@ -312,7 +314,11 @@ def str2bool(v):
 
 
 def publish_edit(
-    table_id: str, user_id: str, result: GraphEditOperation.Result, is_priority=True
+    table_id: str,
+    user_id: str,
+    result: GraphEditOperation.Result,
+    is_priority=True,
+    remesh: bool = True,
 ):
     import pickle
 
@@ -322,6 +328,7 @@ def publish_edit(
         "table_id": table_id,
         "user_id": user_id,
         "remesh_priority": "true" if is_priority else "false",
+        "remesh": "true" if remesh else "false",
     }
     payload = {
         "operation_id": int(result.operation_id),
@@ -343,6 +350,7 @@ def handle_merge(table_id, allow_same_segment_merge=False):
 
     nodes = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     chebyshev_distance = request.args.get("chebyshev_distance", 3, type=int)
 
     current_app.logger.debug(nodes)
@@ -391,7 +399,7 @@ def handle_merge(table_id, allow_same_segment_merge=False):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))
 
     if len(ret.new_lvl2_ids) > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)
 
     return ret
 
@@ -405,6 +413,7 @@ def handle_split(table_id):
 
     data = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     mincut = request.args.get("mincut", True, type=str2bool)
 
     current_app.logger.debug(data)
@@ -457,7 +466,7 @@ def handle_split(table_id):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))
 
     if len(ret.new_lvl2_ids) > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)
 
     return ret
 
@@ -470,6 +479,7 @@ def handle_undo(table_id):
 
     data = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     user_id = str(g.auth_user.get("id", current_app.user_id))
 
     current_app.logger.debug(data)
@@ -489,7 +499,7 @@ def handle_undo(table_id):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))
 
     if ret.new_lvl2_ids.size > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)
 
     return ret
 
@@ -502,6 +512,7 @@ def handle_redo(table_id):
 
     data = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     user_id = str(g.auth_user.get("id", current_app.user_id))
 
     current_app.logger.debug(data)
@@ -521,7 +532,7 @@ def handle_redo(table_id):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))
 
     if ret.new_lvl2_ids.size > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)
 
     return ret
 
@@ -536,6 +547,7 @@ def handle_rollback(table_id):
     target_user_id = request.args["user_id"]
 
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
    skip_operation_ids = np.array(
         json.loads(request.args.get("skip_operation_ids", "[]")), dtype=np.uint64
     )
@@ -562,7 +574,7 @@
         raise cg_exceptions.BadRequest(str(e))
 
     if ret.new_lvl2_ids.size > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)
 
     return user_operations
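Note: the new `remesh` query parameter defaults to true and is threaded from every edit handler (merge, split, undo, redo, rollback) into `publish_edit`, so a client can commit an edit without queueing mesh regeneration. A minimal client sketch, assuming a hypothetical deployment URL and the `[node_id, x, y, z]` merge payload shape:

import json
import requests

# Hypothetical endpoint; the query params mirror what handle_merge
# reads via request.args.get(..., type=str2bool).
url = "https://example.com/segmentation/api/v1/table/my_table/merge"

# Two supervoxel endpoints to merge: [node_id, x, y, z].
nodes = [
    [97115467812, 1200, 3400, 120],
    [97115467813, 1210, 3400, 120],
]

resp = requests.post(
    url,
    data=json.dumps(nodes),
    # remesh=false publishes the edit but tells the mesh worker to skip it;
    # priority keeps its existing meaning for the remesh queue.
    params={"remesh": "false", "priority": "true"},
)
resp.raise_for_status()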

pychunkedgraph/app/segmentation/v1/routes.py

Lines changed: 19 additions & 0 deletions
@@ -15,6 +15,7 @@
 )
 
 from pychunkedgraph.app import common as app_common
+from pychunkedgraph.app import app_utils
 from pychunkedgraph.app.app_utils import (
     jsonify_with_kwargs,
     remap_public,
@@ -626,3 +627,21 @@ def valid_nodes(table_id):
     resp = common.valid_nodes(table_id, is_binary=is_binary)
 
     return jsonify_with_kwargs(resp, int64_as_str=int64_as_str)
+
+
+@bp.route("/table/<table_id>/supervoxel_lookup", methods=["POST"])
+@auth_requires_permission("admin")
+@remap_public(edit=False)
+def handle_supervoxel_lookup(table_id):
+    int64_as_str = request.args.get("int64_as_str", default=False, type=toboolean)
+
+    nodes = json.loads(request.data)
+    cg = app_utils.get_cg(table_id)
+    node_ids = []
+    coords = []
+    for node in nodes:
+        node_ids.append(node[0])
+        coords.append(np.array(node[1:]) / cg.segmentation_resolution)
+
+    atomic_ids = app_utils.handle_supervoxel_id_lookup(cg, coords, node_ids)
+    return jsonify_with_kwargs(atomic_ids, int64_as_str=int64_as_str)
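Note: the new route expects a JSON list of `[node_id, x, y, z]` entries; the coordinates appear to be in world units (nanometers), since the handler divides them by `cg.segmentation_resolution` to obtain voxel coordinates before the supervoxel ID lookup. A usage sketch (URL and ID values are placeholders):

import json
import requests

url = "https://example.com/segmentation/api/v1/table/my_table/supervoxel_lookup"

# [node_id, x, y, z], with x/y/z in nanometers; the handler divides
# by cg.segmentation_resolution to get voxel coordinates.
payload = [[864691135463333452, 480112, 302240, 84480]]

resp = requests.post(
    url,
    data=json.dumps(payload),
    params={"int64_as_str": "true"},  # avoid uint64 precision loss in JSON
)
atomic_ids = resp.json()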

pychunkedgraph/meshing/meshing_batch.py

Lines changed: 17 additions & 2 deletions
@@ -1,8 +1,12 @@
+import argparse, os
+import numpy as np
+from cloudvolume import CloudVolume
+from cloudfiles import CloudFiles
 from taskqueue import TaskQueue, LocalTaskQueue
-import argparse
+
 from pychunkedgraph.graph.chunkedgraph import ChunkedGraph  # noqa
-import numpy as np
 from pychunkedgraph.meshing.meshing_sqs import MeshTask
+from pychunkedgraph.meshing import meshgen_utils  # noqa
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
@@ -13,11 +17,22 @@
     parser.add_argument('--layer', type=int)
     parser.add_argument('--mip', type=int)
     parser.add_argument('--skip_cache', action='store_true')
+    parser.add_argument('--overwrite', type=bool, default=False)
 
     args = parser.parse_args()
     cache = not args.skip_cache
 
     cg = ChunkedGraph(graph_id=args.cg_name)
+    cv = CloudVolume(
+        f"graphene://https://localhost/segmentation/table/dummy",
+        info=meshgen_utils.get_json_info(cg),
+    )
+    dst = os.path.join(
+        cv.cloudpath, cv.mesh.meta.mesh_path, "initial", str(args.layer)
+    )
+    cf = CloudFiles(dst)
+    if len(list(cf.list())) > 0 and not args.overwrite:
+        raise ValueError(f"Destination {dst} is not empty. Use `--overwrite true` to proceed anyway.")
 
     chunks_arr = []
     for x in range(args.chunk_start[0],args.chunk_end[0]):
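One caveat on the new flag: `argparse` with `type=bool` applies Python's `bool()` to the raw argument string, and any non-empty string is truthy, so `--overwrite false` also parses as `True` and disables the guard. A safer variant (a sketch, not part of this commit) uses an explicit converter in the spirit of the `str2bool` helper in segmentation/common.py:

import argparse

def str2bool(v: str) -> bool:
    # Parse common truthy/falsy spellings instead of bool("..."),
    # which is True for every non-empty string.
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean, got {v!r}")

parser = argparse.ArgumentParser()
# With this converter, `--overwrite false` really parses as False.
parser.add_argument("--overwrite", type=str2bool, default=False)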

workers/mesh_worker.py

Lines changed: 10 additions & 11 deletions
@@ -22,6 +22,10 @@ def callback(payload):
     op_id = int(data["operation_id"])
     l2ids = np.array(data["new_lvl2_ids"], dtype=basetypes.NODE_ID)
     table_id = payload.attributes["table_id"]
+    remesh = payload.attributes["remesh"]
+
+    if remesh == "false":
+        return
 
     try:
         cg = PCG_CACHE[table_id]
@@ -37,9 +41,12 @@ def callback(payload):
     )
 
     try:
-        mesh_dir = cg.meta.dataset_info["mesh"]
-        mesh_meta = cg.meta.dataset_info["mesh_metadata"]
-        cv_unsharded_mesh_dir = mesh_meta.get("unsharded_mesh_dir", "dynamic")
+        mesh_meta = cg.meta.custom_data["mesh"]
+        mesh_dir = mesh_meta["dir"]
+        layer = mesh_meta["max_layer"]
+        mip = mesh_meta["mip"]
+        err = mesh_meta["max_error"]
+        cv_unsharded_mesh_dir = mesh_meta.get("dynamic_mesh_dir", "dynamic")
     except KeyError:
         logging.warning(f"No metadata found for {cg.graph_id}; ignoring...")
         return
@@ -48,14 +55,6 @@ def callback(payload):
         cg.meta.data_source.WATERSHED, mesh_dir, cv_unsharded_mesh_dir
     )
 
-    try:
-        mesh_data = cg.meta.custom_data["mesh"]
-        layer = mesh_data["max_layer"]
-        mip = mesh_data["mip"]
-        err = mesh_data["max_error"]
-    except KeyError:
-        return
-
 
     logging.log(INFO_HIGH, f"remeshing {l2ids}; graph {table_id} operation {op_id}.")
     meshgen.remeshing(
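Pub/Sub-style message attributes are strings, which is why `publish_edit` encodes the flag as "true"/"false" and the worker compares against the literal string. Note that messages published before this change carry no `remesh` attribute, so `payload.attributes["remesh"]` would raise a KeyError for them; a tolerant variant (a sketch, assuming `attributes` is dict-like) defaults to remeshing:

def should_remesh(payload) -> bool:
    # Default to "true": edits published by older servers lack the
    # attribute and should still be remeshed.
    return payload.attributes.get("remesh", "true") != "false"

def callback(payload):
    if not should_remesh(payload):
        return  # skip remeshing entirely for remesh=false edits
    ...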
