@@ -189,11 +189,13 @@ def handle_find_minimal_covering_nodes(table_id, is_binary=True):
     ):  # Process from higher layers to lower layers
         if len(node_queue[layer]) == 0:
             continue
-
+
         current_nodes = list(node_queue[layer])

         # Call handle_roots to find parents
-        parents = cg.get_roots(current_nodes, stop_layer=layer + 1, time_stamp=timestamp)
+        parents = cg.get_roots(
+            current_nodes, stop_layer=layer + 1, time_stamp=timestamp
+        )
         unique_parents = np.unique(parents)
         parent_layers = np.array(
             [cg.get_chunk_layer(parent) for parent in unique_parents]
@@ -312,7 +314,11 @@ def str2bool(v):


 def publish_edit(
-    table_id: str, user_id: str, result: GraphEditOperation.Result, is_priority=True
+    table_id: str,
+    user_id: str,
+    result: GraphEditOperation.Result,
+    is_priority=True,
+    remesh: bool = True,
 ):
     import pickle

@@ -322,6 +328,7 @@ def publish_edit(
         "table_id": table_id,
         "user_id": user_id,
         "remesh_priority": "true" if is_priority else "false",
+        "remesh": "true" if remesh else "false",
     }
     payload = {
         "operation_id": int(result.operation_id),
@@ -343,6 +350,7 @@ def handle_merge(table_id, allow_same_segment_merge=False):

     nodes = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     chebyshev_distance = request.args.get("chebyshev_distance", 3, type=int)

     current_app.logger.debug(nodes)
@@ -391,7 +399,7 @@ def handle_merge(table_id, allow_same_segment_merge=False):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

     if len(ret.new_lvl2_ids) > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)

     return ret

@@ -405,6 +413,7 @@ def handle_split(table_id):

     data = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     mincut = request.args.get("mincut", True, type=str2bool)

     current_app.logger.debug(data)
@@ -457,7 +466,7 @@ def handle_split(table_id):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

     if len(ret.new_lvl2_ids) > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)

     return ret

@@ -470,6 +479,7 @@ def handle_undo(table_id):

     data = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     user_id = str(g.auth_user.get("id", current_app.user_id))

     current_app.logger.debug(data)
@@ -489,7 +499,7 @@ def handle_undo(table_id):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

     if ret.new_lvl2_ids.size > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)

     return ret

@@ -502,6 +512,7 @@ def handle_redo(table_id):

     data = json.loads(request.data)
     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     user_id = str(g.auth_user.get("id", current_app.user_id))

     current_app.logger.debug(data)
@@ -521,7 +532,7 @@ def handle_redo(table_id):
     current_app.logger.debug(("lvl2_nodes:", ret.new_lvl2_ids))

     if ret.new_lvl2_ids.size > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)

     return ret

@@ -536,6 +547,7 @@ def handle_rollback(table_id):
     target_user_id = request.args["user_id"]

     is_priority = request.args.get("priority", True, type=str2bool)
+    remesh = request.args.get("remesh", True, type=str2bool)
     skip_operation_ids = np.array(
         json.loads(request.args.get("skip_operation_ids", "[]")), dtype=np.uint64
     )
@@ -562,7 +574,7 @@ def handle_rollback(table_id):
         raise cg_exceptions.BadRequest(str(e))

     if ret.new_lvl2_ids.size > 0:
-        publish_edit(table_id, user_id, ret, is_priority=is_priority)
+        publish_edit(table_id, user_id, ret, is_priority=is_priority, remesh=remesh)

     return user_operations

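Note (not part of the diff): the handlers above all read the new flag with request.args.get("remesh", True, type=str2bool), so remeshing stays enabled unless a client explicitly passes remesh=false. Below is a small standalone check of that pattern, using a throwaway Flask app, an arbitrary path, and an assumed str2bool body.

# Standalone sketch of the query-parameter pattern used by the edit handlers.
from flask import Flask, request

app = Flask(__name__)


def str2bool(v):
    # Assumed implementation; only the signature appears in this diff.
    return str(v).lower() in ("yes", "true", "t", "1")


def parse_edit_flags():
    # Same defaults as in handle_merge/handle_split/handle_undo/handle_redo.
    is_priority = request.args.get("priority", True, type=str2bool)
    remesh = request.args.get("remesh", True, type=str2bool)
    return is_priority, remesh


with app.test_request_context("/edit?priority=true&remesh=false"):
    assert parse_edit_flags() == (True, False)

with app.test_request_context("/edit"):
    # Both flags default to True when the query parameters are omitted.
    assert parse_edit_flags() == (True, True)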