 import tempfile
 import time
 import unittest
+import uuid

 import backend as F
 import dgl
@@ -121,8 +122,9 @@ def start_dist_dataloader(
     num_nodes_to_sample = 202
     batch_size = 32
     train_nid = th.arange(num_nodes_to_sample)
+    graph_name = os.path.splitext(os.path.basename(part_config))[0]
     dist_graph = DistGraph(
-        "test_sampling",
+        graph_name,
         gpb=gpb,
         part_config=part_config,
     )
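
Note on the derivation added in this hunk: the tests build part_config as os.path.join(test_dir, f"{graph_name}.json") (see the later hunks), so a worker that only receives part_config can recover the graph name from the file name itself instead of hard-coding it. A minimal sketch of what the added expression yields, using a purely hypothetical path for illustration:

    import os

    # hypothetical partition config path; in the tests it comes from partition_graph()
    part_config = "/tmp/part_dir/graph_1f2e3d.json"
    graph_name = os.path.splitext(os.path.basename(part_config))[0]
    print(graph_name)  # graph_1f2e3d
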
@@ -204,17 +206,17 @@ def test_standalone():
         print(g.idtype)
         num_parts = 1
         num_hops = 1
-
+        graph_name = f"graph_{uuid.uuid4()}"
         orig_nid, orig_eid = partition_graph(
             g,
-            "test_sampling",
+            graph_name,
             num_parts,
             test_dir,
             num_hops=num_hops,
             part_method="metis",
             return_mapping=True,
         )
-        part_config = os.path.join(test_dir, "test_sampling.json")
+        part_config = os.path.join(test_dir, f"{graph_name}.json")
         os.environ["DGL_DIST_MODE"] = "standalone"
         try:
             start_dist_dataloader(
@@ -243,7 +245,8 @@ def start_dist_neg_dataloader(
         _, _, _, gpb, _, _, _ = load_partition(part_config, rank)
     num_edges_to_sample = 202
     batch_size = 32
-    dist_graph = DistGraph("test_mp", gpb=gpb, part_config=part_config)
+    graph_name = os.path.splitext(os.path.basename(part_config))[0]
+    dist_graph = DistGraph(graph_name, gpb=gpb, part_config=part_config)
     assert len(dist_graph.ntypes) == len(groundtruth_g.ntypes)
     assert len(dist_graph.etypes) == len(groundtruth_g.etypes)
     if len(dist_graph.etypes) == 1:
@@ -304,16 +307,17 @@ def check_neg_dataloader(g, num_server, num_workers):

         num_parts = num_server
         num_hops = 1
+        graph_name = f"graph_{uuid.uuid4()}"
         orig_nid, orig_eid = partition_graph(
             g,
-            "test_sampling",
+            graph_name,
             num_parts,
             test_dir,
             num_hops=num_hops,
             part_method="metis",
             return_mapping=True,
         )
-        part_config = os.path.join(test_dir, "test_sampling.json")
+        part_config = os.path.join(test_dir, f"{graph_name}.json")
         if not isinstance(orig_nid, dict):
             orig_nid = {g.ntypes[0]: orig_nid}
         if not isinstance(orig_eid, dict):
@@ -380,10 +384,10 @@ def test_dist_dataloader(num_server, num_workers, use_graphbolt, return_eids):
         g = CitationGraphDataset("cora")[0]
         num_parts = num_server
         num_hops = 1
-
+        graph_name = f"graph_{uuid.uuid4()}"
         orig_nid, orig_eid = partition_graph(
             g,
-            "test_sampling",
+            graph_name,
             num_parts,
             test_dir,
             num_hops=num_hops,
@@ -393,7 +397,7 @@ def test_dist_dataloader(num_server, num_workers, use_graphbolt, return_eids):
             store_eids=return_eids,
         )

-        part_config = os.path.join(test_dir, "test_sampling.json")
+        part_config = os.path.join(test_dir, f"{graph_name}.json")
         pserver_list = []
         ctx = mp.get_context("spawn")
         for i in range(num_server):
@@ -461,8 +465,9 @@ def start_node_dataloader(
         _, _, _, gpb, _, _, _ = load_partition(part_config, rank)
     num_nodes_to_sample = 202
     batch_size = 32
+    graph_name = os.path.splitext(os.path.basename(part_config))[0]
     dist_graph = DistGraph(
-        "test_sampling",
+        graph_name,
         gpb=gpb,
         part_config=part_config,
     )
@@ -580,7 +585,8 @@ def start_edge_dataloader(
         _, _, _, gpb, _, _, _ = load_partition(part_config, rank)
     num_edges_to_sample = 202
     batch_size = 32
-    dist_graph = DistGraph("test_sampling", gpb=gpb, part_config=part_config)
+    graph_name = os.path.splitext(os.path.basename(part_config))[0]
+    dist_graph = DistGraph(graph_name, gpb=gpb, part_config=part_config)
     assert len(dist_graph.ntypes) == len(groundtruth_g.ntypes)
     assert len(dist_graph.etypes) == len(groundtruth_g.etypes)
     if len(dist_graph.etypes) == 1:
@@ -767,9 +773,10 @@ def check_dataloader(

         num_parts = num_server
         num_hops = 1
+        graph_name = f"graph_{uuid.uuid4()}"
         orig_nid, orig_eid = partition_graph(
             g,
-            "test_sampling",
+            graph_name,
             num_parts,
             test_dir,
             num_hops=num_hops,
@@ -778,7 +785,7 @@ def check_dataloader(
             use_graphbolt=use_graphbolt,
             store_eids=return_eids,
         )
-        part_config = os.path.join(test_dir, "test_sampling.json")
+        part_config = os.path.join(test_dir, f"{graph_name}.json")
         if not isinstance(orig_nid, dict):
             orig_nid = {g.ntypes[0]: orig_nid}
         if not isinstance(orig_eid, dict):
@@ -900,7 +907,6 @@ def test_dataloader_homograph(
     )


-@unittest.skip(reason="Skip due to glitch in CI")
 @pytest.mark.parametrize("num_workers", [0])
 @pytest.mark.parametrize("use_graphbolt", [False, True])
 @pytest.mark.parametrize("exclude", [None, "self", "reverse_id"])
@@ -950,7 +956,6 @@ def test_dataloader_homograph_prob_or_mask(
     )


-@unittest.skip(reason="Skip due to glitch in CI")
 @pytest.mark.parametrize("num_server", [1])
 @pytest.mark.parametrize("num_workers", [0, 1])
 @pytest.mark.parametrize("dataloader_type", ["node", "edge"])
@@ -1097,7 +1102,7 @@ def test_multiple_dist_dataloaders(
     generate_ip_config(ip_config, num_parts, num_servers)

     orig_g = dgl.rand_graph(1000, 10000)
-    graph_name = "test_multiple_dataloaders"
+    graph_name = f"graph_{uuid.uuid4()}"
     partition_graph(
         orig_g,
         graph_name,
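
Taken together, the patch swaps the fixed "test_sampling" / "test_mp" / "test_multiple_dataloaders" graph names for a per-test unique name and re-enables the two previously skipped homograph dataloader tests. A minimal end-to-end sketch of the naming pattern, assuming only the dgl.distributed APIs already used in this file and a throwaway random graph:

    import os
    import tempfile
    import uuid

    import dgl
    from dgl.distributed import partition_graph

    g = dgl.rand_graph(1000, 10000)        # toy graph, as in test_multiple_dist_dataloaders
    graph_name = f"graph_{uuid.uuid4()}"   # unique per invocation, so runs cannot collide
    with tempfile.TemporaryDirectory() as test_dir:
        partition_graph(g, graph_name, 1, test_dir, num_hops=1)
        part_config = os.path.join(test_dir, f"{graph_name}.json")
        # a worker given only part_config can recover the same name:
        assert os.path.splitext(os.path.basename(part_config))[0] == graph_name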