 import sys
 import traceback
 from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
+
 os.environ["NCCL_DEBUG"] = "OFF"
 os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
 os.environ["GLOG_minloglevel"] = "2"
@@ -28,6 +29,7 @@
 
 backend = "nccl"
 
+
 def init_process(rank, world_size, fn, backend=backend):
     try:
         dist.init_process_group(backend, rank=rank, world_size=world_size)
@@ -38,6 +40,7 @@ def init_process(rank, world_size, fn, backend=backend):
         dist.destroy_process_group()
         raise e
 
+
 def run_mcmc(rank, world_size):
     try:
         if rank != 0:
@@ -49,7 +52,6 @@ def unit_circle_integrand(x, f):
             f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
             return f[:, 0]
 
-
         def half_sphere_integrand(x, f):
             f[:, 0] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2
             return f[:, 0]
@@ -66,12 +68,14 @@ def half_sphere_integrand(x, f):
 
         # Monte Carlo and MCMC for Unit Circle
         mc_integrator = MonteCarlo(
-            f=unit_circle_integrand, bounds=bounds, batch_size=batch_size,
-            device=device
+            f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, device=device
         )
         mcmc_integrator = MarkovChainMonteCarlo(
-            f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, nburnin=n_therm,
-            device=device
+            f=unit_circle_integrand,
+            bounds=bounds,
+            batch_size=batch_size,
+            nburnin=n_therm,
+            device=device,
         )
 
         print("Unit Circle Integration Results:")
@@ -81,16 +85,19 @@ def half_sphere_integrand(x, f):
         # Train VEGAS map for Unit Circle
         vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=0.5)
         vegas_integrator = MonteCarlo(
-            bounds, f=unit_circle_integrand, maps=vegas_map, batch_size=batch_size,
-            device=device
+            bounds,
+            f=unit_circle_integrand,
+            maps=vegas_map,
+            batch_size=batch_size,
+            device=device,
         )
         vegasmcmc_integrator = MarkovChainMonteCarlo(
             bounds,
             f=unit_circle_integrand,
             maps=vegas_map,
             batch_size=batch_size,
             nburnin=n_therm,
-            device=device
+            device=device,
         )
 
         print("VEGAS:", vegas_integrator(n_eval))
@@ -105,14 +112,15 @@ def half_sphere_integrand(x, f):
         print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))
 
         vegas_map.make_uniform()
-        vegas_map.adaptive_training(batch_size, half_sphere_integrand, epoch=10, alpha=0.5)
+        vegas_map.adaptive_training(
+            batch_size, half_sphere_integrand, epoch=10, alpha=0.5
+        )
         vegas_integrator.f = half_sphere_integrand
         vegasmcmc_integrator.f = half_sphere_integrand
 
         print("VEGAS:", vegas_integrator(n_eval))
         print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
 
-
     except Exception as e:
         print(f"Error in run_mcmc for rank {rank}: {e}")
         traceback.print_exc()
@@ -135,6 +143,7 @@ def test_mcmc(world_size):
     except Exception as e:
         print(f"Error in test_mcmc: {e}")
 
+
 if __name__ == "__main__":
     mp.set_start_method("spawn", force=True)
-    test_mcmc(4)
+    test_mcmc(4)
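
Read together, the calls this commit reformats make up a complete MCintegration workflow: an integrand that fills f in place, plain and Markov-chain integrators built over the same bounds, and a Vegas map trained with adaptive_training and passed to both via maps. Below is a minimal single-process sketch of that workflow, using only the signatures visible in the hunks above; the CPU device, the Vegas(bounds, device=device) constructor, and all numeric values (bounds, batch_size, n_eval, n_therm) are illustrative assumptions, not taken from this commit.

import torch
from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas

device = torch.device("cpu")  # the test above pins one CUDA device per rank
bounds = [(-1, 1), (-1, 1)]   # assumed: integrate over the square [-1, 1]^2
batch_size = 10000            # illustrative values throughout
n_eval = 400000
n_therm = 10


def unit_circle_integrand(x, f):
    # Indicator of the unit disk; its integral over [-1, 1]^2 is pi.
    f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
    return f[:, 0]


# Plain Monte Carlo and MCMC estimates of the same integral.
mc_integrator = MonteCarlo(
    f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, device=device
)
mcmc_integrator = MarkovChainMonteCarlo(
    f=unit_circle_integrand,
    bounds=bounds,
    batch_size=batch_size,
    nburnin=n_therm,
    device=device,
)
print("MC:", mc_integrator(n_eval))
print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))

# Train a Vegas map on the integrand, then sample through it.
vegas_map = Vegas(bounds, device=device)  # assumed constructor
vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=0.5)
vegas_integrator = MonteCarlo(
    bounds,
    f=unit_circle_integrand,
    maps=vegas_map,
    batch_size=batch_size,
    device=device,
)
print("VEGAS:", vegas_integrator(n_eval))

All three estimates should approach pi; the Vegas-mapped run typically shows a smaller statistical error for the same n_eval.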
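
The scaffolding around those calls (init_process, run_mcmc, test_mcmc(4)) is the standard torch.distributed spawn pattern. Here is a self-contained sketch of that pattern, independent of MCintegration; the gloo backend, the work function, and the MASTER_ADDR/MASTER_PORT defaults are stand-ins for local CPU runs (the file itself selects nccl, which requires one GPU per rank).

import os

import torch.distributed as dist
import torch.multiprocessing as mp

# Rendezvous address for the default env:// init method (assumed defaults).
os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "29500")


def init_process(rank, world_size, fn, backend="gloo"):
    # Each worker joins the process group before doing any work.
    dist.init_process_group(backend, rank=rank, world_size=world_size)
    try:
        fn(rank, world_size)
    finally:
        # Tear the group down even if fn raises, mirroring the cleanup
        # shown in the hunk above.
        dist.destroy_process_group()


def work(rank, world_size):
    # Stand-in for run_mcmc: each rank would build its integrators here.
    print(f"rank {rank} of {world_size} ready")


if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    world_size = 4
    # mp.spawn invokes init_process(rank, *args) once per rank.
    mp.spawn(init_process, args=(world_size, work), nprocs=world_size)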