
Commit fb6c0a4

fix conflicts

Merge of 2 parents: 0df1c57 + 44519c9

File tree: 3 files changed (+46 additions, -21 deletions)

examples/example_1.py

Lines changed: 20 additions & 11 deletions
@@ -20,6 +20,7 @@
 import sys
 import traceback
 from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
+
 os.environ["NCCL_DEBUG"] = "OFF"
 os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
 os.environ["GLOG_minloglevel"] = "2"
@@ -28,6 +29,7 @@
 
 backend = "nccl"
 
+
 def init_process(rank, world_size, fn, backend=backend):
     try:
         dist.init_process_group(backend, rank=rank, world_size=world_size)
@@ -38,6 +40,7 @@ def init_process(rank, world_size, fn, backend=backend):
         dist.destroy_process_group()
         raise e
 
+
 def run_mcmc(rank, world_size):
     try:
         if rank != 0:
@@ -49,7 +52,6 @@ def unit_circle_integrand(x, f):
             f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
             return f[:, 0]
 
-
         def half_sphere_integrand(x, f):
             f[:, 0] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2
             return f[:, 0]
@@ -66,12 +68,14 @@ def half_sphere_integrand(x, f):
 
         # Monte Carlo and MCMC for Unit Circle
         mc_integrator = MonteCarlo(
-            f=unit_circle_integrand, bounds=bounds, batch_size=batch_size,
-            device=device
+            f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, device=device
         )
         mcmc_integrator = MarkovChainMonteCarlo(
-            f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, nburnin=n_therm,
-            device=device
+            f=unit_circle_integrand,
+            bounds=bounds,
+            batch_size=batch_size,
+            nburnin=n_therm,
+            device=device,
         )
 
         print("Unit Circle Integration Results:")
@@ -81,16 +85,19 @@ def half_sphere_integrand(x, f):
         # Train VEGAS map for Unit Circle
         vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=0.5)
         vegas_integrator = MonteCarlo(
-            bounds, f=unit_circle_integrand, maps=vegas_map, batch_size=batch_size,
-            device=device
+            bounds,
+            f=unit_circle_integrand,
+            maps=vegas_map,
+            batch_size=batch_size,
+            device=device,
         )
         vegasmcmc_integrator = MarkovChainMonteCarlo(
             bounds,
             f=unit_circle_integrand,
             maps=vegas_map,
             batch_size=batch_size,
             nburnin=n_therm,
-            device=device
+            device=device,
         )
 
         print("VEGAS:", vegas_integrator(n_eval))
@@ -105,14 +112,15 @@ def half_sphere_integrand(x, f):
         print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))
 
         vegas_map.make_uniform()
-        vegas_map.adaptive_training(batch_size, half_sphere_integrand, epoch=10, alpha=0.5)
+        vegas_map.adaptive_training(
+            batch_size, half_sphere_integrand, epoch=10, alpha=0.5
+        )
         vegas_integrator.f = half_sphere_integrand
         vegasmcmc_integrator.f = half_sphere_integrand
 
         print("VEGAS:", vegas_integrator(n_eval))
         print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
 
-
     except Exception as e:
         print(f"Error in run_mcmc for rank {rank}: {e}")
         traceback.print_exc()
@@ -135,6 +143,7 @@ def test_mcmc(world_size):
     except Exception as e:
         print(f"Error in test_mcmc: {e}")
 
+
 if __name__ == "__main__":
     mp.set_start_method("spawn", force=True)
-    test_mcmc(4)
+    test_mcmc(4)
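
For orientation, the sketch below strips example_1.py down to a single-process call pattern using only the signatures visible in this diff (MonteCarlo, MarkovChainMonteCarlo, Vegas adaptive_training, and the integrator(n_eval) / mix_rate calls). The bounds, batch size, evaluation count, and the Vegas(dim, device=device) construction are assumptions added for illustration; they are not part of this commit.

import torch
from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dim = 2
bounds = [(-1, 1)] * dim   # assumed domain for the unit-circle integrand
batch_size = 10000         # assumed
n_eval = 400000            # assumed
n_therm = 20               # assumed burn-in length


def unit_circle_integrand(x, f):
    # Indicator of the unit disk, exactly as in the diff above.
    f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
    return f[:, 0]


# Plain Monte Carlo and Markov-chain Monte Carlo, keyword-for-keyword as in the diff.
mc = MonteCarlo(f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, device=device)
mcmc = MarkovChainMonteCarlo(
    f=unit_circle_integrand,
    bounds=bounds,
    batch_size=batch_size,
    nburnin=n_therm,
    device=device,
)
print("Plain MC:", mc(n_eval))
print("MCMC:", mcmc(n_eval, mix_rate=0.5))

# VEGAS-mapped estimate; only adaptive_training/make_uniform and maps=vegas_map
# appear in the commit, so the Vegas(dim, device=device) call is an assumption.
vegas_map = Vegas(dim, device=device)
vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=0.5)
vegas_mc = MonteCarlo(
    bounds,
    f=unit_circle_integrand,
    maps=vegas_map,
    batch_size=batch_size,
    device=device,
)
print("VEGAS:", vegas_mc(n_eval))

If the integrators report the integral over the given bounds, as the original example's prints suggest, each estimate should come out close to pi, the area of the unit disk inside the [-1, 1] x [-1, 1] box.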

examples/example_2.py

Lines changed: 26 additions & 8 deletions
@@ -20,6 +20,7 @@
 import sys
 import traceback
 from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
+
 os.environ["NCCL_DEBUG"] = "OFF"
 os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
 os.environ["GLOG_minloglevel"] = "2"
@@ -28,6 +29,7 @@
 
 backend = "nccl"
 
+
 def init_process(rank, world_size, fn, backend=backend):
     try:
         dist.init_process_group(backend, rank=rank, world_size=world_size)
@@ -38,6 +40,7 @@ def init_process(rank, world_size, fn, backend=backend):
         dist.destroy_process_group()
         raise e
 
+
 def run_mcmc(rank, world_size):
     try:
         if rank != 0:
@@ -53,7 +56,6 @@ def sharp_integrands(x, f):
             f[:, 2] = f[:, 0] * x[:, 0] ** 2
             return f.mean(dim=-1)
 
-
         dim = 4
         bounds = [(0, 1)] * dim
         n_eval = 6400000
@@ -68,20 +70,36 @@ def sharp_integrands(x, f):
 
         # Plain MC and MCMC
         mc_integrator = MonteCarlo(
-            f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size,device=device
+            f=sharp_integrands,
+            f_dim=3,
+            bounds=bounds,
+            batch_size=batch_size,
+            device=device,
         )
         mcmc_integrator = MarkovChainMonteCarlo(
-            f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size, nburnin=n_therm,device=device
+            f=sharp_integrands,
+            f_dim=3,
+            bounds=bounds,
+            batch_size=batch_size,
+            nburnin=n_therm,
+            device=device,
         )
 
         print("Sharp Peak Integration Results:")
         print("Plain MC:", mc_integrator(n_eval))
         print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))
 
         # Train VEGAS map
-        vegas_map.adaptive_training(batch_size, sharp_integrands, f_dim=3, epoch=10, alpha=2.0)
+        vegas_map.adaptive_training(
+            batch_size, sharp_integrands, f_dim=3, epoch=10, alpha=2.0
+        )
         vegas_integrator = MonteCarlo(
-            bounds, f=sharp_integrands, f_dim=3, maps=vegas_map, batch_size=batch_size,device=device
+            bounds,
+            f=sharp_integrands,
+            f_dim=3,
+            maps=vegas_map,
+            batch_size=batch_size,
+            device=device,
        )
         vegasmcmc_integrator = MarkovChainMonteCarlo(
             bounds,
@@ -90,13 +108,12 @@ def sharp_integrands(x, f):
             maps=vegas_map,
             batch_size=batch_size,
             nburnin=n_therm,
-            device=device
+            device=device,
         )
 
         print("VEGAS:", vegas_integrator(n_eval))
         print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
 
-
     except Exception as e:
         print(f"Error in run_mcmc for rank {rank}: {e}")
         traceback.print_exc()
@@ -119,6 +136,7 @@ def test_mcmc(world_size):
     except Exception as e:
         print(f"Error in test_mcmc: {e}")
 
+
 if __name__ == "__main__":
     mp.set_start_method("spawn", force=True)
-    test_mcmc(4)
+    test_mcmc(4)
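
example_2.py exercises the multi-component interface: the integrand fills three output columns and the integrators receive f_dim=3. Only the last component and the f.mean(dim=-1) return are visible in this hunk, so in the sketch below the first two components are placeholder expressions (a sharp Gaussian peak and its first moment) marked as assumptions; dim = 4, bounds = [(0, 1)] * dim, and the keyword layout come from the diff.

import torch
from MCintegration import MonteCarlo


def sharp_integrands(x, f):
    # Components 0 and 1 are assumed placeholders; only component 2 and the
    # mean over components are shown in this commit.
    f[:, 0] = torch.exp(-200 * ((x - 0.5) ** 2).sum(dim=-1))  # assumed sharp peak
    f[:, 1] = f[:, 0] * x[:, 0]                               # assumed
    f[:, 2] = f[:, 0] * x[:, 0] ** 2
    return f.mean(dim=-1)


dim = 4
bounds = [(0, 1)] * dim
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mc_integrator = MonteCarlo(
    f=sharp_integrands,
    f_dim=3,              # number of output columns the integrand fills
    bounds=bounds,
    batch_size=10000,     # assumed
    device=device,
)
print("Plain MC:", mc_integrator(6400000))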

examples/example_3.py

Lines changed: 0 additions & 2 deletions
@@ -1,5 +1,3 @@
-# Example 3: Integration of log(x)/sqrt(x) using VEGAS
-
 import torch
 import torch.distributed as dist
 import torch.multiprocessing as mp
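
examples/example_3.py only loses its header comment in this commit, but that comment names the target: integrating log(x)/sqrt(x) with VEGAS. Purely as a reference point, a single-process sketch of that integral with the API shown in the other two files might look as follows; the Vegas constructor, batch size, evaluation count, and the clamp guard are assumptions, and the exact value of the integral over (0, 1) is -4.

import torch
from MCintegration import MonteCarlo, Vegas

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def log_over_sqrt(x, f):
    # log(x)/sqrt(x) has an integrable singularity at x = 0; clamp to stay finite.
    xs = torch.clamp(x[:, 0], min=1e-12)
    f[:, 0] = torch.log(xs) / torch.sqrt(xs)
    return f[:, 0]


bounds = [(0, 1)]
vegas_map = Vegas(1, device=device)  # constructor assumed; not shown in this commit
vegas_map.adaptive_training(10000, log_over_sqrt, epoch=10, alpha=0.5)
integrator = MonteCarlo(
    bounds, f=log_over_sqrt, maps=vegas_map, batch_size=10000, device=device
)
print("VEGAS:", integrator(1000000))  # exact result: -4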
