Source code for MCintegration.base
import torch
from torch import nn
import numpy as np
import sys
from MCintegration.utils import get_device
MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
MAXVAL = 10 ** (sys.float_info.max_10_exp - 50)
class BaseDistribution(nn.Module):
"""
Base distribution of a flow-based model
Parameters do not depend of target variable (as is the case for a VAE encoder)
"""
def __init__(self, dim, device="cpu", dtype=torch.float32):
super().__init__()
self.dtype = dtype
self.dim = dim
def sample(self, batch_size=1, **kwargs):
"""Samples from base distribution
Args:
def sample_with_detJ(self, batch_size=1, **kwargs):
u, detJ = self.sample(batch_size, **kwargs)
detJ.exp_()
return u, detJ
class Uniform(BaseDistribution):
"""
Multivariate uniform distribution
"""
def __init__(self, dim, device="cpu", dtype=torch.float32):
super().__init__(dim, device, dtype)
def sample(self, batch_size=1, **kwargs):
# torch.manual_seed(0) # test seed
u = torch.rand((batch_size, self.dim), device=self.device, dtype=self.dtype)
log_detJ = torch.zeros(batch_size, device=self.device, dtype=self.dtype)
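A minimal usage sketch of the base distribution above (module path taken from the imports at the top of this listing; the returned pair follows how sample_with_detJ consumes it):

from MCintegration.base import Uniform

q0 = Uniform(dim=2, device="cpu")
u, log_detJ = q0.sample(batch_size=4)        # u uniform in [0, 1)^2; log|J| = 0 for the uniform base
u, detJ = q0.sample_with_detJ(batch_size=4)  # same draw, but detJ = exp(log|J|) = 1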
class LinearMap(nn.Module):
def __init__(self, A, b, device=None, dtype=torch.float32):
if device is None:
self.device = get_device()
else:
def forward(self, u):
return u * self.A + self.b, torch.log(self._detJ.repeat(u.shape[0]))
def forward_with_detJ(self, u):
u, detJ = self.forward(u)
detJ.exp_()
return u, detJ
def inverse(self, x):
return (x - self.b) / self.A, torch.log(self._detJ.repeat(x.shape[0]))
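A small sketch of the affine map above; passing `A` and `b` directly as per-dimension tensors is an assumption drawn from the element-wise arithmetic in forward/inverse:

import torch
from MCintegration.base import LinearMap

# Map u in [0, 1]^2 onto the box [-1, 1] x [0, 3]; A is the scale, b the offset.
lm = LinearMap(A=torch.tensor([2.0, 3.0]), b=torch.tensor([-1.0, 0.0]))
u = torch.rand(5, 2)
x, log_detJ = lm.forward(u)   # x = u * A + b; the map's log-Jacobian is repeated per sample
u_back, _ = lm.inverse(x)     # recovers u up to floating-point error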
Source code for MCintegration.integrators
from typing import Callable
import torch
from MCintegration.utils import RAvg, get_device
from MCintegration.maps import Configuration, CompositeMap
from MCintegration.base import Uniform, EPSILON, LinearMap
import numpy as np
from warnings import warn
import os
import torch.distributed as dist
import socket
def get_ip() -> str:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80)) # Doesn't need to be reachable
return s.getsockname()[0]
def get_open_port() -> int:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
return s.getsockname()[1]
def setup(backend="gloo"):
# get IDs of reserved GPU
distributed_init_method = f"tcp://{get_ip()}:{get_open_port()}"
dist.init_process_group(
class Integrator:
"""
Base class for all integrators. This class is designed to handle integration tasks
over a specified domain (bounds) using a sampling method (q0) and optional
transformation maps.
"""
def __init__(
self,
bounds,
f: Callable,
...
self.rank = 0
self.world_size = 1
def __call__(self, **kwargs):
raise NotImplementedError("Subclasses must implement this method")
def sample(self, config, **kwargs):
config.u, config.detJ = self.q0.sample_with_detJ(config.batch_size)
if not self.maps:
config.x[:] = config.u
def statistics(self, means, vars, neval=None):
nblock = means.shape[0]
f_dim = means.shape[1]
nblock_total = nblock * self.world_size
class MonteCarlo(Integrator):
def __init__(
self,
bounds,
f: Callable,
...
super().__init__(bounds, f, f_dim, maps, q0, batch_size, device, dtype)
self._rangebounds = self.bounds[:, 1] - self.bounds[:, 0]
def __call__(self, neval, nblock=32, verbose=-1, **kwargs):
neval = neval // self.world_size
neval = -(-neval // self.batch_size) * self.batch_size
epoch = neval // self.batch_size
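A usage sketch of the plain Monte Carlo integrator. The keyword names f_dim and batch_size are inferred from the super().__init__ call above, the f(x, fx) in-place convention from the adaptive_training docstring further down, and passing bounds as a list of (low, high) pairs is an assumption; whether f must also return a value is not shown in this excerpt.

import torch
from MCintegration.integrators import MonteCarlo

def circle(x, fx):
    # Indicator of the unit disk; fills fx in place.
    fx[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1.0).to(fx.dtype)

mc = MonteCarlo([(-1.0, 1.0), (-1.0, 1.0)], circle, f_dim=1, batch_size=10000)
result = mc(neval=400000, nblock=32)  # expected to behave like an RAvg; mean ~ pi over these bounds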
def random_walk(dim, device, dtype, u, **kwargs):
step_size = kwargs.get("step_size", 0.2)
step_sizes = torch.ones(dim, device=device) * step_size
step = torch.empty(dim, device=device, dtype=dtype).uniform_(-1, 1) * step_sizes
def uniform(dim, device, dtype, u, **kwargs):
return torch.rand_like(u)
def gaussian(dim, device, dtype, u, **kwargs):
mean = kwargs.get("mean", torch.zeros_like(u))
std = kwargs.get("std", torch.ones_like(u))
return torch.normal(mean, std)
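The three proposal functions above share one signature, so a user-defined proposal can follow the same pattern; a sketch (the reflected step rule is illustrative, not part of the library):

import torch

def narrow_walk(dim, device, dtype, u, **kwargs):
    # Reflected random walk with a smaller default step than random_walk above.
    step_size = kwargs.get("step_size", 0.05)
    step = torch.empty_like(u).uniform_(-step_size, step_size)
    return (u + step) % 1.0  # keep proposals inside the unit hypercube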
class MarkovChainMonteCarlo(Integrator):
def __init__(
self,
bounds,
f: Callable,
...
def sample(self, config, nsteps=1, mix_rate=0.5, **kwargs):
for _ in range(nsteps):
proposed_y = self.proposal_dist(
self.dim, self.device, self.dtype, config.u, **kwargs
@@ -415,7 +415,7 @@ Source code for MCintegration.integrators
config.detJ.mul_(~accept).add_(new_detJ * accept)
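A usage sketch mirroring the MonteCarlo example earlier, using the __call__ signature shown below; the elided constructor parameters are again assumed to match the base-class names:

from MCintegration.integrators import MarkovChainMonteCarlo

mcmc = MarkovChainMonteCarlo([(-1.0, 1.0), (-1.0, 1.0)], circle, f_dim=1, batch_size=10000)
result = mcmc(neval=400000, mix_rate=0.5)  # circle as defined in the MonteCarlo sketch above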
def __call__(
self,
neval,
mix_rate=0.5,
Source code for MCintegration.maps
import numpy as np
import torch
from torch import nn
from MCintegration.base import Uniform
from MCintegration.utils import get_device
import sys
TINY = 10 ** (sys.float_info.min_10_exp + 50)
class Configuration:
def __init__(self, batch_size, dim, f_dim, device=None, dtype=torch.float32):
if device is None:
self.device = get_device()
else:
...
self.dim = dim
self.f_dim = f_dim
self.batch_size = batch_size
self.u = torch.rand((batch_size, dim), dtype=dtype, device=self.device)
self.x = torch.rand((batch_size, dim), dtype=dtype, device=self.device)
self.fx = torch.zeros((batch_size, f_dim), dtype=dtype, device=self.device)
self.weight = torch.ones((batch_size,), dtype=dtype, device=self.device)
self.detJ = torch.ones((batch_size,), dtype=dtype, device=self.device)
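A short sketch of what these buffers hold, based only on the constructor above:

from MCintegration.maps import Configuration

config = Configuration(batch_size=1000, dim=2, f_dim=1, device="cpu")
# config.u / config.x hold u-space and x-space points, config.fx the integrand values,
# config.weight and config.detJ the per-sample weights and Jacobian factors.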
class Map(nn.Module):
def __init__(self, device=None, dtype=torch.float32):
super().__init__()
if device is None:
self.device = get_device()
def forward(self, u):
raise NotImplementedError("Subclasses must implement this method")
def forward_with_detJ(self, u):
u, detJ = self.forward(u)
detJ.exp_()
return u, detJ
def inverse(self, x):
raise NotImplementedError("Subclasses must implement this method")
class CompositeMap(Map):
def __init__(self, maps, device=None, dtype=None):
if not maps:
raise ValueError("Maps can not be empty.")
if dtype is None:
...
def forward(self, u):
log_detJ = torch.zeros(len(u), device=u.device, dtype=self.dtype)
for map in self.maps:
u, log_detj = map.forward(u)
...
def inverse(self, x):
log_detJ = torch.zeros(len(x), device=x.device, dtype=self.dtype)
for i in range(len(self.maps) - 1, -1, -1):
x, log_detj = self.maps[i].inverse(x)
class Vegas(Map):
def __init__(self, dim, ninc=1000, device=None, dtype=torch.float32):
super().__init__(device, dtype)
self.dim = dim
...
f"'ninc' must be a scalar or a 1D array of length {self.dim}."
)
# Preallocate tensors to minimize memory allocations
self.max_ninc = self.ninc.max().item()
# Preallocate temporary tensors for adapt
self.sum_f = torch.zeros(
self.dim, self.max_ninc, dtype=self.dtype, device=self.device
)
self.n_f = torch.zeros(
self.dim, self.max_ninc, dtype=self.dtype, device=self.device
)
self.avg_f = torch.ones(
(self.dim, self.max_ninc), dtype=self.dtype, device=self.device
)
self.tmp_f = torch.zeros(
(self.dim, self.max_ninc), dtype=self.dtype, device=self.device
)
self.make_uniform()
def adaptive_training(
self,
batch_size,
f,
f_dim=1,
epoch=10,
alpha=0.5,
):
+ """
+ Perform adaptive training to adjust the grid based on the training function.
+
+ Args:
+ batch_size (int): Number of samples per batch.
+ f (callable): Training function that takes x and fx as inputs.
+ f_dim (int, optional): Dimension of the function f. Defaults to 1.
+ epoch (int, optional): Number of training epochs. Defaults to 10.
+ alpha (float, optional): Adaptation rate. Defaults to 0.5.
+ """
q0 = Uniform(self.dim, device=self.device, dtype=self.dtype)
sample = Configuration(
batch_size, self.dim, f_dim, device=self.device, dtype=self.dtype
...
@torch.no_grad()
def add_training_data(self, sample):
"""Add training data ``f`` for ``u``-space points ``u``.
Accumulates training data for later use by ``self.adapt()``.
...
point ``u[j, d]`` in ``u``-space.
"""
fval = (sample.detJ * sample.weight) ** 2
iu = torch.floor(sample.u * self.ninc).long()
for d in range(self.dim):
indices = iu[:, d]
@torch.no_grad()
def adapt(self, alpha=0.5):
"""
Adapt the grid based on accumulated training data.

Shrinks grid increments in regions where the accumulated f is large,
and grows them where f is small. The adaptation speed is controlled by alpha.

Args:
alpha (float, optional): Determines the speed with which the grid
adapts to training data. Large (positive) values imply
rapid evolution; small values (much less than one) imply
slow evolution. Typical values are of order one. Choosing
``alpha<0`` causes adaptation to the unmodified training
data (usually not a good idea).
"""
# Aggregate training data across distributed processes if applicable
if torch.distributed.is_initialized():
torch.distributed.all_reduce(self.sum_f, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(self.n_f, op=torch.distributed.ReduceOp.SUM)
# Initialize a new grid tensor
new_grid = torch.empty(
(self.dim, self.max_ninc + 1), dtype=self.dtype, device=self.device
)
if alpha > 0:
tmp_f = torch.empty(self.max_ninc, dtype=self.dtype, device=self.device)
for d in range(self.dim):
ninc = self.ninc[d].item()
if alpha != 0:
# Compute average f for current dimension where n_f > 0
mask = self.n_f[d, :ninc] > 0  # Shape: (ninc,)
avg_f = torch.where(
mask,
self.sum_f[d, :ninc] / self.n_f[d, :ninc],
torch.zeros_like(self.sum_f[d, :ninc]),
)  # Shape: (ninc,)
if alpha > 0:
# Smooth avg_f
tmp_f[0] = (7.0 * avg_f[0] + avg_f[1]).abs() / 8.0
tmp_f[ninc - 1] = (7.0 * avg_f[ninc - 1] + avg_f[ninc - 2]).abs() / 8.0
tmp_f[1 : ninc - 1] = (
6.0 * avg_f[1 : ninc - 1] + avg_f[: ninc - 2] + avg_f[2:ninc]
).abs() / 8.0
# Normalize tmp_f to ensure the sum is 1
sum_f = torch.sum(tmp_f[:ninc]).clamp_min_(TINY)  # Scalar
avg_f = tmp_f[:ninc] / sum_f + TINY  # Shape: (ninc,)
# Apply non-linear transformation controlled by alpha
avg_f = (-(1 - avg_f) / torch.log(avg_f)).pow_(alpha)  # Shape: (ninc,)
# Compute the target accumulated f per increment
f_ninc = avg_f.sum() / ninc  # Scalar
new_grid[d, 0] = self.grid[d, 0]
new_grid[d, ninc] = self.grid[d, ninc]
# Calculate the target cumulative weights for each new grid point
target_cumulative_weights = (
torch.arange(1, ninc, device=self.device) * f_ninc
)
# Calculate the cumulative sum of avg_f
cumulative_avg_f = torch.cat(
(
torch.tensor([0.0], device=self.device),
torch.cumsum(avg_f, dim=0),
)
)
# Find the intervals in the original grid where the target weights fall
interval_indices = (
torch.searchsorted(
cumulative_avg_f, target_cumulative_weights, right=True
)
- 1
)
# Extract the necessary values using the interval indices
grid_left = self.grid[d, interval_indices]
inc_relevant = self.inc[d, interval_indices]
avg_f_relevant = avg_f[interval_indices]
cumulative_avg_f_relevant = cumulative_avg_f[interval_indices]
# Calculate the fractional position within each interval
fractional_positions = (
target_cumulative_weights - cumulative_avg_f_relevant
) / avg_f_relevant
# Calculate the new grid points using vectorized operations
new_grid[d, 1:ninc] = grid_left + fractional_positions * inc_relevant
else:
# If alpha == 0 or no training data, retain the existing grid
new_grid[d, :] = self.grid[d, :]
# Assign the newly computed grid
self.grid = new_grid
# Update increments: compute the difference between consecutive grid points
self.inc.zero_()  # Reset increments to zero
for d in range(self.dim):
self.inc[d, : self.ninc[d]] = (
self.grid[d, 1 : self.ninc[d] + 1] - self.grid[d, : self.ninc[d]]
)
# Clear accumulated training data for the next adaptation cycle
self.clear()
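A tiny standalone 1-D illustration of the grid update above, with made-up numbers: new interior grid points are placed where the cumulative smoothed weight reaches equal fractions of its total, so increments shrink where avg_f is large.

import torch

ninc = 4
grid = torch.linspace(0.0, 1.0, ninc + 1)    # current uniform grid
inc = grid[1:] - grid[:-1]
avg_f = torch.tensor([4.0, 2.0, 1.0, 1.0])   # projected training data per increment
f_ninc = avg_f.sum() / ninc                  # target weight per new increment
targets = torch.arange(1, ninc) * f_ninc     # cumulative weight at the new interior points
cum = torch.cat((torch.tensor([0.0]), torch.cumsum(avg_f, dim=0)))
idx = torch.searchsorted(cum, targets, right=True) - 1
frac = (targets - cum[idx]) / avg_f[idx]
new_interior = grid[idx] + frac * inc[idx]   # tensor([0.125, 0.25, 0.5]): denser where avg_f is large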
@torch.no_grad()
def make_uniform(self):
self.inc = torch.empty(
self.dim, self.max_ninc, dtype=self.dtype, device=self.device
)
self.grid = torch.empty(
self.dim, self.max_ninc + 1, dtype=self.dtype, device=self.device
)
for d in range(self.dim):
...
def extract_grid(self):
"Return a list of lists specifying the map's grid."
grid_list = []
for d in range(self.dim):
ng = self.ninc[d] + 1
grid_list.append(self.grid[d, :ng].tolist())
return grid_list
@torch.no_grad()
def clear(self):
"Clear information accumulated by :meth:`AdaptiveMap.add_training_data`."
self.sum_f.zero_()
self.n_f.zero_()
@torch.no_grad()
def forward(self, u):
u_ninc = u * self.ninc
iu = torch.floor(u_ninc).long()
du_ninc = u_ninc - iu
batch_size = u.size(0)
# Clamp iu to [0, ninc-1] to handle out-of-bounds indices
min_tensor = torch.zeros((1, self.dim), dtype=iu.dtype, device=self.device)
max_tensor = (self.ninc - 1).unsqueeze(0).to(iu.dtype)  # Shape: (1, dim)
iu_clamped = torch.clamp(iu, min=min_tensor, max=max_tensor)
grid_expanded = self.grid.unsqueeze(0).expand(batch_size, -1, -1)
inc_expanded = self.inc.unsqueeze(0).expand(batch_size, -1, -1)
grid_gather = torch.gather(grid_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(2)  # Shape: (batch_size, dim)
inc_gather = torch.gather(inc_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(2)
x = grid_gather + inc_gather * du_ninc
log_detJ = (inc_gather * self.ninc).log_().sum(dim=1)
# Handle out-of-bounds by setting x to the grid boundary and adjusting detJ
out_of_bounds = iu >= self.ninc
if out_of_bounds.any():
# For each out-of-bounds sample and dimension, set x to grid[d, ninc[d]]
# and add log(inc[d, ninc[d]-1] * ninc[d]) to log_detJ
boundary_grid = (
self.grid[torch.arange(self.dim, device=self.device), self.ninc]
.unsqueeze(0)
.expand(batch_size, -1)
)
x[out_of_bounds] = boundary_grid[out_of_bounds]
boundary_inc = (
self.inc[torch.arange(self.dim, device=self.device), self.ninc - 1]
.unsqueeze(0)
.expand(batch_size, -1)
)
adj_log_detJ = ((boundary_inc * self.ninc).log_() * out_of_bounds).sum(dim=1)
log_detJ += adj_log_detJ
return x, log_detJ
@torch.no_grad()
def inverse(self, x):
"""
Inverse map from x-space to u-space.

Args:
x (torch.Tensor): Tensor of shape (batch_size, dim) representing points in x-space.

Returns:
u (torch.Tensor): Tensor of shape (batch_size, dim) representing points in u-space.
log_detJ (torch.Tensor): Tensor of shape (batch_size,) representing the log determinant of the Jacobian.
"""
x = x.to(self.device)
batch_size, dim = x.shape
# Initialize output tensors
u = torch.empty_like(x)
log_detJ = torch.zeros(batch_size, device=self.device, dtype=self.dtype)
# Loop over each dimension to perform inverse mapping
for d in range(dim):
# Extract the grid and increment for dimension d
grid_d = self.grid[d]  # Shape: (max_ninc + 1,)
inc_d = self.inc[d]  # Shape: (max_ninc,)
ninc_d = self.ninc[d]  # Scalar tensor
# searchsorted finds the interval containing each x; result shifted to the left edge index
iu = torch.searchsorted(grid_d, x[:, d].contiguous(), right=True) - 1  # Shape: (batch_size,)
# Clamp indices to [0, ninc_d - 1] to ensure they are within the valid range
iu_clamped = torch.clamp(iu, min=0, max=ninc_d - 1)  # Shape: (batch_size,)
# Gather grid and increment values based on iu_clamped
grid_gather = grid_d[iu_clamped]  # Shape: (batch_size,)
inc_gather = inc_d[iu_clamped]  # Shape: (batch_size,)
# Compute du: fractional position within the increment
du = (x[:, d] - grid_gather) / (inc_gather + TINY)  # Shape: (batch_size,)
# Compute u for dimension d
u[:, d] = (du + iu_clamped) / ninc_d  # Shape: (batch_size,)
# Compute log determinant contribution for dimension d
log_detJ += (inc_gather * ninc_d + TINY).log_()  # Shape: (batch_size,)
# Handle out-of-bounds cases
# Lower bound: x <= grid[d, 0]
lower_mask = x[:, d] <= grid_d[0]  # Shape: (batch_size,)
if lower_mask.any():
u[:, d].masked_fill_(lower_mask, 0.0)
log_detJ += (inc_d[0] * ninc_d + TINY).log_()
# Upper bound: x >= grid[d, ninc_d]
upper_mask = x[:, d] >= grid_d[ninc_d]  # Shape: (batch_size,)
if upper_mask.any():
u[:, d].masked_fill_(upper_mask, 1.0)
log_detJ += (inc_d[ninc_d - 1] * ninc_d + TINY).log_()
return u, log_detJ
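An end-to-end sketch of the Vegas map above. The constructor and method calls follow the signatures shown in this listing; passing the trained map to MonteCarlo via a `maps=` keyword, and the in-place f(x, fx) convention, are assumptions about parts of the API elided here.

import torch
from MCintegration.maps import Vegas
from MCintegration.integrators import MonteCarlo

def peak(x, fx):
    # Gaussian peak centred in the unit square; fills fx in place.
    fx[:, 0] = torch.exp(-50.0 * ((x - 0.5) ** 2).sum(dim=1))

vegas_map = Vegas(dim=2, ninc=100, device="cpu")
vegas_map.adaptive_training(batch_size=10000, f=peak, f_dim=1, epoch=10, alpha=0.5)

u = torch.rand(5, 2)
x, log_detJ = vegas_map.forward(u)  # u-space -> x-space with per-sample log|J|
u_back, _ = vegas_map.inverse(x)    # round trip recovers u up to edge clamping

mc = MonteCarlo([(0.0, 1.0), (0.0, 1.0)], peak, f_dim=1, maps=vegas_map, batch_size=10000)
result = mc(neval=400000)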
Source code for MCintegration.utils
import torch
from torch import nn
import numpy as np
import gvar
import sys
MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
MAXVAL = 10 ** (sys.float_info.max_10_exp - 50)
class RAvg(gvar.GVar):
def __init__(self, weighted=True, itn_results=None, sum_neval=0):
if weighted:
self._wlist = []
self.weighted = True
def update(self, mean, var, last_neval=None):
self.add(gvar.gvar(mean, var**0.5))
if last_neval is not None:
self.sum_neval += last_neval
def add(self, res):
self.itn_results.append(res)
if isinstance(res, gvar.GVarRef):
return
def extend(self, ravg):
"""Merge results from :class:`RAvg` object ``ravg`` after results currently in ``self``."""
for r in ravg.itn_results:
self.add(r)
self.sum_neval += ravg.sum_neval
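A short sketch of the running average, using only the update and summary signatures shown in this listing:

from MCintegration.utils import RAvg

res = RAvg(weighted=True)
res.update(3.141, 0.02 ** 2, last_neval=100000)  # per-iteration mean, variance, samples used
res.update(3.143, 0.01 ** 2, last_neval=100000)
print(res.mean, res.sdev, res.chi2, res.Q)       # combined estimate and fit quality
print(res.summary())                             # iteration-by-iteration table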
def __reduce_ex__(self, protocol):
return (
RAvg,
(
...
),
)
def _remove_gvars(self, gvlist):
tmp = RAvg(
weighted=self.weighted,
itn_results=self.itn_results,
...
super(RAvg, tmp).__init__(*tgvar(0, 0).internaldata)
return tmp
def _distribute_gvars(self, gvlist):
return RAvg(
weighted=self.weighted,
itn_results=gvar.distribute_gvars(self.itn_results, gvlist),
sum_neval=self.sum_neval,
)
def _chi2(self):
if len(self.itn_results) <= 1:
return 0.0
if self.weighted:
...
chi2 = property(_chi2, None, None, "*chi**2* of weighted average.")
def _dof(self):
return len(self.itn_results) - 1
dof = property(
_dof, None, None, "Number of degrees of freedom in weighted average."
)
def _nitn(self):
return len(self.itn_results)
nitn = property(_nitn, None, None, "Number of iterations.")
def _Q(self):
return (
gvar.gammaQ(self.dof / 2.0, self.chi2 / 2.0)
if self.dof > 0 and self.chi2 >= 0
...
"*Q* or *p-value* of weighted average's *chi**2*.",
)
def _avg_neval(self):
return self.sum_neval / self.nitn if self.nitn > 0 else 0
avg_neval = property(
...
def summary(self, weighted=None):
"""Assemble summary of results, iteration-by-iteration, into a string.
Args:
...
def converged(self, rtol, atol):
return self.sdev < atol + rtol * abs(self.mean)
def __mul__(xx, yy):
if type(yy) in _VECTOR_TYPES:
return NotImplemented # let ndarray handle it
elif isinstance(xx, RAvg):
...
else:
return NotImplemented
def __truediv__(xx, yy):
if type(yy) in _VECTOR_TYPES:
return NotImplemented # let ndarray handle it
elif isinstance(xx, RAvg):
...
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
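A small reproducibility sketch using the two helpers in this listing:

from MCintegration.utils import set_seed, get_device

set_seed(42)           # seeds torch (and all CUDA devices, when available) for reproducible runs
device = get_device()  # current CUDA device when a GPU is visible; the CPU fallback is truncated below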
def get_device():
if torch.cuda.is_available():
return torch.cuda.current_device()
else:
...
"constant": 0, "content": [1, 2], "contigu": 0, "control": 0, "converg": [0, 2], "core": 3, "correspond": 0, "cpu": 0, "current": 0, "d": 0, "data": 0, "default": 0, "defin": 0, "degre": 0, "depend": 0, "design": 0, "determin": 0, "devic": 0, "differ": 0, "dim": 0, "dimens": 0, "direct": 0, "displai": 0, "distribut": 0, "distriubt": 0, "do": 0, "dof": [0, 2], "domain": 0, "draw": 0, "drawn": 0, "dtype": 0, "encod": 0, "epoch": 0, "eval": 3, "evalu": 0, "everi": 0, "evolut": 0, "extend": [0, 2], "extract_grid": [0, 2], "f": 0, "f_dim": 0, "float": 0, "float32": 0, "flow": 0, "former": 0, "forward": [0, 2], "forward_with_detj": [0, 2], "freedom": 0, "from": 0, "function": 0, "fx": 0, "gaussian": [0, 2], "get_devic": [0, 2], "get_ip": [0, 2], "get_open_port": [0, 2], "gloo": 0, "good": 0, "grid": 0, "grow": 0, "gvar": 0, "handl": 0, "hook": 0, "i": 0, "idea": 0, "ignor": 0, "impli": 0, "increment": 0, "index": 1, "inform": 0, "input": 0, "instanc": 0, "instead": 0, "int": 0, "integr": [2, 3], "integrand": 0, "invers": [0, 2], "iter": 0, "itn_result": 0, "j": 0, "jacobian": 0, "kwarg": 0, "larg": 0, "larger": 0, "last_nev": 0, "later": 0, "latter": 0, "less": 0, "linearmap": [0, 2], "list": 0, "log": 0, "log_detj": 0, "made": 0, "make_uniform": [0, 2], "map": 2, "markovchainmontecarlo": [0, 2], "mc": 3, "mcmc": 3, "mean": 0, "merg": 0, "method": 0, "mix_rat": 0, "model": 0, "modul": [1, 2], "montecarlo": [0, 2], "much": 0, "multigpu": 1, "multivari": 0, "nburnin": 0, "need": 0, "neval": 0, "ngpu": 3, "ninc": 0, "nitn": [0, 2], "none": 0, "nstep": 0, "num_sampl": 0, "number": 0, "object": 0, "one": 0, "option": 0, "order": 0, "otherwis": 0, "over": 0, "overridden": 0, "p": 0, "packag": 2, "page": 1, "paramet": 0, "pass": 0, "per": 0, "perform": 0, "point": 0, "posit": 0, "properti": 0, "proposal_dist": 0, "q": [0, 2], "q0": 0, "random_walk": [0, 2], "rapid": 0, "rate": 0, "ravg": [0, 2], "re": 0, "recip": 0, "region": 0, "regist": 0, "repres": 0, "result": [0, 1], "return": 0, "rtol": 0, "run": 0, "sampl": [0, 2], "sample_with_detj": [0, 2], "search": 1, "seed": 0, "self": 0, "set_se": [0, 2], "setup": [0, 2], "shape": 0, "should": 0, "show": 0, "shrink": 0, "silent": 0, "sinc": 0, "slow": 0, "small": 0, "smaller": 0, "sourc": 0, "space": 0, "specifi": 0, "speed": 0, "statist": [0, 2], "str": 0, "string": 0, "subclass": 0, "submodul": 2, "sum_nev": 0, "summari": [0, 2], "take": 0, "target": 0, "task": 0, "tensor": 0, "test": 1, "than": 0, "them": 0, "thi": 0, "torch": 0, "train": 0, "transform": 0, "true": 0, "type": 0, "typic": 0, "u": 0, "unchang": 0, "uniform": [0, 2], "unmodifi": 0, "unweight": 0, "updat": [0, 2], "us": 0, "usual": 0, "util": 2, "vae": 0, "valu": 0, "var": 0, "variabl": 0, "vega": [0, 2], "weight": 0, "when": 0, "where": 0, "which": 0, "while": 0, "within": 0, "x": 0}, "titles": ["MCintegration package", "MCintegration documentation", "MCintegration", "Table of test results"], "titleterms": {"base": 0, "content": 0, "document": 1, "indic": 1, "integr": 0, "map": 0, "mcintegr": [0, 1, 2], "modul": 0, "multigpu": 3, "packag": 0, "result": 3, "submodul": 0, "tabl": [1, 3], "test": 3, "util": 0}})
\ No newline at end of file
diff --git a/test_result.html b/test_result.html
index 96f1bc8..2e0e181 100644
--- a/test_result.html
+++ b/test_result.html
@@ -6,7 +6,7 @@
Table of test results — MCintegration 1.0.0 documentation
-
+