
Commit c81fd82

small changes to docs
1 parent 66aced7 commit c81fd82

File tree

7 files changed, +48 -18 lines changed

pyapprox/surrogates/approximate.py

Lines changed: 0 additions & 2 deletions
@@ -241,9 +241,7 @@ def adaptive_approximate_sparse_grid(
         if max_level_1d is None:
             msg = "max_level_1d must be set if config_var_trans is provided"
             #raise ValueError(msg)
-        print(max_level_1d)
         for ii, cv in enumerate(config_var_trans.config_values):
-            print(len(cv))
             if len(cv) <= max_level_1d[config_variables_idx+ii]:
                 msg = f"maxlevel_1d {max_level_1d} and "
                 msg += "config_var_trans.config_values with shapes {0}".format(

pyapprox/surrogates/interp/sparse_grid.py

Lines changed: 2 additions & 1 deletion
@@ -846,7 +846,8 @@ def plot_sparse_grid_2d(samples, weights, poly_indices=None, subspace_indices=No
     return axs


-def plot_sparse_grid_3d(samples, weights, poly_indices=None, subspace_indices=None,
+def plot_sparse_grid_3d(samples, weights, poly_indices=None,
+                        subspace_indices=None,
                         active_samples=None, active_subspace_indices=None):
     from pyapprox.util.visualization import plot_3d_indices
     if samples.shape[0] != 3:

pyapprox/surrogates/interp/tensorprod.py

Lines changed: 2 additions & 2 deletions
@@ -551,14 +551,14 @@ def plot_single_basis(self, ax, nodes_1d, ii, jj, nodes=None,
             single_basis_fun, plot_limits, num_pts_1d)
         if surface_cmap is not None:
             plot_surface(X, Y, Z, ax, axis_labels=None, limit_state=None,
-                         alpha=0.3, cmap="coolwarm", zorder=3, plot_axes=False)
+                         alpha=0.3, cmap=surface_cmap, zorder=3, plot_axes=False)
         if contour_cmap is not None:
             num_contour_levels = 30
             offset = -(Z.max()-Z.min())/2
             ax.contourf(
                 X, Y, Z, zdir='z', offset=offset,
                 levels=np.linspace(Z.min(), Z.max(), num_contour_levels),
-                cmap="gray", zorder=-1)
+                cmap=contour_cmap, zorder=-1)

         if nodes is None:
             return

tutorials/multi_fidelity/plot_multiindex_collocation.py

Lines changed: 23 additions & 8 deletions
@@ -64,29 +64,29 @@ def build_tp(fun, max_level_1d):
 axs[0].plot(zz, funs[1](zz[None, :]), 'r', label=r"$f_1$")
 axs[0].plot(lf_approx.samples[0], lf_approx.values[:, 0], 'ko')
 axs[0].plot(zz, lf_approx(zz[None])[:, 0], 'g:', label=r"$f_{0,\mathcal{I}_0}$")
-axs[0].legend()
+axs[0].legend(fontsize=18)

 hf_approx = build_tp(funs[1], 1)
 axs[1].plot(zz, funs[1](zz[None, :]), 'r', label=r"$f_1$")
 axs[1].plot(hf_approx.samples[0], hf_approx.values[:, 0], 'ro')
 axs[1].plot(zz, hf_approx(zz[None])[:, 0], ':', color='gray',
             label=r"$f_{1,\mathcal{I}_1}$")
-axs[1].legend()
+axs[1].legend(fontsize=18)

 def discrepancy_fun(fun1, fun0, zz):
     return fun1(zz)-fun0(zz)

 discp_approx = build_tp(partial(discrepancy_fun, funs[1], lf_approx), 1)
 axs[2].plot(zz, funs[1](zz[None, :]), 'r', label=r"$f_1$")
 axs[2].plot(zz, funs[1](zz[None, :])-funs[0](zz[None, :]), 'k',
-            label=r"$f_1-f_0$")
+            label=r"$\delta=f_1-f_0$")
 axs[2].plot(discp_approx.samples[0], discp_approx.values[:, 0], 'ko')
 axs[2].plot(zz, discp_approx(zz[None, :]),
-            'g:', label=r"$f_{0,\mathcal{I}_0}+\delta_{\mathcal{I}_1}$")
+            'g:', label=r"$\delta_{\mathcal{I}_1}$")
 axs[2].plot(zz, lf_approx(zz[None])+discp_approx(zz[None, :]),
             'b:', label=r"$f_{0,\mathcal{I}_1}+\delta_{\mathcal{I}_1}$")
 [ax.set_xlabel(r"$z$") for ax in axs]
-_ = axs[2].legend()
+_ = axs[2].legend(fontsize=18)

 #%%
 #The left plot shows that using 5 samples of the low-fidelity model produces an accurate approximation of the low-fidelity model, but it will remain a poor approximation of the high-fidelity model even in the limit of infinite low-fidelity data. The middle plot shows that three samples of the high-fidelity model also produce a poor approximation, but if more samples were added the approximation would converge to the high-fidelity model. In contrast, the right plot shows that 5 samples of the low-fidelity model plus three samples of the high-fidelity model produce a good approximation of the high-fidelity model.
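In the notation of the plot labels above, the right panel is the standard two-level (multi-index) combination: build an accurate interpolant of the cheap model and correct it with an interpolant of the discrepancy constructed from a few expensive samples,

    f_1(z) \approx f_{0,\mathcal{I}_0}(z) + \delta_{\mathcal{I}_1}(z), \qquad \delta(z) = f_1(z) - f_0(z),

where \mathcal{I}_0 indexes the (many) low-fidelity interpolation nodes and \mathcal{I}_1 the (few) high-fidelity nodes.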
@@ -143,7 +143,7 @@ def setup_model(config_values):
             tp_approx.samples_1d, tp_approx.config_variables_idx)
         ax.plot(zz, subspace_approx_vals, '--', color=approx_colors[kk],
                 label=r"$f_{%d,%d}$" % (kk, jj))
-        ax.legend()
+        ax.legend(fontsize=18)
 _ = [[ax.set_ylim([-1, 1]), ax.set_xlabel(r"$z$")] for ax in axs.flatten()]

 #%%
@@ -228,21 +228,36 @@ def __call__(self, approx):
         {"refinement_indicator": variance_refinement_indicator,
          "max_level_1d": [10, len(config_values[0])-1],
          "univariate_quad_rule_info": None,
-         "max_level": np.inf, "max_nsamples": 50,
+         "max_level": np.inf, "max_nsamples": 80,
          "config_variables_idx": nvars,
          "config_var_trans": config_var_trans,
          "cost_function": cost_function,
          "callback": adaptive_callback}).approx

+from pyapprox.interface.wrappers import SingleFidelityWrapper
+hf_model = SingleFidelityWrapper(mi_model, config_values[0][2:3])
+mf_model = SingleFidelityWrapper(mi_model, config_values[0][1:2])
+lf_model = SingleFidelityWrapper(mi_model, config_values[0][:1])
+
 #%%
 #Now plot the adaptive algorithm
-fig, axs = plt.subplots(1, 2, sharey=False, figsize=(16, 6))
+fig, axs = plt.subplots(1, 3, sharey=False, figsize=(3*8, 6))
+plot_xx = np.linspace(-1, 1, 101)
 def animate(ii):
     [ax.clear() for ax in axs]
     sg = adaptive_callback.sparse_grids[ii]
     plot_adaptive_sparse_grid_2d(sg, axs=axs[:2])
     axs[0].set_xlim([0, 10])
     axs[0].set_ylim([0, len(config_values[0])-1])
+    axs[1].set_ylim([0, len(config_values[0])-1])
+    axs[1].set_ylabel(r"$\alpha_1$")
+    axs[2].plot(plot_xx, lf_model(plot_xx[None, :]), 'r', label=r"$f_0(z_1)$")
+    axs[2].plot(plot_xx, mf_model(plot_xx[None, :]), 'g', label=r"$f_1(z_1)$")
+    axs[2].plot(plot_xx, hf_model(plot_xx[None, :]), 'k', label=r"$f_2(z_1)$")
+    axs[2].plot(plot_xx, sg(plot_xx[None, :]), '--b', label=r"$f_{I}(z_1)$")
+    axs[2].set_xlabel(r"$z_1$")
+    axs[2].legend(fontsize=18)
+

 import matplotlib.animation as animation
 ani = animation.FuncAnimation(

tutorials/multi_fidelity/plot_multioutput_acv.py

Lines changed: 8 additions & 0 deletions
@@ -3,6 +3,8 @@
 ========================================

 This tutorial demonstrates how computing statistics for multiple outputs simultaneously can improve the accuracy of ACV estimates of individual statistics when compared to ACV applied to each output separately.
+
+The optimal control variate weights are obtained by minimizing the estimator covariance [RM1985]_.
 """
 import numpy as np
 import matplotlib.pyplot as plt
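For context on the sentence added above (a standard control-variate identity [RM1985]_, not code introduced by this commit): if \hat{Q}_0 estimates the statistic of interest and \hat{Q}_1,\dots,\hat{Q}_M estimate auxiliary quantities with known (or separately estimated) means, the weighted estimator

    \hat{Q}(\eta) = \hat{Q}_0 + \sum_{m=1}^{M} \eta_m\,(\hat{Q}_m - \mathbb{E}[\hat{Q}_m])

has its variance minimized by \eta^\star = -C^{-1}c, where C_{mn} = \mathrm{Cov}(\hat{Q}_m, \hat{Q}_n) and c_m = \mathrm{Cov}(\hat{Q}_m, \hat{Q}_0), giving \mathrm{Var}[\hat{Q}(\eta^\star)] = \mathrm{Var}[\hat{Q}_0] - c^\top C^{-1} c. Estimating several outputs jointly enlarges the set of covariances available in C and c, which is the effect this tutorial measures.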
@@ -53,3 +55,9 @@ def __call__(self, est_covariance, est):
 ax = plt.subplots(1, 1, figsize=(8, 6))[1]
 _ = mf.plot_estimator_variance_reductions(
     [est, est_0], est_labels, ax, criteria=CustomComparisionCriteria())
+
+
+#%%
+#References
+#----------
+#.. [RM1985] `Reuven Y. Rubinstein and Ruth Marcus. Efficiency of multivariate control variates in Monte Carlo simulation. Operations Research, 33(3):661–677, 1985. <https://doi.org/10.48550/arXiv.2310.00125>`_

tutorials/surrogates/plot_sparse_grids.py

Lines changed: 1 addition & 1 deletion
@@ -318,7 +318,7 @@ def __call__(self, approx):
 fig, axs = plt.subplots(1, 3, sharey=False, figsize=(3*8, 6))
 ranges = benchmark.variable.get_statistics("interval", 1.0).flatten()
 data = [get_meshgrid_function_data(sg, ranges, 51)
-       for sg in adaptive_callback.sparse_grids]
+        for sg in adaptive_callback.sparse_grids]
 Z_min = np.min([d[2] for d in data])
 Z_max = np.max([d[2] for d in data])
 levels = np.linspace(Z_min, Z_max, 21)

tutorials/surrogates/plot_univariate_interpolation.py

Lines changed: 12 additions & 4 deletions
@@ -67,10 +67,12 @@
 lagrange_basis = UnivariateLagrangeBasis()
 lagrange_basis_vals = lagrange_basis(cheby_nodes, samples)
 ax[0].plot(samples, lagrange_basis_vals)
+ax[0].plot(cheby_nodes, cheby_nodes*0, 'ko')
 equidistant_nodes = np.linspace(-1, 1, nnodes)
 quadratic_basis = UnivariatePiecewiseQuadraticBasis()
 piecewise_basis_vals = quadratic_basis(equidistant_nodes, samples)
 _ = ax[1].plot(samples, piecewise_basis_vals)
+_ = ax[1].plot(equidistant_nodes, equidistant_nodes*0, 'ko')

 #%%
 #Notice that unlike the Lagrange basis, the piecewise polynomial basis is non-zero only on a local region of the input space.
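A minimal, self-contained illustration of that locality claim (plain NumPy, using a piecewise-linear "hat" basis as a stand-in for the pyapprox quadratic basis; node and sample counts are arbitrary choices, not values from the tutorial):

import numpy as np

nodes = np.linspace(-1, 1, 5)
samples = np.linspace(-1, 1, 201)

def hat_basis(nodes, samples):
    # piecewise-linear basis: column jj equals 1 at nodes[jj], 0 at the other
    # nodes, and is identically zero outside the two cells touching nodes[jj]
    return np.array([np.interp(samples, nodes, np.eye(len(nodes))[jj])
                     for jj in range(len(nodes))]).T

def lagrange_basis(nodes, samples):
    # global polynomial basis: column jj is the jj-th Lagrange polynomial
    vals = np.ones((len(samples), len(nodes)))
    for jj in range(len(nodes)):
        for kk in range(len(nodes)):
            if kk != jj:
                vals[:, jj] *= (samples-nodes[kk])/(nodes[jj]-nodes[kk])
    return vals

# fraction of the domain on which each basis function is (numerically) non-zero
print((np.abs(hat_basis(nodes, samples)) > 1e-12).mean(axis=0))       # roughly [0.25, 0.5, 0.5, 0.5, 0.25]
print((np.abs(lagrange_basis(nodes, samples)) > 1e-12).mean(axis=0))  # close to 1 in every column

Each local basis function vanishes outside a couple of cells, whereas every Lagrange polynomial is non-zero almost everywhere on the interval.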
@@ -160,7 +162,7 @@ def fun(samples):
 ax = plt.subplots(1, 1, figsize=(8, 6))[1]
 plot_xx = np.linspace(0, 1, 101)
 true_vals = benchmark.fun(plot_xx[None, :])
-pbwt = "w"
+pbwt = r"\pi"
 ax.plot(plot_xx, true_vals, '-r', label=r'$f(z)$')
 ax.plot(plot_xx, interp(plot_xx[None, :]), ':k', label=r'$f_M^\nu$')
 ax.plot(train_samples[0], train_values[:, 0], 'ko', ms=10,
@@ -176,7 +178,8 @@ def fun(samples):
     plot_xx, ax.get_ylim()[0], pdf_vals+ax.get_ylim()[0],
     alpha=0.3, visible=True,
     label=r'$%s(z)$' % pbwt)
-_ = ax.legend()
+ax.set_xlabel(r'$M$', fontsize=24)
+_ = ax.legend(fontsize=18, loc="upper right")

 #%%
 #As you can see, the approximation that targets the uniform norm is "more accurate" on average over the domain, but the interpolant that directly targets accuracy with respect to the desired Beta distribution is more accurate in the regions of non-negligible probability.
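The trade-off described above is exactly what the weighted norm used later in this file measures: with \pi the density of the target (Beta) variable,

    \| f - f_M^\nu \|_{L^2_\pi} = \Big( \int \big(f(z) - f_M^\nu(z)\big)^2 \, \pi(z)\, dz \Big)^{1/2},

so errors in regions where \pi is negligible contribute little, which is why the \pi-targeted interpolant can win in this norm while losing in the unweighted (uniform) one.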
@@ -237,6 +240,11 @@ def compute_L2_error(interp, validation_samples, validation_values):
 ax.semilogy(ntrain_samples_list, results,
             label="{0:1.2f}".format(density_ratio))

-ax.set_xlabel(r'$M$')
-ax.set_ylabel(r'$\| f-f_M^\nu\|_{L^2_%s}$' % pbwt)
+ax.set_xlabel(r'$M$', fontsize=24)
+ax.set_ylabel(r'$\| f-f_M^\nu\|_{L^2_%s}$' % pbwt, fontsize=24)
 _ = ax.legend(ncol=2)
+
+#%%
+#References
+#----------
+#.. [XJD2013] `Chen Xiaoxiao, Park Eun-Jae, Xiu Dongbin. A flexible numerical approach for quantification of epistemic uncertainty. J. Comput. Phys., 240 (2013), pp. 211-224 <https://doi.org/10.1016/j.jcp.2013.01.018>`_
