
Commit 66aced7

Add tutorial on univariate interpolation
1 parent 89aed32 commit 66aced7

9 files changed: +410 −144 lines changed

docs/source/conf.py

Lines changed: 5 additions & 5 deletions

@@ -70,6 +70,7 @@
 # ExpDesign
 # 'plot_bayesian_oed.py', # ignore until paper published
 # Surrogates
+'plot_univariate_interpolation.py',
 'plot_tensor_product_interpolation.py',
 'plot_sparse_grids.py',
 'plot_adaptive_leja_interpolation.py',

@@ -164,7 +165,7 @@ def __call__(self, filename):
 # add_markdown_cell(work_notebook, first_cell)#jdj
 # add_code_cell(work_notebook,"%matplotlib inline")#jdj
 # then add user defs like so
-sphinx_gallery_conf['first_notebook_cell'] = r"Add latex macros$$\newcommand{\V}[1]{{\boldsymbol{#1}}}\newcommand{mean}[1]{{\mathbb{E}\left[#1\right]}}\newcommand{var}[1]{{\mathbb{V}\left[#1\right]}}\newcommand{covar}[2]{\mathbb{C}\text{ov}\left[#1,#2\right]}\newcommand{corr}[2]{\mathbb{C}\text{or}\left[#1,#2\right]}\newcommand{argmin}{\mathrm{argmin}}\def\rv{z}\def\reals{\mathbb{R}}\def\rvset{{\mathcal{Z}}}\def\pdf{\rho}\def\rvdom{\Gamma}\def\coloneqq{\colon=}\newcommand{norm}{\lVert #1 \rVert}\def\argmax{\operatorname{argmax}}\def\ai{\alpha}\def\bi{\beta}\newcommand{\dx}[1]{\;\mathrm{d}#1}\newcommand{mat}[1]{{\boldsymbol{\mathrm{#1}}}}$$"
+sphinx_gallery_conf['first_notebook_cell'] = r"Add latex macros$$\newcommand{\V}[1]{{\boldsymbol{#1}}}\newcommand{mean}[1]{{\mathbb{E}\left[#1\right]}}\newcommand{var}[1]{{\mathbb{V}\left[#1\right]}}\newcommand{covar}[2]{\mathbb{C}\text{ov}\left[#1,#2\right]}\newcommand{corr}[2]{\mathbb{C}\text{or}\left[#1,#2\right]}\newcommand{argmin}{\mathrm{argmin}}\def\rv{z}\def\reals{\mathbb{R}}\def\rvset{{\mathcal{Z}}}\def\pdf{\rho}\def\rvdom{\Gamma}\def\coloneqq{\colon=}\newcommand{norm}{\lVert #1 \rVert}\def\argmax{\operatorname{argmax}}\def\ai{\alpha}\def\bi{\beta}\newcommand{\dx}[1]{\;\text{d}#1}\newcommand{\mat}[1]{{\boldsymbol{\mathrm{#1}}}}$$"

 # if change conf make sure to remove source/auto_examples, using make clean
 # Note sphinx can use align with single line, e.g. a=1 & & b=1 if \\ is added to the end of the line, i.e a=1 & & b=1\\

@@ -265,11 +266,10 @@ def __call__(self, filename):
 \def\rvdom{\Gamma}
 \def\coloneqq{\colon=}
 \newcommand{\norm}[1]{\lVert #1 \rVert}
-%\def\argmax{\operatorname{argmax}}
 \def\ai{\alpha}
 \def\bi{\beta}
-\newcommand{\dx}[1]{\;\mathrm{d}#1}
-\newcommand{mat}[1]{{\boldsymbol{\mathrm{#1}}}}
+\newcommand{\dx}[1]{\;\text{d}#1}
+\newcommand{\mat}[1]{{\boldsymbol{\mathrm{#1}}}}
 ''',
 }

@@ -327,7 +327,7 @@ def __call__(self, filename):
 "corr": [r'\mathbb{C}\text{or}\left[#1,#2\right]', 2],
 "ai": r'\alpha',
 "bi": r'\beta',
-"dx": [r'\;\mathrm{d}#1', 1],
+"dx": [r'\;\text{d}#1', 1],
 "mat": [r'{\boldsymbol{\mathrm{#1}}}', 1],
 },
 },
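The macro fixes above swap \mathrm{d} for \text{d} in \dx and restore the missing backslash in \mat's definition. As a quick sanity check, here is a minimal standalone LaTeX sketch exercising the corrected macros; the display equation is illustrative only, not taken from the tutorial:

\documentclass{article}
\usepackage{amsmath}  % provides \text and \boldsymbol (via amsbsy)
% The two corrected macros from this commit:
\newcommand{\dx}[1]{\;\text{d}#1}
\newcommand{\mat}[1]{{\boldsymbol{\mathrm{#1}}}}
\begin{document}
% \dx typesets an upright differential; \mat a bold upright matrix symbol
\[ \int_{-1}^{1} f(z)\dx{z} \qquad \mat{A}\mat{x} = \mat{b} \]
\end{document}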

pyapprox/surrogates/interp/barycentric_interpolation.py

Lines changed: 0 additions & 51 deletions

@@ -513,54 +513,3 @@ def tensor_product_barycentric_lagrange_interpolation(
     if not return_all:
         return interp_vals
     return interp_vals, grid_samples
-
-
-def plot_tensor_product_lagrange_basis_2d(
-        univariate_quad_rule, npts, ii, jj, ax=None):
-    abscissa = univariate_quad_rule(npts)[0]
-    abscissa_1d = [abscissa, abscissa]
-    barycentric_weights_1d = [compute_barycentric_weights_1d(abscissa_1d[0]),
-                              compute_barycentric_weights_1d(abscissa_1d[1])]
-    training_samples = cartesian_product(abscissa_1d, 1)
-    fn_vals = np.zeros((training_samples.shape[1], 1))
-    idx = jj*abscissa_1d[1].shape[0]+ii
-    fn_vals[idx] = 1.
-
-    def f(samples): return multivariate_barycentric_lagrange_interpolation(
-        samples, abscissa_1d, barycentric_weights_1d, fn_vals,
-        np.array([0, 1]))
-
-    plot_limits = [-1, 1, -1, 1]
-    num_pts_1d = 101
-    X, Y, Z = get_meshgrid_function_data(f, plot_limits, num_pts_1d)
-    if ax is None:
-        ax = create_3d_axis()
-    cmap = mpl.cm.coolwarm
-
-    plot_surface(X, Y, Z, ax, axis_labels=None, limit_state=None,
-                 alpha=0.3, cmap=mpl.cm.coolwarm, zorder=3, plot_axes=False)
-    num_contour_levels = 30
-    offset = -(Z.max()-Z.min())/2
-    cmap = mpl.cm.gray
-    ax.contourf(
-        X, Y, Z, zdir='z', offset=offset,
-        levels=np.linspace(Z.min(), Z.max(), num_contour_levels),
-        cmap=cmap, zorder=-1)
-    ax.plot(training_samples[0, :], training_samples[1, :],
-            offset*np.ones(training_samples.shape[1]), 'o',
-            zorder=100, color='b')
-
-    x = np.linspace(-1, 1, 100)
-    y = training_samples[1, idx]*np.ones((x.shape[0]))
-    z = f(np.vstack((x[np.newaxis, :], y[np.newaxis, :])))[:, 0]
-    ax.plot(x, Y.max()*np.ones((x.shape[0])), z, '-r')
-    ax.plot(abscissa_1d[0], Y.max()*np.ones(
-        (abscissa_1d[0].shape[0])), np.zeros(abscissa_1d[0].shape[0]), 'or')
-
-    y = np.linspace(-1, 1, 100)
-    x = training_samples[0, idx]*np.ones((y.shape[0]))
-    z = f(np.vstack((x[np.newaxis, :], y[np.newaxis, :])))[:, 0]
-    ax.plot(X.min()*np.ones((x.shape[0])), y, z, '-r')
-    ax.plot(X.min()*np.ones(
-        (abscissa_1d[1].shape[0])), abscissa_1d[1],
-        np.zeros(abscissa_1d[1].shape[0]), 'or')

pyapprox/surrogates/interp/tensorprod.py

Lines changed: 58 additions & 8 deletions

@@ -504,15 +504,11 @@ def get_univariate_interpolation_basis(basis_type):
     return basis_dict[basis_type]()


-class TensorProductInterpolant():
+class TensorProductBasis():
     def __init__(self, bases_1d):
         self._bases_1d = bases_1d

-        self._nodes_1d = None
-        self._nnodes_1d = None
-        self._values = None
-
-    def basis(self, nodes_1d, samples):
+    def __call__(self, nodes_1d, samples):
         # assumes each array in nodes_1d is in ascending order
         nvars = len(nodes_1d)
         nnodes_1d = np.array([n.shape[0] for n in nodes_1d])

@@ -535,11 +531,65 @@ def basis(self, nodes_1d, samples):
         indices = cartesian_product([np.arange(N) for N in nnodes_1d_active])
         nindices = indices.shape[1]
         temp2 = temp1[indices.ravel()+np.repeat(
-            np.arange(nactive_vars)*basis_vals_1d.shape[1], nindices), :].reshape(
-            nactive_vars, nindices, nsamples)
+            np.arange(nactive_vars)*basis_vals_1d.shape[1],
+            nindices), :].reshape(nactive_vars, nindices, nsamples)
         basis_vals = np.prod(temp2, axis=0).T
         return basis_vals

+    def _single_basis_fun(self, nodes_1d, idx, xx):
+        return self(nodes_1d, xx)[:, idx]
+
+    def plot_single_basis(self, ax, nodes_1d, ii, jj, nodes=None,
+                          plot_limits=[-1, 1, -1, 1],
+                          num_pts_1d=101, surface_cmap="coolwarm",
+                          contour_cmap="gray"):
+        from pyapprox.util.visualization import (
+            get_meshgrid_function_data, plot_surface)
+        idx = jj*nodes_1d[0].shape[0]+ii
+        single_basis_fun = partial(self._single_basis_fun, nodes_1d, idx)
+        X, Y, Z = get_meshgrid_function_data(
+            single_basis_fun, plot_limits, num_pts_1d)
+        # define offset here so it is available when plotting nodes below
+        offset = -(Z.max()-Z.min())/2
+        if surface_cmap is not None:
+            plot_surface(X, Y, Z, ax, axis_labels=None, limit_state=None,
+                         alpha=0.3, cmap=surface_cmap, zorder=3,
+                         plot_axes=False)
+        if contour_cmap is not None:
+            num_contour_levels = 30
+            ax.contourf(
+                X, Y, Z, zdir='z', offset=offset,
+                levels=np.linspace(Z.min(), Z.max(), num_contour_levels),
+                cmap=contour_cmap, zorder=-1)
+
+        if nodes is None:
+            return
+        ax.plot(nodes[0, :], nodes[1, :],
+                offset*np.ones(nodes.shape[1]), 'o',
+                zorder=100, color='b')
+
+        x = np.linspace(-1, 1, 100)
+        y = nodes[1, idx]*np.ones((x.shape[0]))
+        z = single_basis_fun(np.vstack((x[None, :], y[None, :])))
+        ax.plot(x, Y.max()*np.ones((x.shape[0])), z, '-r')
+        ax.plot(nodes_1d[0], Y.max()*np.ones(
+            (nodes_1d[0].shape[0])), np.zeros(nodes_1d[0].shape[0]), 'or')
+
+        y = np.linspace(-1, 1, 100)
+        x = nodes[0, idx]*np.ones((y.shape[0]))
+        z = single_basis_fun(np.vstack((x[None, :], y[None, :])))
+        ax.plot(X.min()*np.ones((x.shape[0])), y, z, '-r')
+        ax.plot(X.min()*np.ones(
+            (nodes_1d[1].shape[0])), nodes_1d[1],
+            np.zeros(nodes_1d[1].shape[0]), 'or')
+
+
+class TensorProductInterpolant():
+    def __init__(self, bases_1d):
+        self.basis = TensorProductBasis(bases_1d)
+
+        self._nodes_1d = None
+        self._nnodes_1d = None
+        self._values = None
+
     def tensor_product_grid(self, nodes_1d):
         return cartesian_product(nodes_1d)
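For orientation, a hedged usage sketch of the refactored TensorProductBasis; the basis-type key "lagrange" passed to get_univariate_interpolation_basis and the resulting array shapes are assumptions inferred from this diff, not verified against the library:

# Hypothetical usage of TensorProductBasis after this refactor.
# Assumption: "lagrange" is a valid key understood by
# get_univariate_interpolation_basis; substitute whichever basis_type
# the library actually registers.
import numpy as np
import matplotlib.pyplot as plt
from pyapprox.surrogates.interp.tensorprod import (
    TensorProductBasis, get_univariate_interpolation_basis)

basis_1d = get_univariate_interpolation_basis("lagrange")  # assumed key
basis = TensorProductBasis([basis_1d, basis_1d])

# __call__ assumes each 1D node array is in ascending order
nodes_1d = [np.linspace(-1, 1, 5), np.linspace(-1, 1, 5)]
samples = np.random.uniform(-1, 1, (2, 1000))
basis_vals = basis(nodes_1d, samples)  # expected shape (1000, 25)

# Plot the tensor-product basis function anchored at node (ii, jj) = (2, 2)
ax = plt.figure().add_subplot(projection="3d")
basis.plot_single_basis(ax, nodes_1d, 2, 2)
plt.show()

The same object is now what TensorProductInterpolant builds internally (self.basis), so interpolants and basis visualization share one code path.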

tutorials/multi_fidelity/plot_ensemble_selection.py

Lines changed: 55 additions & 7 deletions

@@ -13,7 +13,7 @@
 from pyapprox.interface.wrappers import WorkTrackingModel, TimerModel
 from pyapprox.util.visualization import mathrm_label

-np.random.seed()
+np.random.seed(1)

 #%%
 #Configure the benchmark

@@ -41,8 +41,6 @@
 # Add wrapper to compute the time each model takes to run
 funs = [WorkTrackingModel(
     TimerModel(fun), base_model=fun) for fun in benchmark.funs]
-for fun in funs:
-    print(fun)

 #%%
 #Run the pilot study

@@ -66,17 +64,22 @@
 ax = plt.subplots(1, 1, figsize=(8, 6))[1]
 multifidelity.plot_model_costs(model_costs, ax=ax)

+ax = plt.subplots(1, 1, figsize=(16, 12))[1]
+_ = multifidelity.plot_correlation_matrix(
+    multifidelity.get_correlation_from_covariance(stat._cov.numpy()), ax=ax,
+    format_string=None)
+
 #%%
-#Find the best estimator
-#-----------------------
-# The following code finds the best estimator by iterating over all possible subsets of models. Note this code can easily be modified to also iterate over a list of estimator types.
+#Find the best subset of low-fidelity models
+#-------------------------------------------
+# The following code finds the subset of models that minimizes the variance of a single estimator by iterating over all possible subsets of models and computing, for each subset, the optimal sample allocation and associated estimator variance. Here we restrict our attention to subsets containing at most five models (the high-fidelity model plus up to four low-fidelity models).

 # Some MFMC estimators will fail because the models
 # do not satisfy its hierarchical condition, so set
 # allow_failures=True
 best_est = multifidelity.get_estimator(
     "mfmc", stat, model_costs, allow_failures=True,
-    max_nmodels=4, save_candidate_estimators=True)
+    max_nmodels=5, save_candidate_estimators=True)

 target_cost = 1e2
 best_est.allocate_samples(target_cost)

@@ -110,3 +113,48 @@
     best_ests, est_labels, ax)
 ax.set_xlabel(mathrm_label("Low fidelity models"))
 plt.show()
+
+#%%
+#Find the best estimator
+#-----------------------
+#We can also find the best estimator from a list of estimator types while still determining the best model subset. This code chooses the best estimator from two parameterized ACV estimator classes. Specifically, it chooses from all possible generalized recursive difference (GRD) and generalized multifidelity (GMF) estimators that use at most 3 models and have a maximum tree depth of 3.
+best_est = multifidelity.get_estimator(
+    ["grd", "gmf"], stat, model_costs, allow_failures=True,
+    max_nmodels=3, tree_depth=3,
+    save_candidate_estimators=True)
+
+target_cost = 1e2
+best_est.allocate_samples(target_cost)
+print("Predicted variance",
+      best_est._covariance_from_npartition_samples(
+          best_est._rounded_npartition_samples))
+
+# Sort candidate estimators into lists with the same estimator type
+from collections import defaultdict
+est_dict = defaultdict(list)
+for result in best_est._candidate_estimators:
+    if result[0] is None:
+        # skip failures
+        continue
+    est_dict[result[0].__class__.__name__].append(result)
+
+
+est_name_list = list(est_dict.keys())
+est_name_list.sort()
+best_est_indices = [
+    np.argmin([result[0]._optimized_criteria for result in est_dict[name]])
+    for name in est_name_list]
+best_ests = [est_dict[est_name_list[ii]][best_est_indices[ii]][0]
+             for ii in range(len(est_name_list))]
+est_labels = [
+    "{0}({1}, {2})".format(
+        est_name_list[ii],
+        est_dict[est_name_list[ii]][best_est_indices[ii]][1],
+        est_dict[est_name_list[ii]][best_est_indices[ii]][0]._recursion_index)
+    for ii in range(len(est_name_list))]
+
+ax = plt.subplots(1, 1, figsize=(8, 6))[1]
+_ = multifidelity.plot_estimator_variance_reductions(
+    best_ests, est_labels, ax)
+ax.set_xlabel(mathrm_label("Estimator types"))
+plt.show()

tutorials/multi_fidelity/plot_multioutput_acv.py

Lines changed: 8 additions & 2 deletions

@@ -17,8 +17,14 @@
 nmodels = 3

 cov = benchmark.covariance
-W = benchmark.fun.covariance_of_centered_values_kronker_product()
-B = benchmark.fun.covariance_of_mean_and_variance_estimators()
+
+labels = ([r"$(f_{0})_{%d}$" % (ii+1) for ii in range(benchmark.nqoi)] +
+          [r"$(f_{1})_{%d}$" % (ii+1) for ii in range(benchmark.nqoi)] +
+          [r"$(f_{2})_{%d}$" % (ii+1) for ii in range(benchmark.nqoi)])
+ax = plt.subplots(1, 1, figsize=(8, 6))[1]
+_ = mf.plot_correlation_matrix(
+    mf.get_correlation_from_covariance(cov), ax=ax, model_names=labels,
+    label_fontsize=20)

 target_cost = 10
 stat = mf.multioutput_stats["mean"](benchmark.nqoi)

tutorials/multi_fidelity/plot_multioutput_monte_carlo.py

Lines changed: 1 addition & 1 deletion

@@ -51,7 +51,7 @@
 #Now construct the estimator. The following requires the costs of the models,
 #the covariance, and some other quantities W and B. These four quantities are not
 #needed to compute the value of the estimator from a sample set; however, they are needed to compute the MSE of the estimator and the number of samples of each model for a given target cost. We load these quantities but ignore their meaning for the moment.
-costs = [1]
+costs = np.array([1])
 nqoi = 3
 cov = benchmark.covariance[:3, :3]
 W = benchmark.fun.covariance_of_centered_values_kronker_product()[:9, :9]

tutorials/multi_fidelity/plot_pilot_studies.py

Lines changed: 2 additions & 2 deletions

@@ -183,7 +183,7 @@ def compute_mse(build_acv, funs, variable, target_cost, npilot_samples,
 oracle_mse = oracle_est._optimized_covariance[0, 0].item()
 ax = plt.subplots(1, 1, figsize=(8, 6))[1]
 ax.axhline(y=oracle_mse, ls='--', color='k', label=mathrm_label("Oracle MSE"))
-ax.axhline(y=mc_mse, ls=':', color='r', label=mathrm_label("MC MSE"))
+ax.axhline(y=mc_mse, ls=':', color='r', label=mathrm_label("Oracle MC MSE"))
 ax.plot(npilot_samples_list, mse_list, '-o', label=mathrm_label("Pilot MSE"))
 ax.set_xlabel(mathrm_label("Number of pilot samples"))
 _ = ax.legend()

@@ -212,7 +212,7 @@ def compute_mse(build_acv, funs, variable, target_cost, npilot_samples,

 ax = plt.subplots(1, 1, figsize=(8, 6))[1]
 ax.axhline(y=oracle_mse, ls='--', color='k', label=mathrm_label("Oracle MSE"))
-ax.axhline(y=mc_mse, ls=':', color='r', label=mathrm_label("MC MSE"))
+ax.axhline(y=mc_mse, ls=':', color='r', label=mathrm_label("Oracle MC MSE"))
 ax.plot(npilot_samples_list, mse_list, '-o', label=mathrm_label("Pilot MSE"))
 ax.set_xlabel(mathrm_label("Number of pilot samples"))
 _ = ax.legend()
