@@ -34,7 +34,7 @@ import Ipopt
# \begin{array}{r l}
# \min\limits_{x} & x_1^2 + x_2^2 + z \\
# s.t. & \begin{array}{r l}
- # z \ge \max\limits_{y} & x_1^2 y_1 + x_2^2 * y_2 - x_1 y_1^4 - 2 x_2 y_2^4 \\
+ # z \ge \max\limits_{y} & x_1^2 y_1 + x_2^2 y_2 - x_1 y_1^4 - 2 x_2 y_2^4 \\
# s.t. & (y_1 - 10)^2 + (y_2 - 10)^2 \le 25
# \end{array} \\
# & x \ge 0.
@@ -54,7 +54,7 @@ import Ipopt
# ```math
# \begin{array}{r l}
- # V(x_1, x_z) = \max\limits_{y} & x_1^2 y_1 + x_2^2 * y_2 - x_1 y_1^4 - 2 x_2 y_2^4 \\
+ # V(x_1, x_2) = \max\limits_{y} & x_1^2 y_1 + x_2^2 y_2 - x_1 y_1^4 - 2 x_2 y_2^4 \\
# s.t. & (y_1 - 10)^2 + (y_2 - 10)^2 \le 25
# \end{array}
# ```
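# Given a fixed ``x``, the lower-level problem is an ordinary nonlinear program
# that we can solve with Ipopt. As a minimal sketch (assuming `using JuMP` and
# `import Ipopt` at the top of the file, and a JuMP version that accepts
# nonlinear expressions directly in `@objective`), such a solver might look
# like:

function solve_lower_level(x...)
    model = Model(Ipopt.Optimizer)
    set_silent(model)
    @variable(model, y[1:2])
    # Lower-level objective, with the upper-level decision `x` held fixed.
    @objective(model, Max,
        x[1]^2 * y[1] + x[2]^2 * y[2] - x[1] * y[1]^4 - 2 * x[2] * y[2]^4)
    # Lower-level feasible region: a disk of radius 5 centered at (10, 10).
    @constraint(model, (y[1] - 10)^2 + (y[2] - 10)^2 <= 25)
    optimize!(model)
    return objective_value(model), value.(y)
end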
@@ -74,7 +74,7 @@ function solve_lower_level(x...)
return objective_value(model), value.(y)
end
- # This function takes a guess of ``x``, and returns the optimal lower-level
+ # This function takes a guess of ``x`` and returns the optimal lower-level
# objective-value and the optimal response ``y``. The reason why we need both
# the objective and the optimal ``y`` will be made clear shortly, but for now
# let us define:
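# As a sketch, such a definition just wraps `solve_lower_level` and keeps the
# objective value:

function V(x...)
    # Only the optimal objective is needed here; the optimal `y` is used later.
    f, _ = solve_lower_level(x...)
    return f
end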
@@ -84,7 +84,7 @@ function V(x...)
return f
end
- # We can substitute ``V`` into our full problem to create:
+ # Then, we can substitute ``V`` into our full problem to create:
# ```math
# \begin{array}{r l}
@@ -96,7 +96,8 @@
# This looks like a nonlinear optimization problem with a user-defined function
# ``V``! However, because ``V`` solves an optimization problem internally, we
# can't use automatic differentiation to compute the first and second
- # derivatives.
+ # derivatives. Instead, we can use JuMP's ability to pass callback functions
+ # for the gradient and hessian.
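# To preview where this is heading, here is a rough sketch of how such
# callbacks can be attached (assuming JuMP 1.15 or later for `@operator`; the
# names `∇V`, `∇²V`, and `op_V` are ours). The derivative formulas come from
# differentiating the lower-level objective at the optimal ``y``, as the
# discussion below explains; the hessian treats that optimal ``y`` as locally
# constant, so it is an approximation.

function ∇V(g::AbstractVector, x...)
    _, y = solve_lower_level(x...)
    g[1] = 2 * x[1] * y[1] - y[1]^4
    g[2] = 2 * x[2] * y[2] - 2 * y[2]^4
    return
end

function ∇²V(H::AbstractMatrix, x...)
    _, y = solve_lower_level(x...)
    # Only the lower triangle is read; the cross-derivative is zero.
    H[1, 1] = 2 * y[1]
    H[2, 2] = 2 * y[2]
    return
end

model = Model(Ipopt.Optimizer)
@variable(model, x[1:2] >= 0)
@operator(model, op_V, 2, V, ∇V, ∇²V)
@objective(model, Min, x[1]^2 + x[2]^2 + op_V(x[1], x[2]))
optimize!(model)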
# First up, we need to define the gradient of ``V`` with respect to ``x``. In
# general, this may be difficult to compute, but because ``x`` appears only in
@@ -146,14 +147,14 @@
# This solution approach worked, but it has a performance problem: every time
# we needed to compute the value, gradient, or hessian of ``V``, we had to
- # resolve the lower-level optimization problem! This is wasteful, because we
+ # re-solve the lower-level optimization problem! This is wasteful, because we
# will often call the gradient and hessian at the same point, and so solving the
# problem twice with the same input repeats work unnecessarily.
# We can work around this by using memoization:
function memoized_solve_lower_level()
- last_x, f, y = nothing, 0.0, [NaN, NaN]
+ last_x, f, y = nothing, NaN, [NaN, NaN]
function _update_if_needed(x...)
if last_x != x
f, y = solve_lower_level(x...)