Commit 42d281f

Merge pull request #236 from Simple-Robotics/topic/gar/devel
topic/gar/devel
2 parents 44cabf0 + 4d2671f commit 42d281f

30 files changed: +777, -519 lines

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
@@ -17,12 +17,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - `SolverProxDDP`: add temporary vectors for linesearch
 - `SolverProxDDP`: remove exceptions from `computeMultipliers()`, return a bool flag
+- `gar`: rework and move sparse matrix utilities to `gar/utils.hpp`
 - `SolverProxDDP`: Rename `maxRefinementSteps_` and `refinementThreshold_` to snake-case
 - `SolverProxDDP`: make `linesearch_` public
 - Change uses of `ConstraintSetBase` template class to `ConstraintSetTpl` (following changes in proxsuite-nlp 0.9.0) ([#223](https://github.com/Simple-Robotics/aligator/pull/233))
 - [gar] Throw an exception if trying to instantiate `ParallelRiccatiSolver` with num_threads smaller than 2.
 - [API BREAKING] Rename friction cone for centroidal into CentroidalFrictionCone ([#234](https://github.com/Simple-Robotics/aligator/pull/234))
 - Change the linear multibody friction cone to the true "ice cream" cone ([#238](https://github.com/Simple-Robotics/aligator/pull/238))
+- [gar] Rework `RiccatiSolverDense` to not use inner struct `FactorData`
+- Various changes to `gar` tests and `test_util`, add `LQRKnot::isApprox()`
+
+### Removed
+
+- Default constructor for `LQRProblemTpl`
+- Removed header `gar/fwd.hpp` with forward-declarations
 
 ## [0.9.0] - 2024-10-11
 

bindings/python/src/gar/expose-gar.cpp

Lines changed: 6 additions & 21 deletions
@@ -3,7 +3,6 @@
 #include "aligator/python/blk-matrix.hpp"
 #include "aligator/gar/lqr-problem.hpp"
 #include "aligator/gar/riccati-base.hpp"
-#include "aligator/gar/utils.hpp"
 
 #include "aligator/python/utils.hpp"
 #include "aligator/python/visitors.hpp"
@@ -24,17 +23,6 @@ using context::VectorXs;
 
 using knot_vec_t = lqr_t::KnotVector;
 
-bp::dict lqr_sol_initialize_wrap(const lqr_t &problem) {
-  bp::dict out;
-  auto ss = lqrInitializeSolution(problem);
-  auto &[xs, us, vs, lbdas] = ss;
-  out["xs"] = xs;
-  out["us"] = us;
-  out["vs"] = vs;
-  out["lbdas"] = lbdas;
-  return out;
-}
-
 static void exposeBlockMatrices() {
   BlkMatrixPythonVisitor<BlkMatrix<MatrixXs, 2, 2>>::expose("BlockMatrix22");
   BlkMatrixPythonVisitor<BlkMatrix<VectorXs, 4, 1>>::expose("BlockVector4");
@@ -61,6 +49,8 @@ void exposeParallelSolver();
 void exposeDenseSolver();
 // fwd-declare exposeProxRiccati()
 void exposeProxRiccati();
+// fwd-declare exposeGarUtils()
+void exposeGarUtils();
 
 void exposeGAR() {
 
@@ -97,6 +87,9 @@ void exposeGAR() {
       .def_readwrite("Gu", &knot_t::Gu)
      .def_readwrite("gamma", &knot_t::gamma)
       //
+      .def("isApprox", &knot_t::isApprox,
+           ("self"_a, "prec"_a = std::numeric_limits<Scalar>::epsilon()))
+      //
       .def(CopyableVisitor<knot_t>())
       .def(PrintableVisitor<knot_t>());
 
@@ -126,15 +119,7 @@ void exposeGAR() {
       .def("forward", &riccati_base_t::forward,
           ("self"_a, "xs", "us", "vs", "lbdas", "theta"_a = std::nullopt));
 
-  bp::def(
-      "lqrDenseMatrix",
-      +[](const lqr_t &problem, Scalar mudyn, Scalar mueq) {
-        auto mat_rhs = lqrDenseMatrix(problem, mudyn, mueq);
-        return bp::make_tuple(std::get<0>(mat_rhs), std::get<1>(mat_rhs));
-      },
-      ("problem"_a, "mudyn", "mueq"));
-
-  bp::def("lqrInitializeSolution", lqr_sol_initialize_wrap, ("problem"_a));
+  exposeGarUtils();
 
 #ifdef ALIGATOR_WITH_CHOLMOD
   exposeCholmodSolver();
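
Note: the `isApprox` binding added above mirrors the new `LQRKnot::isApprox()` mentioned in the changelog. A minimal C++-side sketch of the call it wraps, assuming two knots of compatible dimensions (the helper `knotsMatch` is illustrative, not part of the diff):

#include "aligator/gar/lqr-problem.hpp"

// Illustrative helper: true if two LQR knots agree entrywise up to `prec`.
bool knotsMatch(const aligator::gar::LQRKnotTpl<double> &a,
                const aligator::gar::LQRKnotTpl<double> &b,
                double prec = 1e-12) {
  return a.isApprox(b, prec);
}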
New file — Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+#include "aligator/python/fwd.hpp"
+#include "aligator/gar/utils.hpp"
+
+#include <eigenpy/std-array.hpp>
+
+namespace aligator::python {
+using namespace gar;
+
+using context::Scalar;
+using lqr_t = LQRProblemTpl<context::Scalar>;
+
+bp::dict lqr_sol_initialize_wrap(const lqr_t &problem) {
+  bp::dict out;
+  auto ss = lqrInitializeSolution(problem);
+  auto &[xs, us, vs, lbdas] = ss;
+  out["xs"] = xs;
+  out["us"] = us;
+  out["vs"] = vs;
+  out["lbdas"] = lbdas;
+  return out;
+}
+
+void exposeGarUtils() {
+
+  bp::def(
+      "lqrDenseMatrix",
+      +[](const lqr_t &problem, Scalar mudyn, Scalar mueq) {
+        auto mat_rhs = lqrDenseMatrix(problem, mudyn, mueq);
+        return bp::make_tuple(std::get<0>(mat_rhs), std::get<1>(mat_rhs));
+      },
+      ("problem"_a, "mudyn", "mueq"));
+
+  bp::def("lqrCreateSparseMatrix", lqrCreateSparseMatrix<Scalar>,
+          ("problem"_a, "mudyn", "mueq", "mat", "rhs", "update"),
+          "Create or update a sparse matrix from an LQRProblem.");
+
+  bp::def("lqrInitializeSolution", lqr_sol_initialize_wrap, ("problem"_a));
+
+  bp::def("lqrComputeKktError", lqrComputeKktError<Scalar>,
+          ("problem"_a, "xs", "us", "vs", "lbdas", "mudyn", "mueq", "theta",
+           "verbose"_a = false),
+          "Compute the KKT residual of the LQR problem.");
+}
+} // namespace aligator::python
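
For orientation, a rough C++-side sketch of the `gar` utilities this new translation unit wraps, assuming an already-constructed `LQRProblemTpl<double>` named `problem` (the function `inspectProblem` is illustrative, not part of the diff):

#include "aligator/gar/utils.hpp"

void inspectProblem(const aligator::gar::LQRProblemTpl<double> &problem,
                    double mudyn, double mueq) {
  // Dense KKT matrix and right-hand side of the LQ problem.
  auto [mat, rhs] = aligator::gar::lqrDenseMatrix(problem, mudyn, mueq);
  // Zero-initialized primal-dual trajectory (xs, us, vs, lbdas).
  auto [xs, us, vs, lbdas] = aligator::gar::lqrInitializeSolution(problem);
}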

examples/acrobot.py

Lines changed: 2 additions & 1 deletion
@@ -70,10 +70,11 @@ class Args(ArgsBase):
 )
 
 tol = 1e-3
-mu_init = 0.01
+mu_init = 10.0
 solver = aligator.SolverProxDDP(tol, mu_init=mu_init, verbose=aligator.VERBOSE)
 solver.max_iters = 200
 solver.rollout_type = aligator.ROLLOUT_LINEAR
+solver.linear_solver_choice = aligator.LQ_SOLVER_STAGEDENSE
 solver.setup(problem)
 
 
gar/include/aligator/gar/cholmod-solver.hpp

Lines changed: 3 additions & 151 deletions
@@ -6,131 +6,9 @@
 #include <Eigen/CholmodSupport>
 
 #include "lqr-problem.hpp"
-#include "utils.hpp"
 #include "aligator/context.hpp"
 
 namespace aligator::gar {
-namespace helpers {
-/// @brief Helper to assign a dense matrix into a range of coefficients of a
-/// sparse matrix.
-template <bool Update, typename InType, typename OutScalar>
-void sparseAssignDenseBlock(Eigen::Index i0, Eigen::Index j0,
-                            const Eigen::DenseBase<InType> &input,
-                            Eigen::SparseMatrix<OutScalar> &out) {
-  assert(i0 + input.rows() <= out.rows() && "Inconsistent rows");
-  assert(j0 + input.cols() <= out.cols() && "Inconsistent cols");
-  using Eigen::Index;
-  for (Index i = 0; i < input.rows(); i++) {
-    for (Index j = 0; j < input.cols(); j++) {
-      if constexpr (Update) {
-        out.coeffRef(i0 + i, j0 + j) = input(i, j);
-      } else {
-        out.insert(i0 + i, j0 + j) = input(i, j);
-      }
-    }
-  }
-}
-} // namespace helpers
-
-template <bool Update, typename Scalar>
-void lqrCreateSparseMatrix(const LQRProblemTpl<Scalar> &problem,
-                           const Scalar mudyn, const Scalar mueq,
-                           Eigen::SparseMatrix<Scalar> &mat,
-                           Eigen::Matrix<Scalar, -1, 1> &rhs) {
-  using Eigen::Index;
-  const uint nrows = lqrNumRows(problem);
-  using knot_t = LQRKnotTpl<Scalar>;
-  const auto &knots = problem.stages;
-
-  if constexpr (!Update) {
-    mat.conservativeResize(nrows, nrows);
-    mat.setZero();
-  }
-
-  rhs.conservativeResize(nrows);
-  rhs.setZero();
-  const size_t N = size_t(problem.horizon());
-  uint idx = 0;
-  {
-    uint nc0 = problem.nc0();
-    rhs.head(nc0) = problem.g0;
-    helpers::sparseAssignDenseBlock<Update>(0, nc0, problem.G0, mat);
-    helpers::sparseAssignDenseBlock<Update>(nc0, 0, problem.G0.transpose(),
-                                            mat);
-    for (Index kk = 0; kk < nc0; kk++) {
-      if constexpr (Update) {
-        mat.coeffRef(kk, kk) = -mudyn;
-      } else {
-        mat.insert(kk, kk) = -mudyn;
-      }
-    }
-    idx += nc0;
-  }
-
-  for (size_t t = 0; t <= N; t++) {
-    const knot_t &model = knots[t];
-    const uint n = model.nx + model.nu + model.nc;
-    // get block for current variables
-    auto rhsblk = rhs.segment(idx, n);
-
-    rhsblk.head(model.nx) = model.q;
-    rhsblk.segment(model.nx, model.nu) = model.r;
-    rhsblk.tail(model.nc) = model.d;
-
-    // fill-in Q block
-    helpers::sparseAssignDenseBlock<Update>(idx, idx, model.Q, mat);
-    const Index i0 = idx + model.nx; // u
-    // S block
-    helpers::sparseAssignDenseBlock<Update>(i0, idx, model.S.transpose(), mat);
-    helpers::sparseAssignDenseBlock<Update>(idx, i0, model.S, mat);
-    // R block
-    helpers::sparseAssignDenseBlock<Update>(i0, i0, model.R, mat);
-
-    const Index i1 = i0 + model.nu; // v
-    // C block
-    helpers::sparseAssignDenseBlock<Update>(i1, idx, model.C, mat);
-    helpers::sparseAssignDenseBlock<Update>(idx, i1, model.C.transpose(), mat);
-    // D block
-    helpers::sparseAssignDenseBlock<Update>(i1, i0, model.D, mat);
-    helpers::sparseAssignDenseBlock<Update>(i0, i1, model.D.transpose(), mat);
-
-    const Index i2 = i1 + model.nc;
-    // dual block
-    for (Index kk = i1; kk < i2; kk++) {
-      if constexpr (Update) {
-        mat.coeffRef(kk, kk) = -mueq;
-      } else {
-        mat.insert(kk, kk) = -mueq;
-      }
-    }
-
-    if (t != N) {
-      rhs.segment(idx + n, model.nx2) = model.f;
-
-      // A
-      helpers::sparseAssignDenseBlock<Update>(i2, idx, model.A, mat);
-      helpers::sparseAssignDenseBlock<Update>(idx, i2, model.A.transpose(),
-                                              mat);
-      // B
-      helpers::sparseAssignDenseBlock<Update>(i2, i0, model.B, mat);
-      helpers::sparseAssignDenseBlock<Update>(i0, i2, model.B.transpose(), mat);
-      // E
-      const Index i3 = i2 + model.nx2;
-      helpers::sparseAssignDenseBlock<Update>(i2, i3, model.E, mat);
-      helpers::sparseAssignDenseBlock<Update>(i3, i2, model.E.transpose(), mat);
-
-      for (Index kk = i2; kk < i3; kk++) {
-        if constexpr (Update) {
-          mat.coeffRef(kk, kk) = -mudyn;
-        } else {
-          mat.insert(kk, kk) = -mudyn;
-        }
-      }
-
-      idx += n + model.nx2;
-    }
-  }
-}
 
 /// @brief A sparse solver for the linear-quadratic problem based on CHOLMOD.
 template <typename _Scalar> class CholmodLqSolver {
@@ -143,26 +21,12 @@ template <typename _Scalar> class CholmodLqSolver {
 
   explicit CholmodLqSolver(const Problem &problem, uint numRefinementSteps = 1);
 
-  bool backward(const Scalar mudyn, const Scalar mueq) {
-    // update the sparse linear problem
-    lqrCreateSparseMatrix<true>(*problem_, mudyn, mueq, kktMatrix, kktRhs);
-    cholmod.factorize(kktMatrix);
-    return cholmod.info() == Eigen::Success;
-  }
+  bool backward(const Scalar mudyn, const Scalar mueq);
 
   bool forward(std::vector<VectorXs> &xs, std::vector<VectorXs> &us,
-               std::vector<VectorXs> &vs, std::vector<VectorXs> &lbdas) const {
-    kktSol = cholmod.solve(-kktRhs);
-    kktResidual = kktRhs;
-    for (uint i = 0; i < numRefinementSteps; i++) {
-      kktResidual.noalias() += kktMatrix * kktSol;
-      kktSol += cholmod.solve(-kktResidual);
-    }
-    lqrDenseSolutionToTraj(*problem_, kktSol, xs, us, vs, lbdas);
-    return true;
-  };
+               std::vector<VectorXs> &vs, std::vector<VectorXs> &lbdas) const;
 
-  Scalar computeSparseResidual() const {
+  inline Scalar computeSparseResidual() const {
    kktResidual = kktRhs;
     kktResidual.noalias() += kktMatrix * kktSol;
     return math::infty_norm(kktResidual);
@@ -184,18 +48,6 @@ template <typename _Scalar> class CholmodLqSolver {
   const Problem *problem_;
 };
 
-template <typename Scalar>
-CholmodLqSolver<Scalar>::CholmodLqSolver(const Problem &problem,
-                                         uint numRefinementSteps)
-    : kktMatrix(), kktRhs(), cholmod(), numRefinementSteps(numRefinementSteps),
-      problem_(&problem) {
-  lqrCreateSparseMatrix<false>(problem, 1., 1., kktMatrix, kktRhs);
-  assert(kktMatrix.cols() == kktRhs.rows());
-  kktSol.resize(kktRhs.rows());
-  kktResidual.resize(kktRhs.rows());
-  cholmod.analyzePattern(kktMatrix);
-}
-
 #ifdef ALIGATOR_ENABLE_TEMPLATE_INSTANTIATION
 extern template class CholmodLqSolver<context::Scalar>;
 #endif
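
Note: the net effect of this hunk is that `lqrCreateSparseMatrix` loses its `bool Update` template parameter; per the new implementation file below and the Python binding's argument list, the update flag becomes a trailing runtime argument. A hedged call-site sketch, assuming the reworked declaration lives in `gar/utils.hpp` and a `problem` object is available (`buildKkt` is illustrative, not part of the diff):

#include <Eigen/Sparse>
#include "aligator/gar/utils.hpp"

void buildKkt(const aligator::gar::LQRProblemTpl<double> &problem,
              double mudyn, double mueq) {
  Eigen::SparseMatrix<double> kktMatrix;
  Eigen::VectorXd kktRhs;
  // First call allocates the sparsity pattern and fills values
  // (previously lqrCreateSparseMatrix<false>(...)).
  aligator::gar::lqrCreateSparseMatrix(problem, 1., 1., kktMatrix, kktRhs, false);
  // Later calls refresh the coefficients in place
  // (previously lqrCreateSparseMatrix<true>(...)).
  aligator::gar::lqrCreateSparseMatrix(problem, mudyn, mueq, kktMatrix, kktRhs, true);
}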
New file — Lines changed: 46 additions & 0 deletions
@@ -0,0 +1,46 @@
+/// @copyright Copyright (C) 2024 LAAS-CNRS, INRIA
+/// @author Wilson Jallet
+#pragma once
+
+#ifdef ALIGATOR_WITH_CHOLMOD
+#include "cholmod-solver.hpp"
+#include "utils.hpp"
+
+namespace aligator::gar {
+
+template <typename Scalar>
+CholmodLqSolver<Scalar>::CholmodLqSolver(const Problem &problem,
+                                         uint numRefinementSteps)
+    : kktMatrix(), kktRhs(), cholmod(), numRefinementSteps(numRefinementSteps),
+      problem_(&problem) {
+  lqrCreateSparseMatrix(problem, 1., 1., kktMatrix, kktRhs, false);
+  assert(kktMatrix.cols() == kktRhs.rows());
+  kktSol.resize(kktRhs.rows());
+  kktResidual.resize(kktRhs.rows());
+  cholmod.analyzePattern(kktMatrix);
+}
+
+template <typename Scalar>
+bool CholmodLqSolver<Scalar>::backward(const Scalar mudyn, const Scalar mueq) {
+  // update the sparse linear problem
+  lqrCreateSparseMatrix(*problem_, mudyn, mueq, kktMatrix, kktRhs, true);
+  cholmod.factorize(kktMatrix);
+  return cholmod.info() == Eigen::Success;
+}
+
+template <typename Scalar>
+bool CholmodLqSolver<Scalar>::forward(std::vector<VectorXs> &xs,
+                                      std::vector<VectorXs> &us,
+                                      std::vector<VectorXs> &vs,
+                                      std::vector<VectorXs> &lbdas) const {
+  kktSol = cholmod.solve(-kktRhs);
+  kktResidual = kktRhs;
+  for (uint i = 0; i < numRefinementSteps; i++) {
+    kktResidual.noalias() += kktMatrix * kktSol;
+    kktSol += cholmod.solve(-kktResidual);
+  }
+  lqrDenseSolutionToTraj(*problem_, kktSol, xs, us, vs, lbdas);
+  return true;
+};
+} // namespace aligator::gar
+#endif
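
A minimal usage sketch of the out-of-line `CholmodLqSolver` defined above, assuming `ALIGATOR_WITH_CHOLMOD` is enabled and an `LQRProblemTpl<double>` is at hand; the trajectory containers are seeded via `lqrInitializeSolution`, as the Python wrapper does (`solveWithCholmod` is illustrative, not part of the diff):

#include "aligator/gar/cholmod-solver.hpp"
#include "aligator/gar/utils.hpp"

void solveWithCholmod(const aligator::gar::LQRProblemTpl<double> &problem,
                      double mudyn, double mueq) {
  aligator::gar::CholmodLqSolver<double> solver(problem, /*numRefinementSteps=*/2);
  auto [xs, us, vs, lbdas] = aligator::gar::lqrInitializeSolution(problem);
  if (solver.backward(mudyn, mueq)) {   // assemble and factorize the sparse KKT matrix
    solver.forward(xs, us, vs, lbdas);  // solve, with iterative refinement
  }
}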
