@@ -69,6 +69,9 @@ template <rk2_tensor Matrix> class DenseLUFactor {
TranspositionVector col_transpositions{};
double max_pivot{};

Eigen::Matrix<decltype(cabs(Scalar{})), size, size> accumulated_error{
Eigen::Matrix<decltype(cabs(Scalar{})), size, size>::Zero()};

// main loop
for (int8_t pivot = 0; pivot != size; ++pivot) {
int row_biggest_eigen{};
@@ -105,15 +108,50 @@ template <rk2_tensor Matrix> class DenseLUFactor {
col_transpositions[pivot] = col_biggest;
if (pivot != row_biggest) {
matrix.row(pivot).swap(matrix.row(row_biggest));
accumulated_error.row(pivot).swap(accumulated_error.row(row_biggest));
}
if (pivot != col_biggest) {
matrix.col(pivot).swap(matrix.col(col_biggest));
accumulated_error.col(pivot).swap(accumulated_error.col(col_biggest));
}

// check stability
Scalar const piv_size = cabs(matrix(pivot, pivot));
double const piv_error = accumulated_error(pivot, pivot);
if (cabs(matrix(pivot, pivot)) <= std::max(accumulated_error.row(pivot).tail(size - pivot).maxCoeff(),
@mgovers (Member, Author) commented on Dec 3, 2025:

This is actually a very weak criterion: by the time the accumulated error even comes close to the size of the pivot element, the rest of the calculation may already be garbage. For example, if you want a precision of 1e-5, odds are that the calculation is no longer that precise once the matrix element precision is around 1e-1. This criterion should probably be stricter:

Suggested change
if (cabs(matrix(pivot, pivot)) <= std::max(accumulated_error.row(pivot).tail(size - pivot).maxCoeff(),
if (cabs(matrix(pivot, pivot)) <= threshold * std::max(accumulated_error.row(pivot).tail(size - pivot).maxCoeff(),
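A standalone sketch of the numbers used in this comment, assuming an illustrative `desired_precision` of 1e-5 and a `stability_threshold` derived from it (both names are hypothetical, not part of this PR):

```cpp
#include <iostream>

int main() {
    // Hypothetical numbers from the review comment above; the names are illustrative only.
    constexpr double desired_precision = 1e-5;                      // precision wanted in the result
    constexpr double stability_threshold = 1.0 / desired_precision; // pivot must dominate the error by 1e5

    double const pivot_magnitude = 1.0;
    double const max_accumulated_error = 1e-1; // matrix element precision already degraded to ~1e-1

    // Unscaled criterion: does not trigger, although only one decimal digit of the pivot is still reliable.
    std::cout << std::boolalpha << (pivot_magnitude <= max_accumulated_error) << '\n'; // false
    // Scaled criterion from the suggested change: flags the pivot as too unreliable for 1e-5 precision.
    std::cout << (pivot_magnitude <= stability_threshold * max_accumulated_error) << '\n'; // true
}
```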

accumulated_error.col(pivot).tail(size - pivot).maxCoeff())) {
throw SparseMatrixError{
3, std::format(
"Accumulated error is equal to or exceeds stability threshold: pivot magnitude = {}, max "
"accumulated error = {}.",
cabs(matrix(pivot, pivot)),
std::max(accumulated_error.row(pivot).tail(size - pivot).maxCoeff(),
accumulated_error.col(pivot).tail(size - pivot).maxCoeff()))};
}

// use Gaussian elimination to calculate the bottom right corner
if (pivot < size - 1) {
// calculate the pivot column
matrix.col(pivot).tail(size - pivot - 1) /= matrix(pivot, pivot);
accumulated_error.col(pivot).tail(size - pivot - 1) /= cabs(matrix(pivot, pivot));

struct AbsoluteStability {
typedef double result_type;
result_type operator()(Matrix::Scalar const& a, Matrix::Scalar const& b) const {
if (a == Scalar{} || b == Scalar{}) {
return 0.0;
}
return epsilon * (cabs(a) + cabs(b)) / 2;
}
};

// check for numerical stability by calculating the new diagonal entries and comparing to the old ones
accumulated_error.bottomRightCorner(size - pivot - 1, size - pivot - 1) +=
matrix.bottomRightCorner(size - pivot - 1, size - pivot - 1)
.binaryExpr(
(matrix.col(pivot).tail(size - pivot - 1) * matrix.row(pivot).tail(size - pivot - 1)),
Comment from the Member Author on lines +151 to +152:

TODO: also add the contributions from `accumulated_error.col(pivot).tail(size - pivot - 1) * matrix.row(pivot).tail(size - pivot - 1)` and `matrix.col(pivot).tail(size - pivot - 1) * accumulated_error.row(pivot).tail(size - pivot - 1)`.
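A self-contained sketch of those cross terms on a tiny 2x2 corner with made-up numbers; the names `l`, `u`, `err_l`, and `err_u` are illustrative stand-ins for the pivot column/row and their accumulated errors, not code from this PR:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
    // For the Gaussian update A -= l * u, existing error bounds err_l on l and err_u on u
    // propagate (to first order) as E += err_l * |u| + |l| * err_u, on top of the rounding
    // term already added via AbsoluteStability above.
    Eigen::Vector2d const l{0.5, -2.0};         // pivot column below the pivot
    Eigen::RowVector2d const u{4.0, 1.0};       // pivot row to the right of the pivot
    Eigen::Vector2d const err_l{1e-16, 2e-16};  // accumulated error bounds on l
    Eigen::RowVector2d const err_u{1e-16, 0.0}; // accumulated error bounds on u

    Eigen::Matrix2d accumulated_error = Eigen::Matrix2d::Zero();
    accumulated_error += err_l * u.cwiseAbs() + l.cwiseAbs() * err_u; // the two cross terms from the TODO
    std::cout << accumulated_error << '\n';
}
```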

AbsoluteStability{});

// calculate the bottom right corner
matrix.bottomRightCorner(size - pivot - 1, size - pivot - 1).noalias() -=
matrix.col(pivot).tail(size - pivot - 1) * matrix.row(pivot).tail(size - pivot - 1);
@@ -134,8 +172,9 @@ template <rk2_tensor Matrix> class DenseLUFactor {
// only check condition number if pivot perturbation is not used
double const pivot_threshold = has_pivot_perturbation ? 0.0 : epsilon * max_pivot;
for (int8_t pivot = 0; pivot != size; ++pivot) {
if (cabs(matrix(pivot, pivot)) < pivot_threshold || !is_normal(matrix(pivot, pivot))) {
throw SparseMatrixError{}; // can not specify error code
if (!is_normal(matrix(pivot, pivot))) {
throw SparseMatrixError{4, std::format("Non-normal dense pivot element {}: value = {}.", pivot,
cabs(matrix(pivot, pivot)))};
}
}
capturing::into_the_void(std::move(matrix));
@@ -280,7 +319,8 @@ template <class Tensor, class RHSVector, class XVector> class SparseLUSolver {
has_pivot_perturbation_);
}
if (!is_normal(lu_matrix[pivot_idx])) {
throw SparseMatrixError{};
throw SparseMatrixError{5, std::format("Non-normal sparse pivot element {}: value = {}.",
pivot_row_col, cabs(lu_matrix[pivot_idx]))};
}
return {};
}
@@ -432,7 +472,8 @@ template <class Tensor, class RHSVector, class XVector> class SparseLUSolver {
while (backward_error > epsilon_converge) {
// check maximum iteration, including one initial run
if (num_iter++ == max_iterative_refinement + 1) {
throw SparseMatrixError{};
throw SparseMatrixError{6, std::format("Iterative refinement did not converge after {} iterations.",
max_iterative_refinement)};
}
// solve with residual (first time it is the b vector)
solve_once(data, block_perm_array, residual_.value(), dx_.value());
122 changes: 122 additions & 0 deletions tests/cpp_unit_tests/test_sparse_lu_solver.cpp
@@ -7,6 +7,8 @@

#include <doctest/doctest.h>

#include <ranges>

namespace power_grid_model::math_solver {
namespace {
using lu_trait_double = math_solver::sparse_lu_entry_trait<double, double, double>;
@@ -197,4 +199,124 @@ TEST_CASE("LU solver with ill-conditioned system") {
}
}

TEST_CASE("LU solver with multiple orders of magnitude") {
// [ aa ab ac ]   [ xa ]   [ ya ]
// [ ba bb bc ] * [ xb ] = [ yb ]
// [ ca cb cc ]   [ xc ]   [ yc ]
//

using Matrix = RealTensor<asymmetric_t>;
using Solver = DenseLUFactor<Matrix>;

static constexpr auto epsilon = std::numeric_limits<double>::epsilon();
static constexpr auto regular_small_value = epsilon * 10;              // ~2e-15
static constexpr auto very_small_value = epsilon / 10;                 // ~2e-17
static constexpr auto regular_large_value = 1.0 / regular_small_value; // ~5e+14
static constexpr auto very_large_value = 1.0 / very_small_value;       // ~5e+16
static constexpr auto normal_value = 1.0;

auto const run = [](Matrix& data) {
Solver::BlockPerm block_perm{};
bool has_pivot_perturbation = false;
Solver::factorize_block_in_place(data.matrix(), block_perm, 0.0, false, has_pivot_perturbation);
return data;
};

constexpr auto all_values = [] {
return std::views::cartesian_product(std::array{regular_large_value, very_large_value},
std::array{normal_value},
std::array{regular_small_value, very_small_value});
};

SUBCASE("Different orders of magnitude only on diagonal") {
SUBCASE("Full matrix") {
// [ AA ab ac ]
// [ ba bb bc ]
// [ ca cb (cc)]
// factor 2 for (cc) needed to avoid a zero pivot
for (auto const& [large_value, normal_value, small_value] : all_values()) {
CAPTURE(large_value);
CAPTURE(normal_value);
CAPTURE(small_value);
auto data =
Matrix{large_value, normal_value, 2 * small_value, normal_value, normal_value, normal_value};
CHECK_NOTHROW(run(data));
}
}
SUBCASE("Tri-diagonal matrix") {
// [ AA ab 0 ]
// [ ba bb bc ]
// [ 0 cb (cc)]
for (auto const& [large_value, normal_value, small_value] : all_values()) {
CAPTURE(large_value);
CAPTURE(normal_value);
CAPTURE(small_value);
auto data = Matrix{large_value, normal_value, small_value, normal_value, 0.0, normal_value};
CHECK_NOTHROW(run(data));
}
}
SUBCASE("Full matrix with rounding errors") {
// [ AA ab ac ]
// [ ba bb bc ]
// [ ca cb (cc)]
for (auto const& [large_value, normal_value, small_value] : all_values()) {
CAPTURE(large_value);
CAPTURE(normal_value);
CAPTURE(small_value);
auto data = Matrix{large_value, normal_value, small_value, normal_value, normal_value, normal_value};
CHECK_NOTHROW(run(data));
}
}
}

SUBCASE("Different orders of magnitude in off-diagonal, but largest on diagonal") {
SUBCASE("Full matrix") {
// [ AA ab (ac)]
// [ ba bb (bc)]
// [(ca) (cb) (cc)]
for (auto const& [large_value, normal_value, small_value] : all_values()) {
CAPTURE(large_value);
CAPTURE(normal_value);
CAPTURE(small_value);
auto data = Matrix{large_value, normal_value, small_value, normal_value, small_value, small_value};
CHECK_NOTHROW(run(data));
}
}
SUBCASE("Tri-diagonal matrix") {
// [ AA ab 0 ]
// [ ba bb (bc)]
// [ 0 (cb) (cc)]
for (auto const& [large_value, normal_value, small_value] : all_values()) {
CAPTURE(large_value);
CAPTURE(normal_value);
CAPTURE(small_value);
auto data = Matrix{large_value, normal_value, small_value, normal_value, 0.0, small_value};
CHECK_NOTHROW(run(data));
}
}
}

SUBCASE("With small deviations: raises if deviations too small") {
// [ aa ab 0 ]
// [ ba bb (bc)]
// [ 0 (cb) (cc)]
// with aa = ab, bb = aa + delta and (cc) = (bc)
SUBCASE("Full matrix") {
SUBCASE("Small but not too small deviation") {
auto data =
Matrix{normal_value, normal_value + regular_small_value, regular_small_value, normal_value, 0,
regular_small_value};
CHECK_NOTHROW(run(data));
}
SUBCASE("Too small deviation results in cancelling values => another pivot element is selected => raises") {
auto data = Matrix{normal_value, normal_value + very_small_value, very_small_value, normal_value, 0,
very_small_value};
CHECK_THROWS_AS(run(data), SparseMatrixError);
}
}
}
}

} // namespace power_grid_model::math_solver
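For reference, a standalone illustration (plain double arithmetic, independent of the classes in this PR) of the cancellation the last subcase relies on: with aa == ab == ba, the first elimination step reduces bb = aa + delta to delta, and a delta below half a machine epsilon is already rounded away when bb is formed:

```cpp
#include <iostream>
#include <limits>

int main() {
    constexpr double eps = std::numeric_limits<double>::epsilon();
    double const aa = 1.0;
    double const regular_delta = eps * 10; // like regular_small_value: survives the update
    double const tiny_delta = eps / 10;    // like very_small_value: rounded away entirely

    // bb - ba * ab / aa with aa == ab == ba reduces to (aa + delta) - aa.
    std::cout << ((aa + regular_delta) - aa) << '\n'; // ~2.2e-15: still a usable pivot
    std::cout << ((aa + tiny_delta) - aa) << '\n';    // 0: forces a different pivot choice and the throw
}
```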
10 changes: 9 additions & 1 deletion tests/cpp_validation_tests/test_validation.cpp
@@ -507,9 +507,17 @@ std::optional<CaseParam> construct_case(std::filesystem::path const& case_dir, j
json calculation_method_params;
calculation_method_params.update(j, true);
if (j.contains("extra_params")) {
if (json const& extra_params = j.at("extra_params"); extra_params.contains(calculation_method)) {
json const& extra_params = j.at("extra_params");
if (extra_params.contains(calculation_method)) {
calculation_method_params.update(extra_params.at(calculation_method), true);
}
if (extra_params.contains(sym ? "sym" : "asym")) {
calculation_method_params.update(extra_params.at(sym ? "sym" : "asym"), true);
}
if (extra_params.contains(std::format("{}-{}", sym ? "sym" : "asym", calculation_method))) {
calculation_method_params.update(
extra_params.at(std::format("{}-{}", sym ? "sym" : "asym", calculation_method)), true);
}
}

if (calculation_method_params.contains("raises")) {
@@ -3,9 +3,5 @@
"rtol": 1e-8,
"atol": {
"default": 1e-8
},
"raises": {
"raises": "SparseMatrixError",
"reason": "Observability check for meshed grids is not yet implemented. Also, pivot perturbation wouldn't be triggered as Matrix norm is smaller than the max pivot. See https://github.com/PowerGridModel/power-grid-model/pull/1118#issuecomment-3269339573"
}
}
@@ -6,9 +6,5 @@
"rtol": 1e-8,
"atol": {
"default": 1e-8
},
"raises": {
"raises": "SparseMatrixError",
"reason": "Observability check for meshed grids is not yet implemented. Also, pivot perturbation wouldn't be triggered as Matrix norm is smaller than the max pivot. See https://github.com/PowerGridModel/power-grid-model/pull/1118#issuecomment-3269339573"
}
}
@@ -7,5 +7,13 @@
"atol": {
"default": 1e-06,
".+_residual": 0.0001
},
"extra_params": {
"asym": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for asymmetric calculations."
}
}
}
}
@@ -7,5 +7,13 @@
"atol": {
"default": 1e-06,
".+_residual": 0.0001
},
"extra_params": {
"asym": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for asymmetric calculations"
}
}
}
}
@@ -7,5 +7,13 @@
"atol": {
"default": 1e-06,
".+_residual": 0.0001
},
"extra_params": {
"asym-newton_raphson": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for asymmetric calculations"
}
}
}
}
@@ -7,5 +7,19 @@
"atol": {
"default": 1e-06,
".+_residual": 0.0001
},
"extra_params": {
"sym-newton_raphson": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for the Newton-Raphson calculation method"
}
},
"asym": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for asymmetric calculations"
}
}
}
}
@@ -7,5 +7,19 @@
"atol": {
"default": 1e-06,
".+_residual": 0.0001
},
"extra_params": {
"sym-newton_raphson": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for the Newton-Raphson calculation method"
}
},
"asym": {
"raises": {
"raises": "SparseMatrixError",
"reason": "System is ill-conditioned for asymmetric calculations"
}
}
}
}
36 changes: 36 additions & 0 deletions tests/data/state_estimation/test-ill-conditioned/input.json
@@ -0,0 +1,36 @@
{
"version": "1.0",
"type": "input",
"is_batch": false,
"attributes": {},
"data": {
"node": [
{"id": 0, "u_rated": 100000},
{"id": 2, "u_rated": 10000},
{"id": 3, "u_rated": 10000},
{"id": 4, "u_rated": 10000},
{"id": 5, "u_rated": 100000}
],
"line": [
{"id": 7, "from_node": 4, "to_node": 2, "from_status": 1, "to_status": 1, "r1": 1, "x1": 0.10000000000000001, "c1": 1.0000000000000001e-05, "tan1": 0, "r0": 10, "x0": 10, "c0": 9.9999999999999995e-07, "tan0": 0, "i_n": 100},
{"id": 8, "from_node": 4, "to_node": 3, "from_status": 1, "to_status": 1, "r1": 1, "x1": 0.10000000000000001, "c1": 1.0000000000000001e-05, "tan1": 0, "r0": 10, "x0": 10, "c0": 9.9999999999999995e-07, "tan0": 0, "i_n": 100},
{"id": 9, "from_node": 3, "to_node": 2, "from_status": 1, "to_status": 1, "r1": 1, "x1": 0.10000000000000001, "c1": 1.0000000000000001e-05, "tan1": 0, "r0": 10, "x0": 10, "c0": 9.9999999999999995e-07, "tan0": 0, "i_n": 100},
{"id": 10, "from_node": 0, "to_node": 5, "from_status": 1, "to_status": 1, "r1": 1, "x1": 0.10000000000000001, "c1": 1.0000000000000001e-05, "tan1": 0, "r0": 10, "x0": 10, "c0": 9.9999999999999995e-07, "tan0": 0, "i_n": 100}
],
"transformer": [
{"id": 1177, "from_node": 5, "to_node": 4, "from_status": 1, "to_status": 1, "u1": 100000, "u2": 10000, "sn": 33000000, "uk": 0.1, "pk": 68000, "i0": 0.01, "p0": 16400, "winding_from": 0, "winding_to": 2, "clock": 1, "tap_side": 0, "tap_pos": 2, "tap_min": -10, "tap_max": 10, "tap_nom": 0, "tap_size": 1600, "uk_min": 0.1, "uk_max": 0.1, "pk_min": 68000, "pk_max": 68000}
],
"source": [
{"id": 14, "node": 0, "status": 1, "u_ref": 1, "sk": 1000}
],
"sym_load": [
{"id": 15, "node": 4, "status": 1, "type": 0}
],
"sym_power_sensor": [
{"id": 18, "measured_object": 15, "measured_terminal_type": 4, "power_sigma": 100, "p_measured": 0, "q_measured": 0}
],
"sym_voltage_sensor": [
{"id": 19, "measured_object": 0, "u_sigma": 1000, "u_measured": 100000}
]
}
}
@@ -0,0 +1,3 @@
SPDX-FileCopyrightText: Contributors to the Power Grid Model project <[email protected]>

SPDX-License-Identifier: MPL-2.0