
[GLOP] honor time limits better
lperron committed Feb 6, 2025
1 parent 3a73fa1 commit 05c362a
Showing 4 changed files with 20 additions and 2 deletions.
2 changes: 2 additions & 0 deletions ortools/glop/BUILD.bazel
@@ -265,6 +265,7 @@ cc_library(
"//ortools/lp_data:lp_utils",
"//ortools/lp_data:scattered_vector",
"//ortools/util:stats",
"//ortools/util:time_limit",
],
)

@@ -330,6 +331,7 @@ cc_library(
"//ortools/lp_data:lp_utils",
"//ortools/lp_data:scattered_vector",
"//ortools/util:stats",
"//ortools/util:time_limit",
],
)

14 changes: 12 additions & 2 deletions ortools/glop/dual_edge_norms.cc
@@ -105,7 +105,7 @@ void DualEdgeNorms::UpdateBeforeBasisPivot(

  // Avoid 0.0 norms (The 1e-4 is the value used by Koberstein).
  // TODO(user): use a more precise lower bound depending on the column norm?
  // We can do that with Cauchy-Swartz inequality:
  // We can do that with Cauchy-Schwarz inequality:
  // (edge . leaving_column)^2 = 1.0 < ||edge||^2 * ||leaving_column||^2
  const Fractional kLowerBound = 1e-4;
  if (output[e.row()] < kLowerBound) {
@@ -121,13 +121,23 @@ void DualEdgeNorms::UpdateBeforeBasisPivot(
void DualEdgeNorms::ComputeEdgeSquaredNorms() {
  SCOPED_TIME_STAT(&stats_);

  // time_limit_->LimitReached() can be costly sometimes, so we only do that
  // if we feel this will be slow anyway.
  const bool test_limit = (time_limit_ != nullptr) &&
                          basis_factorization_.NumberOfEntriesInLU() > 10'000;

  // Since we will do a lot of inversions, it is better to be as efficient and
  // precise as possible by having a refactorized basis.
  DCHECK(basis_factorization_.IsRefactorized());
  const RowIndex num_rows = basis_factorization_.GetNumberOfRows();
  edge_squared_norms_.resize(num_rows, 0.0);
  edge_squared_norms_.resize(num_rows, 1.0);
  for (RowIndex row(0); row < num_rows; ++row) {
    edge_squared_norms_[row] = basis_factorization_.DualEdgeSquaredNorm(row);

    // This operation can be costly, and we abort if we are stuck here.
    // Note that we still mark edges as "recomputed", otherwise we can run into
    // some DCHECK before we actually abort the solve.
    if (test_limit && time_limit_->LimitReached()) break;
  }
  recompute_edge_squared_norms_ = false;
}
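
The guard around LimitReached() is the heart of the change: polling a time limit has a cost of its own, so ComputeEdgeSquaredNorms() only pays it when the LU factorization is large enough for the recomputation to be slow, and the norms are pre-filled with a safe default (1.0) so an early break leaves no uninitialized entries. Below is a standalone sketch of that pattern, assuming a hypothetical Deadline class and RecomputeNorms() function rather than the real ortools TimeLimit API; the 10'000 threshold simply mirrors the NumberOfEntriesInLU() proxy used in the diff.

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for a wall-clock time limit; only LimitReached()
// matters for the pattern shown here.
class Deadline {
 public:
  explicit Deadline(double seconds)
      : start_(std::chrono::steady_clock::now()), budget_seconds_(seconds) {}
  bool LimitReached() const {
    const std::chrono::duration<double> elapsed =
        std::chrono::steady_clock::now() - start_;
    return elapsed.count() >= budget_seconds_;
  }

 private:
  std::chrono::steady_clock::time_point start_;
  double budget_seconds_;
};

// Recomputes one value per row, but polls the (optional) deadline only when
// the estimated amount of work is large.
void RecomputeNorms(const Deadline* deadline, int64_t work_estimate,
                    std::vector<double>& norms) {
  const bool test_limit = deadline != nullptr && work_estimate > 10'000;
  for (std::size_t row = 0; row < norms.size(); ++row) {
    norms[row] = 1.0;  // Placeholder for the expensive per-row computation.
    // Abort mid-loop once the deadline has passed; rows not reached keep the
    // default value they were initialized with by the caller.
    if (test_limit && deadline->LimitReached()) break;
  }
}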
4 changes: 4 additions & 0 deletions ortools/glop/dual_edge_norms.h
@@ -23,6 +23,7 @@
#include "ortools/lp_data/permutation.h"
#include "ortools/lp_data/scattered_vector.h"
#include "ortools/util/stats.h"
#include "ortools/util/time_limit.h"

namespace operations_research {
namespace glop {
@@ -102,6 +103,8 @@ class DualEdgeNorms {
    parameters_ = parameters;
  }

  void SetTimeLimit(TimeLimit* time_limit) { time_limit_ = time_limit; }

  // Stats related functions.
  std::string StatString() const { return stats_.StatString(); }

@@ -130,6 +133,7 @@

  // Parameters.
  GlopParameters parameters_;
  TimeLimit* time_limit_ = nullptr;

  // Problem data that should be updated from outside.
  const BasisFactorization& basis_factorization_;
2 changes: 2 additions & 0 deletions ortools/glop/revised_simplex.cc
@@ -221,6 +221,8 @@ ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal(

  SOLVER_LOG(logger_, "");
  primal_edge_norms_.SetTimeLimit(time_limit);
  dual_edge_norms_.SetTimeLimit(time_limit);

  if (logger_->LoggingIsEnabled()) {
    DisplayBasicVariableStatistics();
  }
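
The setters added above store raw, non-owning pointers, so the TimeLimit handed to SolveInternal() must outlive the solve, and both edge-norm classes keep working when no limit is ever provided. A minimal sketch of that wiring, with hypothetical FakeTimeLimit, Component, and Solve() names standing in for the real TimeLimit, edge-norm classes, and RevisedSimplex::SolveInternal():

#include <cassert>

// Hypothetical stand-in for operations_research::TimeLimit; a stub is enough
// to illustrate the ownership pattern.
class FakeTimeLimit {
 public:
  bool LimitReached() const { return false; }
};

// Mirrors DualEdgeNorms / PrimalEdgeNorms: the pointer is optional and never
// owned by the component.
class Component {
 public:
  void SetTimeLimit(FakeTimeLimit* time_limit) { time_limit_ = time_limit; }
  void DoWork() {
    // Null check first: the component must also work without a time limit.
    if (time_limit_ != nullptr && time_limit_->LimitReached()) return;
    // ... expensive work ...
  }

 private:
  FakeTimeLimit* time_limit_ = nullptr;  // Not owned.
};

// Mirrors the wiring in SolveInternal(): one caller-owned limit is shared by
// every subcomponent for the duration of the solve.
void Solve(FakeTimeLimit* time_limit, Component& primal, Component& dual) {
  assert(time_limit != nullptr);
  primal.SetTimeLimit(time_limit);
  dual.SetTimeLimit(time_limit);
  primal.DoWork();
  dual.DoWork();
}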
