diff --git a/ortools/glop/BUILD.bazel b/ortools/glop/BUILD.bazel index da89eef6ffe..da97e5d2a8b 100644 --- a/ortools/glop/BUILD.bazel +++ b/ortools/glop/BUILD.bazel @@ -265,6 +265,7 @@ cc_library( "//ortools/lp_data:lp_utils", "//ortools/lp_data:scattered_vector", "//ortools/util:stats", + "//ortools/util:time_limit", ], ) @@ -330,6 +331,7 @@ cc_library( "//ortools/lp_data:lp_utils", "//ortools/lp_data:scattered_vector", "//ortools/util:stats", + "//ortools/util:time_limit", ], ) diff --git a/ortools/glop/dual_edge_norms.cc b/ortools/glop/dual_edge_norms.cc index 4e81385ddf5..db19bc3d0ae 100644 --- a/ortools/glop/dual_edge_norms.cc +++ b/ortools/glop/dual_edge_norms.cc @@ -105,7 +105,7 @@ void DualEdgeNorms::UpdateBeforeBasisPivot( // Avoid 0.0 norms (The 1e-4 is the value used by Koberstein). // TODO(user): use a more precise lower bound depending on the column norm? - // We can do that with Cauchy-Swartz inequality: + // We can do that with Cauchy-Schwarz inequality: // (edge . leaving_column)^2 = 1.0 < ||edge||^2 * ||leaving_column||^2 const Fractional kLowerBound = 1e-4; if (output[e.row()] < kLowerBound) { @@ -121,13 +121,23 @@ void DualEdgeNorms::UpdateBeforeBasisPivot( void DualEdgeNorms::ComputeEdgeSquaredNorms() { SCOPED_TIME_STAT(&stats_); + // time_limit_->LimitReached() can be costly sometimes, so we only do that + // if we feel this will be slow anyway. + const bool test_limit = (time_limit_ != nullptr) && + basis_factorization_.NumberOfEntriesInLU() > 10'000; + // Since we will do a lot of inversions, it is better to be as efficient and // precise as possible by having a refactorized basis. 
DCHECK(basis_factorization_.IsRefactorized()); const RowIndex num_rows = basis_factorization_.GetNumberOfRows(); - edge_squared_norms_.resize(num_rows, 0.0); + edge_squared_norms_.resize(num_rows, 1.0); for (RowIndex row(0); row < num_rows; ++row) { edge_squared_norms_[row] = basis_factorization_.DualEdgeSquaredNorm(row); + + // This operation can be costly, and we abort if we are stuck here. + // Note that we still mark edges as "recomputed" otherwise we can run into + // some DCHECK before we actually abort the solve. + if (test_limit && time_limit_->LimitReached()) break; } recompute_edge_squared_norms_ = false; } diff --git a/ortools/glop/dual_edge_norms.h b/ortools/glop/dual_edge_norms.h index bf5ba3f7833..e0cbc7d795b 100644 --- a/ortools/glop/dual_edge_norms.h +++ b/ortools/glop/dual_edge_norms.h @@ -23,6 +23,7 @@ #include "ortools/lp_data/permutation.h" #include "ortools/lp_data/scattered_vector.h" #include "ortools/util/stats.h" +#include "ortools/util/time_limit.h" namespace operations_research { namespace glop { @@ -102,6 +103,8 @@ class DualEdgeNorms { parameters_ = parameters; } + void SetTimeLimit(TimeLimit* time_limit) { time_limit_ = time_limit; } + // Stats related functions. std::string StatString() const { return stats_.StatString(); } @@ -130,6 +133,7 @@ class DualEdgeNorms { // Parameters. GlopParameters parameters_; + TimeLimit* time_limit_ = nullptr; // Problem data that should be updated from outside. const BasisFactorization& basis_factorization_; diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 9961a5e4742..0ed83b1e366 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -221,6 +221,8 @@ ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal( SOLVER_LOG(logger_, ""); primal_edge_norms_.SetTimeLimit(time_limit); + dual_edge_norms_.SetTimeLimit(time_limit); + if (logger_->LoggingIsEnabled()) { DisplayBasicVariableStatistics(); }