From 5badc421df6543f42f3a8827052c31ca7f4de7c1 Mon Sep 17 00:00:00 2001
From: Luc Maisonobe
Date: Tue, 26 Mar 2024 11:01:02 +0100
Subject: [PATCH] Fixed PMD and Spotbugs warnings.
---
.../src/conf/spotbugs-exclude-filter.xml | 1 +
.../differentiation/FieldGradient.java | 1 +
.../FieldUnivariateDerivative1.java | 1 +
.../differentiation/SparseGradient.java | 10 +-
...iseBicubicSplineInterpolatingFunction.java | 2 +-
.../polynomials/PolynomialSplineFunction.java | 2 +-
.../org/hipparchus/fraction/BigFraction.java | 3 +-
.../org/hipparchus/filtering/kalman/radar.txt | 15 +++
.../euclidean/threed/RotationOrder.java | 4 +-
.../FieldExplicitRungeKuttaIntegrator.java | 3 +-
.../src/conf/spotbugs-exclude-filter.xml | 55 ++++++++
.../ADMMQPModifiedRuizEquilibrium.java | 4 +-
.../vector/constrained/ADMMQPOptimizer.java | 39 +++---
.../vector/constrained/ADMMQPOption.java | 24 ++--
.../constrained/AbstractSQPOptimizer.java | 3 +-
.../vector/constrained/SQPOptimizerGM.java | 121 ++++++++----------
.../vector/constrained/SQPOptimizerS.java | 113 ++++++++--------
.../vector/constrained/SQPOption.java | 2 +-
.../VectorDifferentiableFunction.java | 1 +
.../vector/constrained/ADMMQPOptionTest.java | 8 +-
.../constrained/BoundedConstraintTest.java | 16 +++
.../vector/constrained/SQPOptionTest.java | 2 +-
.../descriptive/rank/PSquarePercentile.java | 14 +-
23 files changed, 252 insertions(+), 192 deletions(-)
diff --git a/hipparchus-core/src/conf/spotbugs-exclude-filter.xml b/hipparchus-core/src/conf/spotbugs-exclude-filter.xml
index 2e68ecbcd..1b4610e79 100644
--- a/hipparchus-core/src/conf/spotbugs-exclude-filter.xml
+++ b/hipparchus-core/src/conf/spotbugs-exclude-filter.xml
@@ -479,6 +479,7 @@
+
diff --git a/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldGradient.java b/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldGradient.java
index d44223840..ba761598a 100644
--- a/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldGradient.java
+++ b/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldGradient.java
@@ -476,6 +476,7 @@ public FieldGradient hypot(final FieldGradient y) {
* @param g1 first derivative of the function at the current point (i.e. at {@code g'(getValue())})
* @return g(this)
*/
+ @Override
public FieldGradient compose(final T g0, final T g1) {
final FieldGradient result = newInstance(g0);
for (int i = 0; i < grad.length; ++i) {
diff --git a/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldUnivariateDerivative1.java b/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldUnivariateDerivative1.java
index a0f4331eb..67078fe13 100644
--- a/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldUnivariateDerivative1.java
+++ b/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/FieldUnivariateDerivative1.java
@@ -359,6 +359,7 @@ public FieldUnivariateDerivative1 hypot(final FieldUnivariateDerivative1 y
* @param g1 first derivative of the function at the current point (i.e. at {@code g'(getValue())})
* @return g(this)
*/
+ @Override
public FieldUnivariateDerivative1 compose(final T g0, final T g1) {
return new FieldUnivariateDerivative1<>(g0, g1.multiply(f1));
}
diff --git a/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/SparseGradient.java b/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/SparseGradient.java
index 71a2cab42..db3228d48 100644
--- a/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/SparseGradient.java
+++ b/hipparchus-core/src/main/java/org/hipparchus/analysis/differentiation/SparseGradient.java
@@ -116,7 +116,7 @@ public SparseGradient withValue(final double v) {
* @return a new instance
*/
public static SparseGradient createConstant(final double value) {
- return new SparseGradient(value, Collections. emptyMap());
+ return new SparseGradient(value, Collections.emptyMap());
}
/** Factory method creating an independent variable.
@@ -152,6 +152,7 @@ public double getDerivative(final int index) {
* Get the value of the function.
* @return value of the function.
*/
+ @Override
public double getValue() {
return value;
}
@@ -218,7 +219,7 @@ public SparseGradient subtract(final SparseGradient a) {
@Override
public SparseGradient multiply(final SparseGradient a) {
final SparseGradient out =
- new SparseGradient(value * a.value, Collections. emptyMap());
+ new SparseGradient(value * a.value, Collections.emptyMap());
// Derivatives.
for (Map.Entry entry : derivatives.entrySet()) {
@@ -280,7 +281,7 @@ public SparseGradient multiply(final int n) {
/** {@inheritDoc} */
@Override
public SparseGradient divide(final SparseGradient a) {
- final SparseGradient out = new SparseGradient(value / a.value, Collections. emptyMap());
+ final SparseGradient out = new SparseGradient(value / a.value, Collections.emptyMap());
// Derivatives.
for (Map.Entry entry : derivatives.entrySet()) {
@@ -435,7 +436,7 @@ public SparseGradient copySign(final double sign) {
/** {@inheritDoc} */
@Override
public SparseGradient scalb(final int n) {
- final SparseGradient out = new SparseGradient(FastMath.scalb(value, n), Collections. emptyMap());
+ final SparseGradient out = new SparseGradient(FastMath.scalb(value, n), Collections.emptyMap());
for (Map.Entry entry : derivatives.entrySet()) {
out.derivatives.put(entry.getKey(), FastMath.scalb(entry.getValue(), n));
}
@@ -611,6 +612,7 @@ public double taylor(final double ... delta) {
* @exception MathIllegalArgumentException if the number of elements
* in the array is not equal to 2 (i.e. value and first derivative)
*/
+ @Override
public SparseGradient compose(final double... f) {
MathUtils.checkDimension(f.length, 2);
return compose(f[0], f[1]);
diff --git a/hipparchus-core/src/main/java/org/hipparchus/analysis/interpolation/PiecewiseBicubicSplineInterpolatingFunction.java b/hipparchus-core/src/main/java/org/hipparchus/analysis/interpolation/PiecewiseBicubicSplineInterpolatingFunction.java
index 7aeb7d950..d3837cbd4 100644
--- a/hipparchus-core/src/main/java/org/hipparchus/analysis/interpolation/PiecewiseBicubicSplineInterpolatingFunction.java
+++ b/hipparchus-core/src/main/java/org/hipparchus/analysis/interpolation/PiecewiseBicubicSplineInterpolatingFunction.java
@@ -197,7 +197,7 @@ public > T value(final T x, final T y)
*/
public boolean isValidPoint(double x,
double y) {
- return !(x < xval[0]) && !(x > xval[xval.length - 1]) && !(y < yval[0]) && !(y > yval[yval.length - 1]);
+ return x >= xval[0] && x <= xval[xval.length - 1] && y >= yval[0] && y <= yval[yval.length - 1];
}
/**
diff --git a/hipparchus-core/src/main/java/org/hipparchus/analysis/polynomials/PolynomialSplineFunction.java b/hipparchus-core/src/main/java/org/hipparchus/analysis/polynomials/PolynomialSplineFunction.java
index 78d82985a..53e2a0a40 100644
--- a/hipparchus-core/src/main/java/org/hipparchus/analysis/polynomials/PolynomialSplineFunction.java
+++ b/hipparchus-core/src/main/java/org/hipparchus/analysis/polynomials/PolynomialSplineFunction.java
@@ -244,6 +244,6 @@ public double[] getKnots() {
* @return {@code true} if {@code x} is a valid point.
*/
public boolean isValidPoint(double x) {
- return !(x < knots[0]) && !(x > knots[n]);
+ return x >= knots[0] && x <= knots[n];
}
}
diff --git a/hipparchus-core/src/main/java/org/hipparchus/fraction/BigFraction.java b/hipparchus-core/src/main/java/org/hipparchus/fraction/BigFraction.java
index 7aa7525e2..e852914da 100644
--- a/hipparchus-core/src/main/java/org/hipparchus/fraction/BigFraction.java
+++ b/hipparchus-core/src/main/java/org/hipparchus/fraction/BigFraction.java
@@ -798,8 +798,7 @@ private static BigInteger lcm(final BigInteger i0, final BigInteger i1) {
BigInteger a = i0.abs();
BigInteger b = i1.abs();
BigInteger gcd = i0.gcd(b);
- BigInteger lcm = (a.multiply(b)).divide(gcd);
- return lcm;
+ return (a.multiply(b)).divide(gcd);
}
/**
diff --git a/hipparchus-filtering/src/test/resources/org/hipparchus/filtering/kalman/radar.txt b/hipparchus-filtering/src/test/resources/org/hipparchus/filtering/kalman/radar.txt
index debffe4cf..e086ad255 100644
--- a/hipparchus-filtering/src/test/resources/org/hipparchus/filtering/kalman/radar.txt
+++ b/hipparchus-filtering/src/test/resources/org/hipparchus/filtering/kalman/radar.txt
@@ -1,3 +1,18 @@
+# Licensed to the Hipparchus project under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The Hipparchus project licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
0.05 1019.28514031432 4.19892612568951 89.9849853443891 1026.57855317649 100.258471325986 4.99992376451156 -0.372790425959169 100.009996198111 -0.0185911842189891 9.09938047660586
0.10 1003.64456445668 8.11769294928251 89.9053600341848 1015.61489076285 101.006331345204 9.99875593985759 -0.603337707126964 100.019767447146 -0.0500880581381564 4.77255476316153
0.15 1004.37512895945 12.1026224146043 89.8007116058457 1011.9454825604 102.236446261076 14.9936315467677 -0.820181035401379 100.028513982531 -0.0940397295724976 3.24142675739822
diff --git a/hipparchus-geometry/src/main/java/org/hipparchus/geometry/euclidean/threed/RotationOrder.java b/hipparchus-geometry/src/main/java/org/hipparchus/geometry/euclidean/threed/RotationOrder.java
index a8da94f22..f08649a93 100644
--- a/hipparchus-geometry/src/main/java/org/hipparchus/geometry/euclidean/threed/RotationOrder.java
+++ b/hipparchus-geometry/src/main/java/org/hipparchus/geometry/euclidean/threed/RotationOrder.java
@@ -206,7 +206,9 @@ public static RotationOrder getRotationOrder(final String value) {
return RotationOrder.valueOf(value);
} catch (IllegalArgumentException iae) {
// Invalid value. An exception is thrown
- throw new MathIllegalStateException(LocalizedGeometryFormats.INVALID_ROTATION_ORDER_NAME, value);
+ throw new MathIllegalStateException(iae,
+ LocalizedGeometryFormats.INVALID_ROTATION_ORDER_NAME,
+ value);
}
}
diff --git a/hipparchus-ode/src/main/java/org/hipparchus/ode/nonstiff/FieldExplicitRungeKuttaIntegrator.java b/hipparchus-ode/src/main/java/org/hipparchus/ode/nonstiff/FieldExplicitRungeKuttaIntegrator.java
index e93a406c3..a58e634b5 100644
--- a/hipparchus-ode/src/main/java/org/hipparchus/ode/nonstiff/FieldExplicitRungeKuttaIntegrator.java
+++ b/hipparchus-ode/src/main/java/org/hipparchus/ode/nonstiff/FieldExplicitRungeKuttaIntegrator.java
@@ -22,7 +22,6 @@
import org.hipparchus.Field;
import org.hipparchus.ode.FieldExpandableODE;
import org.hipparchus.ode.FieldODEIntegrator;
-import org.hipparchus.ode.FieldODEState;
import org.hipparchus.ode.FieldOrdinaryDifferentialEquation;
import org.hipparchus.util.MathArrays;
@@ -118,7 +117,7 @@ default int getNumberOfStages() {
* so it can be embedded in outer loops.
*
* This method is not used at all by the {@link #integrate(FieldExpandableODE,
- * FieldODEState, CalculusFieldElement)} method. It also completely ignores the step set at
+ * org.hipparchus.ode.FieldODEState, CalculusFieldElement)} method. It also completely ignores the step set at
* construction time, and uses only a single step to go from {@code t0} to {@code t}.
*
*
diff --git a/hipparchus-optim/src/conf/spotbugs-exclude-filter.xml b/hipparchus-optim/src/conf/spotbugs-exclude-filter.xml
index ea3ba10ed..745a0b01a 100644
--- a/hipparchus-optim/src/conf/spotbugs-exclude-filter.xml
+++ b/hipparchus-optim/src/conf/spotbugs-exclude-filter.xml
@@ -69,11 +69,66 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPModifiedRuizEquilibrium.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPModifiedRuizEquilibrium.java
index 461c37c41..a544def8b 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPModifiedRuizEquilibrium.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPModifiedRuizEquilibrium.java
@@ -86,8 +86,8 @@ public void normalize(double epsilon, int maxIteration) {
RealVector q1 = q.copy();
RealMatrix H1 = H.copy();
RealMatrix A1 = A.copy();
- RealMatrix diagD = null;
- RealMatrix diagE = null;
+ RealMatrix diagD;
+ RealMatrix diagE;
RealMatrix SD = MatrixUtils.createRealIdentityMatrix(H.getRowDimension());
RealMatrix SE = MatrixUtils.createRealIdentityMatrix(A.getRowDimension());
RealVector H1norm = new ArrayRealVector(H1.getColumnDimension());
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptimizer.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptimizer.java
index 7de846eea..ba5007fc2 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptimizer.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptimizer.java
@@ -62,7 +62,7 @@ public class ADMMQPOptimizer extends QPOptimizer {
private QuadraticFunction function;
/** Problem solver. */
- private ADMMQPKKT solver;
+ private final ADMMQPKKT solver;
/** Problem convergence checker. */
private ADMMQPConvergenceChecker checker;
@@ -124,7 +124,6 @@ protected void parseOptimizationData(OptimizationData... optData) {
if (data instanceof ADMMQPOption) {
settings = (ADMMQPOption) data;
- continue;
}
}
@@ -205,7 +204,7 @@ public LagrangeSolution doOptimize() {
RealVector qw = q.copy();
RealVector ubw = ub.copy();
RealVector lbw = lb.copy();
- RealVector x =null;
+ RealVector x;
if (getStartPoint() != null) {
x = new ArrayRealVector(getStartPoint());
} else {
@@ -214,7 +213,7 @@ public LagrangeSolution doOptimize() {
ADMMQPModifiedRuizEquilibrium dec = new ADMMQPModifiedRuizEquilibrium(H, A,q);
- if (settings.getScaling()) {
+ if (settings.isScaling()) {
//
dec.normalize(settings.getEps(), settings.getScaleMaxIteration());
Hw = dec.getScaledH();
@@ -236,7 +235,7 @@ public LagrangeSolution doOptimize() {
solver.initialize(Hw, Aw, qw, me, lbw, ubw, rho, settings.getSigma(), settings.getAlpha());
RealVector xstar = null;
RealVector ystar = null;
- RealVector zstar = null;
+ RealVector zstar;
while (iterations.getCount() <= iterations.getMaximalCount()) {
ADMMQPSolution sol = solver.iterate(x, y, z);
@@ -257,7 +256,7 @@ public LagrangeSolution doOptimize() {
}
- if (settings.getScaling()) {
+ if (settings.isScaling()) {
xstar = dec.unscaleX(x);
ystar = dec.unscaleY(y);
@@ -284,28 +283,22 @@ public LagrangeSolution doOptimize() {
}
-
-
//SOLUTION POLISHING
-
- ADMMQPSolution finalSol = null;
- if (settings.getPolishing()) {
- finalSol = polish(Hw, Aw, qw, lbw, ubw, x, y, z);
- if (settings.getScaling()) {
+ if (settings.isPolishing()) {
+ ADMMQPSolution finalSol = polish(Hw, Aw, qw, lbw, ubw, x, y, z);
+ if (settings.isScaling()) {
xstar = dec.unscaleX(finalSol.getX());
ystar = dec.unscaleY(finalSol.getLambda());
- zstar = dec.unscaleZ(finalSol.getZ());
} else {
xstar = finalSol.getX();
ystar = finalSol.getLambda();
- zstar = finalSol.getZ();
}
}
for (int i = 0; i < me + mi; i++) {
- ystar.setEntry(i,-ystar.getEntry(i));
+ ystar.setEntry(i, -ystar.getEntry(i));
}
- return new LagrangeSolution(xstar,ystar,function.value(xstar));
+ return new LagrangeSolution(xstar, ystar, function.value(xstar));
}
@@ -355,17 +348,17 @@ private ADMMQPSolution polish(RealMatrix H, RealMatrix A, RealVector q, RealVect
}
}
- RealMatrix Aactive = null;
- RealVector lub = null;
+ RealMatrix Aactive;
+ RealVector lub;
RealVector ystar;
RealVector xstar = x.copy();
//!Aentry.isEmpty()
if (!Aentry.isEmpty()) {
- Aactive = new Array2DRowRealMatrix(Aentry.toArray(new double[Aentry.size()][]));
- lub = new ArrayRealVector(lubEntry.toArray(new Double[lubEntry.size()]));
- ystar = new ArrayRealVector(yEntry.toArray(new Double[yEntry.size()]));
+ Aactive = new Array2DRowRealMatrix(Aentry.toArray(new double[0][]));
+ lub = new ArrayRealVector(lubEntry.toArray(new Double[0]));
+ ystar = new ArrayRealVector(yEntry.toArray(new Double[0]));
solver.initialize(H, Aactive, q, 0, lub, lub,
settings.getSigma(), settings.getSigma(), settings.getAlpha());
@@ -396,7 +389,7 @@ private ADMMQPSolution polish(RealMatrix H, RealMatrix A, RealVector q, RealVect
*/
private boolean manageRho(int me, double rp, double rd, double maxPrimal, double maxDual) {
boolean updated = false;
- if (settings.getRhoUpdate()) {
+ if (settings.updateRho()) {
// estimate new step size
double rhonew = FastMath.min(FastMath.max(rho * FastMath.sqrt((rp * maxDual) / (rd * maxPrimal)),
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOption.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOption.java
index e752d793f..c48528a8a 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOption.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOption.java
@@ -78,7 +78,7 @@ public class ADMMQPOption implements OptimizationData {
private int scaleMaxIteration;
/** Value for adapt the weight during iterations. */
- private boolean rhoUpdate;
+ private boolean updateRho;
/** Max Value for the Weight for ADMM iteration. */
private double rhoMax;
@@ -104,7 +104,7 @@ public ADMMQPOption() {
alpha = ADMMQPOption.DEFAULT_ALPHA;
scaling = ADMMQPOption.DEFAULT_SCALING;
scaleMaxIteration = ADMMQPOption.DEFAULT_SCALING_MAX_ITERATION;
- rhoUpdate = ADMMQPOption.DEFAULT_RHO_UPDATE;
+ updateRho = ADMMQPOption.DEFAULT_RHO_UPDATE;
rhoMax = ADMMQPOption.DEFAULT_RHO_MAX;
rhoMin = ADMMQPOption.DEFAULT_RHO_MIN;
maxRhoIteration = ADMMQPOption.DEFAULT_MAX_RHO_ITERATION;
@@ -175,10 +175,10 @@ public void setScaling(final boolean scaling) {
this.scaling = scaling;
}
- /** Get scaling enabling flag.
+ /** Check if scaling is enabled.
* @return true if scaling is enabled
*/
- public boolean getScaling() {
+ public boolean isScaling() {
return scaling;
}
@@ -197,17 +197,17 @@ public int getScaleMaxIteration() {
}
/** Set weight updating flag.
- * @param rhoUpdate if true, weight is updated during iterations
+ * @param updateRho if true, weight is updated during iterations
*/
- public void setRhoUpdate(final boolean rhoUpdate) {
- this.rhoUpdate = rhoUpdate;
+ public void setUpdateRho(final boolean updateRho) {
+ this.updateRho = updateRho;
}
- /** Get weight updating flag.
+ /** Check if weight updating is enabled.
* @return true if weight is updated during iterations
*/
- public boolean getRhoUpdate() {
- return rhoUpdate;
+ public boolean updateRho() {
+ return updateRho;
}
/** Set min Value for the Weight for ADMM iteration.
@@ -259,10 +259,10 @@ public void setPolishing(final boolean polishing) {
this.polishing = polishing;
}
- /** Get polishing enabling flag.
+ /** Check if polishing is enabled.
* @return true if polishing is enabled
*/
- public boolean getPolishing() {
+ public boolean isPolishing() {
return polishing;
}
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/AbstractSQPOptimizer.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/AbstractSQPOptimizer.java
index 79c795178..1c8db9ecc 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/AbstractSQPOptimizer.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/AbstractSQPOptimizer.java
@@ -103,7 +103,6 @@ protected void parseOptimizationData(OptimizationData... optData) {
if (data instanceof SQPOption) {
settings = (SQPOption) data;
- continue;
}
}
@@ -137,7 +136,7 @@ protected RealVector lagrangianGradX(final RealVector currentGrad, final RealMat
final RealVector x, final RealVector y) {
int me = 0;
- int mi = 0;
+ int mi;
RealVector partial = currentGrad.copy();
if (getEqConstraint() != null) {
me = getEqConstraint().dimY();
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptimizerGM.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptimizerGM.java
index 67f007481..9cdafaee7 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptimizerGM.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptimizerGM.java
@@ -55,12 +55,7 @@ public class SQPOptimizerGM extends AbstractSQPOptimizer {
/** Hessian of the objective function. */
private RealMatrix hessian;
- /** Simple constructor.
- */
- public SQPOptimizerGM() {
- super();
- }
-
+ /** {@inheritDoc} */
@Override
public LagrangeSolution doOptimize() {
int me = 0;
@@ -75,9 +70,9 @@ public LagrangeSolution doOptimize() {
mi = getIqConstraint().dimY();
}
- double alfa = 1.0;
- double rho = 100.0;
- RealVector x = null;
+ double alpha;
+ double rho;
+ RealVector x;
if (getStartPoint() != null) {
x = new ArrayRealVector(getStartPoint());
} else {
@@ -105,7 +100,7 @@ public LagrangeSolution doOptimize() {
maxGrad = FastMath.max(maxGrad, inequalityEval.getLInfNorm());
}
- if (!getSettings().getUseFunHessian()) {
+ if (!getSettings().useFunHessian()) {
hessian= MatrixUtils.createRealIdentityMatrix(x.getDimension()).scalarMultiply(maxGrad);
} else {
hessian = getObj().hessian(x);
@@ -118,24 +113,24 @@ public LagrangeSolution doOptimize() {
iterations.increment();
- alfa = 1.0;
+ alpha = 1.0;
- LagrangeSolution sol1 = null;
+ LagrangeSolution sol1;
//SOLVE QP
- sol1 = solveQP(x,y);
+ sol1 = solveQP(x);
RealVector p = sol1.getX();
RealVector e = sol1.getLambda().subtract(y);
RealVector s = calculateSvector(y,rho);
RealVector se = null;
- RealMatrix jacobi = null;
RealMatrix jacobi; // NOPMD - PMD detects a false positive here
RealVector q = null;
RealVector qe = null;
//TEST CON SI SOLO PER INEQUALITY AND Q FOR ALL
if (getEqConstraint() != null) {
se = new ArrayRealVector(me);
- jacobi = constraintJacob.getSubMatrix(0, me - 1, 0, x.getDimension() - 1);
+ jacobi = constraintJacob.getSubMatrix(0, me - 1, 0, x.getDimension() - 1); // NOPMD - PMD detects a false positive here
qe = new ArrayRealVector(me);
}
if (getIqConstraint() != null) {
@@ -153,52 +148,54 @@ public LagrangeSolution doOptimize() {
g.setSubVector(0,equalityEval.subtract(getEqConstraint().getLowerBound()));
}
if (mi > 0) {
- g.setSubVector(me,inequalityEval.subtract(getIqConstraint().getLowerBound()).subtract(s));
+ g.setSubVector(me, inequalityEval.subtract(getIqConstraint().getLowerBound()).subtract(s));
}
- double rhoSegnato = 2.0 * e.getNorm()/g.getNorm();
+ double rhoSegnato = 2.0 * e.getNorm() / g.getNorm();
// rho = rhoSegnato;
//if (!(penaltyGradient<=-0.5 * p.dotProduct(hessian.operate(p))))
- while (!(penaltyGradient <= -0.5 * p.dotProduct(hessian.operate(p)))) {
+ while (penaltyGradient > -0.5 * p.dotProduct(hessian.operate(p))) {
rho = FastMath.max(rhoSegnato,2.0 * rho);
penaltyGradient = penaltyFunctionGrad(p, y, s, e, qe, q, rho);
}
//LINE SEARCH
- double alfaEval = getObj().value(x.add(p.mapMultiply(alfa)));
+ double alfaEval = getObj().value(x.add(p.mapMultiply(alpha)));
- double alfaPenalty = 0;
+ double alphaPenalty;
RealVector sineq = null;
RealVector seq = null;
if (se != null) {
- seq = se.add(qe.mapMultiply(alfa));
+ seq = se.add(qe.mapMultiply(alpha));
}
if (s != null) {
- sineq = s.add(q.mapMultiply(alfa));
+ sineq = s.add(q.mapMultiply(alpha));
}
- double currentPenalty = penaltyFunction(functionEval,x,y,se,s,rho);
- alfaPenalty = penaltyFunction(alfaEval,x.add(p.mapMultiply(alfa)),y.add(e.mapMultiply(alfa)),seq,sineq,rho);
+ double currentPenalty = penaltyFunction(functionEval, x, y, se, s, rho);
+ alphaPenalty = penaltyFunction(alfaEval,x.add(p.mapMultiply(alpha)),
+ y.add(e.mapMultiply(alpha)),
+ seq, sineq, rho);
int search = 0;
- while ((alfaPenalty -currentPenalty) >= getSettings().getMu() * alfa * penaltyGradient &&
+ while ((alphaPenalty -currentPenalty) >= getSettings().getMu() * alpha * penaltyGradient &&
search < getSettings().getMaxLineSearchIteration()) {
- double alfaStar = -0.5 * alfa * alfa * penaltyGradient/ (-alfa * penaltyGradient + alfaPenalty - currentPenalty);
+ double alfaStar = -0.5 * alpha * alpha * penaltyGradient/ (-alpha * penaltyGradient + alphaPenalty - currentPenalty);
- alfa = FastMath.max(getSettings().getB() * alfa, FastMath.min(1.0,alfaStar));
- alfaEval = getObj().value(x.add(p.mapMultiply(alfa)));
+ alpha = FastMath.max(getSettings().getB() * alpha, FastMath.min(1.0,alfaStar));
+ alfaEval = getObj().value(x.add(p.mapMultiply(alpha)));
if (se != null) {
- seq = se.add(qe.mapMultiply(alfa));
+ seq = se.add(qe.mapMultiply(alpha));
}
if (s != null) {
- sineq = s.add(q.mapMultiply(alfa));
+ sineq = s.add(q.mapMultiply(alpha));
}
- alfaPenalty = penaltyFunction(alfaEval,x.add(p.mapMultiply(alfa)),y.add(e.mapMultiply(alfa)),seq,sineq,rho);
+ alphaPenalty = penaltyFunction(alfaEval,x.add(p.mapMultiply(alpha)),y.add(e.mapMultiply(alpha)),seq,sineq,rho);
search = search + 1;
@@ -206,36 +203,35 @@ search < getSettings().getMaxLineSearchIteration()) {
if (getSettings().getConvCriteria() == 0) {
- if (p.mapMultiply(alfa).dotProduct(hessian.operate(p.mapMultiply(alfa))) < getSettings().getEps() * getSettings().getEps()) {
+ if (p.mapMultiply(alpha).dotProduct(hessian.operate(p.mapMultiply(alpha))) < getSettings().getEps() * getSettings().getEps()) {
break;
}
} else {
- if (alfa * p.getNorm() oldPenalty = new ArrayList();
+ ArrayList oldPenalty = new ArrayList<>();
//INITIAL VALUES
double functionEval = this.getObj().value(x);
functionGradient = this.getObj().gradient(x);
@@ -117,7 +112,7 @@ public LagrangeSolution doOptimize() {
maxGrad = FastMath.max(maxGrad, inequalityEval.getLInfNorm());
}
- if (!getSettings().getUseFunHessian()) {
+ if (!getSettings().useFunHessian()) {
hessian = MatrixUtils.createRealIdentityMatrix(x.getDimension());
} else {
hessian = this.getObj().hessian(x);
@@ -126,13 +121,12 @@ public LagrangeSolution doOptimize() {
for (int i = 0; i < this.getMaxIterations(); i++) {
iterations.increment();
-
- alfa = 1.0;
+ alpha = 1.0;
LagrangeSolution sol1 = null;
int qpLoop = 0;
double sigma = maxGrad;
- double currentPenaltyGrad = 0;
+ double currentPenaltyGrad;
//LOOP TO FIND SOLUTION WITH SIGMA getSettings().getSigmaMax() && qpLoop < getSettings().getQpMaxLoop()) {
@@ -152,7 +146,7 @@ public LagrangeSolution doOptimize() {
if (qpLoop == getSettings().getQpMaxLoop()) {
dx = (MatrixUtils.inverse(hessian).operate(penaltyFunctionGradX(functionGradient, x, y, r))).mapMultiply(-1.0);
- dy = y.subtract(penaltyFunctionGradY(functionGradient, x, y, r));
+ dy = y.subtract(penaltyFunctionGradY(x, y, r));
sigma = 0;
} else {
@@ -162,31 +156,30 @@ public LagrangeSolution doOptimize() {
sigma = 0;
}
dy = sol1.getLambda();
- r = updateRj(hessian, x, y, dx, dy, r, sigma);
+ r = updateRj(hessian, y, dx, dy, r, sigma);
}
currentPenaltyGrad = penaltyFunctionGrad(functionGradient, dx, dy, x, y, r);
int search = 0;
- rho = updateRho(dx, dy, hessian, constraintJacob, sigma, rho);
+ rho = updateRho(dx, dy, hessian, constraintJacob, sigma);
double currentPenalty = penaltyFunction(functionEval, 0, x, y, dx, dy.subtract(y), r);
- double alfaF = this.getObj().value(x.add(dx.mapMultiply(alfa)));
- double alfaPenalty = penaltyFunction(alfaF, alfa, x, y, dx, dy.subtract(y), r);
+ double alphaF = this.getObj().value(x.add(dx.mapMultiply(alpha)));
+ double alfaPenalty = penaltyFunction(alphaF, alpha, x, y, dx, dy.subtract(y), r);
//LINE SEARCH
- while ((alfaPenalty - currentPenalty) >= getSettings().getMu() * alfa * currentPenaltyGrad &&
+ while ((alfaPenalty - currentPenalty) >= getSettings().getMu() * alpha * currentPenaltyGrad &&
search < getSettings().getMaxLineSearchIteration()) {
- double alfaStar = -0.5 * alfa * alfa * currentPenaltyGrad / (-alfa * currentPenaltyGrad + alfaPenalty - currentPenalty);
+ double alfaStar = -0.5 * alpha * alpha * currentPenaltyGrad / (-alpha * currentPenaltyGrad + alfaPenalty - currentPenalty);
- alfa = FastMath.max(getSettings().getB() * alfa, FastMath.min(1.0, alfaStar));
- //alfa = FastMath.min(1.0, FastMath.max(this.b * alfa, alfaStar));
- alfaF = this.getObj().value(x.add(dx.mapMultiply(alfa)));
- alfaPenalty = penaltyFunction(alfaF, alfa, x, y, dx, dy.subtract(y), r);
+ alpha = FastMath.max(getSettings().getB() * alpha, FastMath.min(1.0, alfaStar));
+ alphaF = this.getObj().value(x.add(dx.mapMultiply(alpha)));
+ alfaPenalty = penaltyFunction(alphaF, alpha, x, y, dx, dy.subtract(y), r);
search = search + 1;
}
@@ -194,13 +187,13 @@ search < getSettings().getMaxLineSearchIteration()) {
if (getSettings().getConvCriteria() == 0) {
- if (dx.mapMultiply(alfa).dotProduct(hessian.operate(dx.mapMultiply(alfa))) < getSettings().getEps() * getSettings().getEps()) {
+ if (dx.mapMultiply(alpha).dotProduct(hessian.operate(dx.mapMultiply(alpha))) < getSettings().getEps() * getSettings().getEps()) {
// x = x.add(dx.mapMultiply(alfa));
// y = y.add((dy.subtract(y)).mapMultiply(alfa));
break;
}
} else {
- if (alfa * dx.getNorm() < getSettings().getEps() * (1 + x.getNorm())) {
+ if (alpha * dx.getNorm() < getSettings().getEps() * (1 + x.getNorm())) {
// x = x.add(dx.mapMultiply(alfa));
// y = y.add((dy.subtract(y)).mapMultiply(alfa));
break;
@@ -215,25 +208,23 @@ search < getSettings().getMaxLineSearchIteration()) {
boolean notMonotone = false;
if (search == getSettings().getMaxLineSearchIteration()) {
- search = 0;
-
- alfa = 1.0;
+ alpha = 1.0;
search = 0;
Double max = Collections.max(oldPenalty);
if (oldPenalty.size() == 1) {
max = max * 1.3;
}
- while ((alfaPenalty - max) >= getSettings().getMu() * alfa * currentPenaltyGrad &&
+ while ((alfaPenalty - max) >= getSettings().getMu() * alpha * currentPenaltyGrad &&
search < getSettings().getMaxLineSearchIteration()) {
- double alfaStar = -0.5 * alfa * alfa * currentPenaltyGrad / (-alfa * currentPenaltyGrad + alfaPenalty - currentPenalty);
+ double alfaStar = -0.5 * alpha * alpha * currentPenaltyGrad / (-alpha * currentPenaltyGrad + alfaPenalty - currentPenalty);
- alfa = FastMath.max(getSettings().getB() * alfa, FastMath.min(1.0, alfaStar));
+ alpha = FastMath.max(getSettings().getB() * alpha, FastMath.min(1.0, alfaStar));
// alfa = FastMath.min(1.0, FastMath.max(this.b * alfa, alfaStar));
// alfa = FastMath.max(this.b * alfa, alfaStar);
- alfaF = this.getObj().value(x.add(dx.mapMultiply(alfa)));
- alfaPenalty = penaltyFunction(alfaF, alfa, x, y, dx, dy.subtract(y), r);
+ alphaF = this.getObj().value(x.add(dx.mapMultiply(alpha)));
+ alfaPenalty = penaltyFunction(alphaF, alpha, x, y, dx, dy.subtract(y), r);
search = search + 1;
}
@@ -245,28 +236,28 @@ search < getSettings().getMaxLineSearchIteration()) {
RealVector oldGradient = functionGradient;
RealMatrix oldJacob = constraintJacob;
// RealVector old1 = penaltyFunctionGradX(oldGradient,x, y.add((dy.subtract(y)).mapMultiply(alfa)),r);
- RealVector old1 = lagrangianGradX(oldGradient, oldJacob, x, y.add((dy.subtract(y)).mapMultiply(alfa)));
- functionEval = alfaF;
- functionGradient = this.getObj().gradient(x.add(dx.mapMultiply(alfa)));
+ RealVector old1 = lagrangianGradX(oldGradient, oldJacob, x, y.add((dy.subtract(y)).mapMultiply(alpha)));
+ functionEval = alphaF;
+ functionGradient = this.getObj().gradient(x.add(dx.mapMultiply(alpha)));
if (this.getEqConstraint() != null) {
- equalityEval = this.getEqConstraint().value(x.add(dx.mapMultiply(alfa)));
+ equalityEval = this.getEqConstraint().value(x.add(dx.mapMultiply(alpha)));
}
if (this.getIqConstraint() != null) {
- inequalityEval = this.getIqConstraint().value(x.add(dx.mapMultiply(alfa)));
+ inequalityEval = this.getIqConstraint().value(x.add(dx.mapMultiply(alpha)));
}
- constraintJacob = computeJacobianConstraint(x.add(dx.mapMultiply(alfa)));
+ constraintJacob = computeJacobianConstraint(x.add(dx.mapMultiply(alpha)));
// RealVector new1 = penaltyFunctionGradX(functionGradient,x.add(dx.mapMultiply(alfa)), y.add((dy.subtract(y)).mapMultiply(alfa)),r);
- RealVector new1 = lagrangianGradX(functionGradient, constraintJacob, x.add(dx.mapMultiply(alfa)), y.add((dy.subtract(y)).mapMultiply(alfa)));
+ RealVector new1 = lagrangianGradX(functionGradient, constraintJacob, x.add(dx.mapMultiply(alpha)), y.add((dy.subtract(y)).mapMultiply(alpha)));
if (failedSearch == 2) {
hessian = MatrixUtils.createRealIdentityMatrix(x.getDimension());
failedSearch = 0;
}
- if (notMonotone == false) {
+ if (!notMonotone) {
// if (iterations.getCount()==1)
// {
// RealVector yfirst = new1.subtract(old1);
@@ -274,7 +265,7 @@ search < getSettings().getMaxLineSearchIteration()) {
// double scaleFactor = yfirst.dotProduct(sfirst)/yfirst.dotProduct(yfirst);
// hessian = hessian.scalarMultiply(scaleFactor);
// }
- hessian = BFGSFormula(hessian, dx, alfa, new1, old1);
+ hessian = BFGSFormula(hessian, dx, alpha, new1, old1);
}
//STORE PENALTY IN QUEQUE TO PERFORM NOT MONOTONE LINE SEARCH IF NECESSARY
@@ -284,8 +275,8 @@ search < getSettings().getMaxLineSearchIteration()) {
oldPenalty.remove(0);
}
- x = x.add(dx.mapMultiply(alfa));
- y = y.add((dy.subtract(y)).mapMultiply(alfa));
+ x = x.add(dx.mapMultiply(alpha));
+ y = y.add((dy.subtract(y)).mapMultiply(alpha));
}
return new LagrangeSolution(x, y, functionEval);
@@ -295,7 +286,7 @@ private double penaltyFunction(double currentF, double alfa, RealVector x, RealV
// the set of constraints is the same as the previous one but they must be evaluated with the increment
int me = 0;
- int mi = 0;
+ int mi;
double partial = currentF;
RealVector yalfa = y.add(uv.mapMultiply(alfa));
if (getEqConstraint() != null) {
@@ -338,7 +329,7 @@ private double penaltyFunction(double currentF, double alfa, RealVector x, RealV
private double penaltyFunctionGrad(RealVector currentGrad, RealVector dx, RealVector dy, RealVector x, RealVector y, RealVector r) {
int me = 0;
- int mi = 0;
+ int mi;
double partial = currentGrad.dotProduct(dx);
if (getEqConstraint() != null) {
me = getEqConstraint().dimY();
@@ -389,7 +380,7 @@ private double penaltyFunctionGrad(RealVector currentGrad, RealVector dx, RealVe
private RealVector penaltyFunctionGradX(RealVector currentGrad, RealVector x, RealVector y, RealVector r) {
int me = 0;
- int mi = 0;
+ int mi;
RealVector partial = currentGrad.copy();
if (getEqConstraint() != null) {
me = getEqConstraint().dimY();
@@ -432,10 +423,10 @@ private RealVector penaltyFunctionGradX(RealVector currentGrad, RealVector x, Re
return partial;
}
- private RealVector penaltyFunctionGradY(RealVector currentGrad, RealVector x, RealVector y, RealVector r) {
+ private RealVector penaltyFunctionGradY(RealVector x, RealVector y, RealVector r) {
int me = 0;
- int mi = 0;
+ int mi;
RealVector partial = new ArrayRealVector(y.getDimension());
if (getEqConstraint() != null) {
me = getEqConstraint().dimY();
@@ -472,13 +463,11 @@ private RealVector penaltyFunctionGradY(RealVector currentGrad, RealVector x, Re
return partial;
}
- private RealVector updateRj(RealMatrix H, RealVector x, RealVector y, RealVector dx, RealVector dy, RealVector r, double additional) { //r = updateRj(currentH,dx,y,u,r,sigm);
+ private RealVector updateRj(RealMatrix H, RealVector y, RealVector dx, RealVector dy, RealVector r, double additional) { //r = updateRj(currentH,dx,y,u,r,sigm);
//CALCULATE SIGMA VECTOR THAT DEPEND FROM ITERATION
RealVector sigma = new ArrayRealVector(r.getDimension());
for (int i = 0; i < sigma.getDimension(); i++) {
- double appoggio = 0;
- // if (r.getEntry(i)>epsilon)
- appoggio = iterations.getCount() / FastMath.sqrt(r.getEntry(i));
+ final double appoggio = iterations.getCount() / FastMath.sqrt(r.getEntry(i));
sigma.setEntry(i, FastMath.min(1.0, appoggio));
}
@@ -512,7 +501,7 @@ private RealVector penaltyFunctionGradY(RealVector currentGrad, RealVector x, Re
return r1;
}
- private double updateRho(RealVector dx, RealVector dy, RealMatrix H, RealMatrix jacobianG, double additionalVariable, double rho) {
+ private double updateRho(RealVector dx, RealVector dy, RealMatrix H, RealMatrix jacobianG, double additionalVariable) {
double num = 10.0 * FastMath.pow(dx.dotProduct(jacobianG.transpose().operate(dy)), 2);
double den = (1.0 - additionalVariable) * (1.0 - additionalVariable) * dx.dotProduct(H.operate(dx));
@@ -541,7 +530,7 @@ private LagrangeSolution solveQP(RealVector x, RealVector y, double rho) {
}
// violated = true;
- if (me > 0 || violated == true) {
+ if (me > 0 || violated) {
add = 1;
}
@@ -556,7 +545,7 @@ private LagrangeSolution solveQP(RealVector x, RealVector y, double rho) {
g1.setSubVector(0, g);
LinearEqualityConstraint eqc = null;
- RealVector conditioneq = null;
+ RealVector conditioneq;
if (getEqConstraint() != null) {
RealMatrix eqJacob = constraintJacob.getSubMatrix(0, me - 1, 0, x.getDimension() - 1);
@@ -611,20 +600,20 @@ private LagrangeSolution solveQP(RealVector x, RealVector y, double rho) {
}
QuadraticFunction q = new QuadraticFunction(H1, g1, 0);
- LagrangeSolution sol = null;
- double sigma = 0;
ADMMQPOptimizer solver = new ADMMQPOptimizer();
- sol = solver.optimize(new ObjectiveFunction(q), iqc, eqc, bc);
+ LagrangeSolution sol = solver.optimize(new ObjectiveFunction(q), iqc, eqc, bc);
+ final double sigma;
if (add == 1) {
sigma = sol.getX().getEntry(x.getDimension());
} else {
sigma = 0;
}
- LagrangeSolution solnew = new LagrangeSolution(sol.getX().getSubVector(0, x.getDimension()), sol.getLambda().getSubVector(0, me + mi), sigma);
- return solnew;
+ return new LagrangeSolution(sol.getX().getSubVector(0, x.getDimension()),
+ sol.getLambda().getSubVector(0, me + mi),
+ sigma);
}
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOption.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOption.java
index b9c9d71f2..d115724d0 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOption.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOption.java
@@ -219,7 +219,7 @@ public void setUseFunHessian(final boolean useFunHessian) {
/** Check if using direct the function Hessian is enabled or disabled.
* @return true if using direct the function Hessian is enabled
*/
- public boolean getUseFunHessian() {
+ public boolean useFunHessian() {
return useFunHessian;
}
diff --git a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/VectorDifferentiableFunction.java b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/VectorDifferentiableFunction.java
index c5665ab74..b76041c52 100644
--- a/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/VectorDifferentiableFunction.java
+++ b/hipparchus-optim/src/main/java/org/hipparchus/optim/nonlinear/vector/constrained/VectorDifferentiableFunction.java
@@ -56,6 +56,7 @@ public interface VectorDifferentiableFunction extends MultivariateVectorFunction
* @param x a point to evaluate this function at.
* @return the value of this function at (x)
*/
+ @Override
default double[] value(final double[] x) {
return value(new ArrayRealVector(x, false)).toArray();
}
diff --git a/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptionTest.java b/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptionTest.java
index bff9413e3..5264f7a20 100644
--- a/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptionTest.java
+++ b/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/ADMMQPOptionTest.java
@@ -52,12 +52,12 @@ public void testSettersBoolean() {
// WHEN
final boolean expectedValue = true;
option.setPolishing(expectedValue);
- option.setRhoUpdate(expectedValue);
+ option.setUpdateRho(expectedValue);
option.setScaling(expectedValue);
// THEN
- Assert.assertEquals(expectedValue, option.getPolishing());
- Assert.assertEquals(expectedValue, option.getRhoUpdate());
- Assert.assertEquals(expectedValue, option.getScaling());
+ Assert.assertEquals(expectedValue, option.isPolishing());
+ Assert.assertEquals(expectedValue, option.updateRho());
+ Assert.assertEquals(expectedValue, option.isScaling());
}
@Test
diff --git a/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/BoundedConstraintTest.java b/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/BoundedConstraintTest.java
index b6f47414c..911f2fe02 100644
--- a/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/BoundedConstraintTest.java
+++ b/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/BoundedConstraintTest.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Hipparchus project under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The Hipparchus project licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package org.hipparchus.optim.nonlinear.vector.constrained;
import org.hipparchus.linear.MatrixUtils;
diff --git a/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptionTest.java b/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptionTest.java
index 6b1ae4917..3802e4803 100644
--- a/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptionTest.java
+++ b/hipparchus-optim/src/test/java/org/hipparchus/optim/nonlinear/vector/constrained/SQPOptionTest.java
@@ -50,7 +50,7 @@ public void testSettersBoolean() {
final boolean expectedValue = true;
option.setUseFunHessian(expectedValue);
// THEN
- Assert.assertEquals(expectedValue, option.getUseFunHessian());
+ Assert.assertEquals(expectedValue, option.useFunHessian());
}
@Test
diff --git a/hipparchus-stat/src/main/java/org/hipparchus/stat/descriptive/rank/PSquarePercentile.java b/hipparchus-stat/src/main/java/org/hipparchus/stat/descriptive/rank/PSquarePercentile.java
index 7b8354569..73bef7fbe 100644
--- a/hipparchus-stat/src/main/java/org/hipparchus/stat/descriptive/rank/PSquarePercentile.java
+++ b/hipparchus-stat/src/main/java/org/hipparchus/stat/descriptive/rank/PSquarePercentile.java
@@ -469,23 +469,21 @@ public double getPercentileValue() {
* sample fits
*/
private int findCellAndUpdateMinMax(final double observation) {
- int k = -1;
if (observation < height(1)) {
markerArray[1].markerHeight = observation;
- k = 1;
+ return 1;
} else if (observation < height(2)) {
- k = 1;
+ return 1;
} else if (observation < height(3)) {
- k = 2;
+ return 2;
} else if (observation < height(4)) {
- k = 3;
+ return 3;
} else if (observation <= height(5)) {
- k = 4;
+ return 4;
} else {
markerArray[5].markerHeight = observation;
- k = 4;
+ return 4;
}
- return k;
}
/**