Remove deprecated classes in optim package.
This commit is contained in:
parent
8a76453566
commit
0737cf82db
|
@ -92,6 +92,7 @@ public class LinearObjectiveFunction
|
||||||
* @param point Point at which linear equation must be evaluated.
|
* @param point Point at which linear equation must be evaluated.
|
||||||
* @return the value of the linear equation at the current point.
|
* @return the value of the linear equation at the current point.
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public double value(final double[] point) {
|
public double value(final double[] point) {
|
||||||
return value(new ArrayRealVector(point, false));
|
return value(new ArrayRealVector(point, false));
|
||||||
}
|
}
|
||||||
|
|
|
@ -112,6 +112,7 @@ public class LineSearch {
|
||||||
final double[] direction) {
|
final double[] direction) {
|
||||||
final int n = startPoint.length;
|
final int n = startPoint.length;
|
||||||
final UnivariateFunction f = new UnivariateFunction() {
|
final UnivariateFunction f = new UnivariateFunction() {
|
||||||
|
@Override
|
||||||
public double value(double alpha) {
|
public double value(double alpha) {
|
||||||
final double[] x = new double[n];
|
final double[] x = new double[n];
|
||||||
for (int i = 0; i < n; i++) {
|
for (int i = 0; i < n; i++) {
|
||||||
|
|
|
@ -16,10 +16,10 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.commons.math4.optim.nonlinear.scalar;
|
package org.apache.commons.math4.optim.nonlinear.scalar;
|
||||||
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collections;
|
||||||
import java.util.Comparator;
|
import java.util.Comparator;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
import org.apache.commons.math4.exception.NotStrictlyPositiveException;
|
import org.apache.commons.math4.exception.NotStrictlyPositiveException;
|
||||||
import org.apache.commons.math4.exception.NullArgumentException;
|
import org.apache.commons.math4.exception.NullArgumentException;
|
||||||
|
@ -94,6 +94,7 @@ public class MultiStartMultivariateOptimizer
|
||||||
*/
|
*/
|
||||||
private Comparator<PointValuePair> getPairComparator() {
|
private Comparator<PointValuePair> getPairComparator() {
|
||||||
return new Comparator<PointValuePair>() {
|
return new Comparator<PointValuePair>() {
|
||||||
|
@Override
|
||||||
public int compare(final PointValuePair o1,
|
public int compare(final PointValuePair o1,
|
||||||
final PointValuePair o2) {
|
final PointValuePair o2) {
|
||||||
if (o1 == null) {
|
if (o1 == null) {
|
||||||
|
|
|
@ -175,6 +175,7 @@ public class MultivariateFunctionMappingAdapter
|
||||||
* @return underlying function value
|
* @return underlying function value
|
||||||
* @see #unboundedToBounded(double[])
|
* @see #unboundedToBounded(double[])
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public double value(double[] point) {
|
public double value(double[] point) {
|
||||||
return bounded.value(unboundedToBounded(point));
|
return bounded.value(unboundedToBounded(point));
|
||||||
}
|
}
|
||||||
|
|
|
@ -158,6 +158,7 @@ public class MultivariateFunctionPenaltyAdapter
|
||||||
* @param point unbounded point
|
* @param point unbounded point
|
||||||
* @return either underlying function value or penalty function value
|
* @return either underlying function value or penalty function value
|
||||||
*/
|
*/
|
||||||
|
@Override
|
||||||
public double value(double[] point) {
|
public double value(double[] point) {
|
||||||
|
|
||||||
for (int i = 0; i < scale.length; ++i) {
|
for (int i = 0; i < scale.length; ++i) {
|
||||||
|
|
|
@ -17,7 +17,6 @@
|
||||||
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.scalar.gradient;
|
package org.apache.commons.math4.optim.nonlinear.scalar.gradient;
|
||||||
|
|
||||||
import org.apache.commons.math4.analysis.solvers.UnivariateSolver;
|
|
||||||
import org.apache.commons.math4.exception.MathInternalError;
|
import org.apache.commons.math4.exception.MathInternalError;
|
||||||
import org.apache.commons.math4.exception.MathUnsupportedOperationException;
|
import org.apache.commons.math4.exception.MathUnsupportedOperationException;
|
||||||
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
||||||
|
@ -77,40 +76,6 @@ public class NonLinearConjugateGradientOptimizer
|
||||||
POLAK_RIBIERE
|
POLAK_RIBIERE
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* The initial step is a factor with respect to the search direction
|
|
||||||
* (which itself is roughly related to the gradient of the function).
|
|
||||||
* <br/>
|
|
||||||
* It is used to find an interval that brackets the optimum in line
|
|
||||||
* search.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated As of v3.3, this class is not used anymore.
|
|
||||||
* This setting is replaced by the {@code initialBracketingRange}
|
|
||||||
* argument to the new constructors.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static class BracketingStep implements OptimizationData {
|
|
||||||
/** Initial step. */
|
|
||||||
private final double initialStep;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param step Initial step for the bracket search.
|
|
||||||
*/
|
|
||||||
public BracketingStep(double step) {
|
|
||||||
initialStep = step;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the initial step.
|
|
||||||
*
|
|
||||||
* @return the initial step.
|
|
||||||
*/
|
|
||||||
public double getBracketingStep() {
|
|
||||||
return initialStep;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor with default tolerances for the line search (1e-8) and
|
* Constructor with default tolerances for the line search (1e-8) and
|
||||||
* {@link IdentityPreconditioner preconditioner}.
|
* {@link IdentityPreconditioner preconditioner}.
|
||||||
|
@ -130,27 +95,6 @@ public class NonLinearConjugateGradientOptimizer
|
||||||
new IdentityPreconditioner());
|
new IdentityPreconditioner());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructor with default {@link IdentityPreconditioner preconditioner}.
|
|
||||||
*
|
|
||||||
* @param updateFormula formula to use for updating the β parameter,
|
|
||||||
* must be one of {@link Formula#FLETCHER_REEVES} or
|
|
||||||
* {@link Formula#POLAK_RIBIERE}.
|
|
||||||
* @param checker Convergence checker.
|
|
||||||
* @param lineSearchSolver Solver to use during line search.
|
|
||||||
* @deprecated as of 3.3. Please use
|
|
||||||
* {@link #NonLinearConjugateGradientOptimizer(Formula,ConvergenceChecker,double,double,double)} instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public NonLinearConjugateGradientOptimizer(final Formula updateFormula,
|
|
||||||
ConvergenceChecker<PointValuePair> checker,
|
|
||||||
final UnivariateSolver lineSearchSolver) {
|
|
||||||
this(updateFormula,
|
|
||||||
checker,
|
|
||||||
lineSearchSolver,
|
|
||||||
new IdentityPreconditioner());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor with default {@link IdentityPreconditioner preconditioner}.
|
* Constructor with default {@link IdentityPreconditioner preconditioner}.
|
||||||
*
|
*
|
||||||
|
@ -180,29 +124,6 @@ public class NonLinearConjugateGradientOptimizer
|
||||||
new IdentityPreconditioner());
|
new IdentityPreconditioner());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* @param updateFormula formula to use for updating the β parameter,
|
|
||||||
* must be one of {@link Formula#FLETCHER_REEVES} or
|
|
||||||
* {@link Formula#POLAK_RIBIERE}.
|
|
||||||
* @param checker Convergence checker.
|
|
||||||
* @param lineSearchSolver Solver to use during line search.
|
|
||||||
* @param preconditioner Preconditioner.
|
|
||||||
* @deprecated as of 3.3. Please use
|
|
||||||
* {@link #NonLinearConjugateGradientOptimizer(Formula,ConvergenceChecker,double,double,double,Preconditioner)} instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public NonLinearConjugateGradientOptimizer(final Formula updateFormula,
|
|
||||||
ConvergenceChecker<PointValuePair> checker,
|
|
||||||
final UnivariateSolver lineSearchSolver,
|
|
||||||
final Preconditioner preconditioner) {
|
|
||||||
this(updateFormula,
|
|
||||||
checker,
|
|
||||||
lineSearchSolver.getRelativeAccuracy(),
|
|
||||||
lineSearchSolver.getAbsoluteAccuracy(),
|
|
||||||
lineSearchSolver.getAbsoluteAccuracy(),
|
|
||||||
preconditioner);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @param updateFormula formula to use for updating the β parameter,
|
* @param updateFormula formula to use for updating the β parameter,
|
||||||
* must be one of {@link Formula#FLETCHER_REEVES} or
|
* must be one of {@link Formula#FLETCHER_REEVES} or
|
||||||
|
|
|
@ -131,6 +131,7 @@ public class SimplexOptimizer extends MultivariateOptimizer {
|
||||||
// evaluations counter.
|
// evaluations counter.
|
||||||
final MultivariateFunction evalFunc
|
final MultivariateFunction evalFunc
|
||||||
= new MultivariateFunction() {
|
= new MultivariateFunction() {
|
||||||
|
@Override
|
||||||
public double value(double[] point) {
|
public double value(double[] point) {
|
||||||
return computeObjectiveValue(point);
|
return computeObjectiveValue(point);
|
||||||
}
|
}
|
||||||
|
@ -139,6 +140,7 @@ public class SimplexOptimizer extends MultivariateOptimizer {
|
||||||
final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
|
final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
|
||||||
final Comparator<PointValuePair> comparator
|
final Comparator<PointValuePair> comparator
|
||||||
= new Comparator<PointValuePair>() {
|
= new Comparator<PointValuePair>() {
|
||||||
|
@Override
|
||||||
public int compare(final PointValuePair o1,
|
public int compare(final PointValuePair o1,
|
||||||
final PointValuePair o2) {
|
final PointValuePair o2) {
|
||||||
final double v1 = o1.getValue();
|
final double v1 = o1.getValue();
|
||||||
|
|
|
@ -1,116 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
|
||||||
import org.apache.commons.math4.exception.DimensionMismatchException;
|
|
||||||
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
|
||||||
import org.apache.commons.math4.optim.ConvergenceChecker;
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Base class for implementing optimizers for multivariate vector
|
|
||||||
* differentiable functions.
|
|
||||||
* It contains boiler-plate code for dealing with Jacobian evaluation.
|
|
||||||
* It assumes that the rows of the Jacobian matrix iterate on the model
|
|
||||||
* functions while the columns iterate on the parameters; thus, the numbers
|
|
||||||
* of rows is equal to the dimension of the {@link Target} while the
|
|
||||||
* number of columns is equal to the dimension of the
|
|
||||||
* {@link org.apache.commons.math4.optim.InitialGuess InitialGuess}.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public abstract class JacobianMultivariateVectorOptimizer
|
|
||||||
extends MultivariateVectorOptimizer {
|
|
||||||
/**
|
|
||||||
* Jacobian of the model function.
|
|
||||||
*/
|
|
||||||
private MultivariateMatrixFunction jacobian;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param checker Convergence checker.
|
|
||||||
*/
|
|
||||||
protected JacobianMultivariateVectorOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
|
|
||||||
super(checker);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes the Jacobian matrix.
|
|
||||||
*
|
|
||||||
* @param params Point at which the Jacobian must be evaluated.
|
|
||||||
* @return the Jacobian at the specified point.
|
|
||||||
*/
|
|
||||||
protected double[][] computeJacobian(final double[] params) {
|
|
||||||
return jacobian.value(params);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* @param optData Optimization data. In addition to those documented in
|
|
||||||
* {@link MultivariateVectorOptimizer#optimize(OptimizationData...)}
|
|
||||||
* MultivariateOptimizer}, this method will register the following data:
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link ModelFunctionJacobian}</li>
|
|
||||||
* </ul>
|
|
||||||
* @return {@inheritDoc}
|
|
||||||
* @throws TooManyEvaluationsException if the maximal number of
|
|
||||||
* evaluations is exceeded.
|
|
||||||
* @throws DimensionMismatchException if the initial guess, target, and weight
|
|
||||||
* arguments have inconsistent dimensions.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public PointVectorValuePair optimize(OptimizationData... optData)
|
|
||||||
throws TooManyEvaluationsException,
|
|
||||||
DimensionMismatchException {
|
|
||||||
// Set up base class and perform computation.
|
|
||||||
return super.optimize(optData);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Scans the list of (required and optional) optimization data that
|
|
||||||
* characterize the problem.
|
|
||||||
*
|
|
||||||
* @param optData Optimization data.
|
|
||||||
* The following data will be looked for:
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link ModelFunctionJacobian}</li>
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected void parseOptimizationData(OptimizationData... optData) {
|
|
||||||
// Allow base class to register its own data.
|
|
||||||
super.parseOptimizationData(optData);
|
|
||||||
|
|
||||||
// The existing values (as set by the previous call) are reused if
|
|
||||||
// not provided in the argument list.
|
|
||||||
for (OptimizationData data : optData) {
|
|
||||||
if (data instanceof ModelFunctionJacobian) {
|
|
||||||
jacobian = ((ModelFunctionJacobian) data).getModelFunctionJacobian();
|
|
||||||
// If more data must be parsed, this statement _must_ be
|
|
||||||
// changed to "continue".
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,51 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Model (vector) function to be optimized.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class ModelFunction implements OptimizationData {
|
|
||||||
/** Function to be optimized. */
|
|
||||||
private final MultivariateVectorFunction model;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param m Model function to be optimized.
|
|
||||||
*/
|
|
||||||
public ModelFunction(MultivariateVectorFunction m) {
|
|
||||||
model = m;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the model function to be optimized.
|
|
||||||
*
|
|
||||||
* @return the model function.
|
|
||||||
*/
|
|
||||||
public MultivariateVectorFunction getModelFunction() {
|
|
||||||
return model;
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,51 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Jacobian of the model (vector) function to be optimized.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class ModelFunctionJacobian implements OptimizationData {
|
|
||||||
/** Function to be optimized. */
|
|
||||||
private final MultivariateMatrixFunction jacobian;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param j Jacobian of the model function to be optimized.
|
|
||||||
*/
|
|
||||||
public ModelFunctionJacobian(MultivariateMatrixFunction j) {
|
|
||||||
jacobian = j;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the Jacobian of the model function to be optimized.
|
|
||||||
*
|
|
||||||
* @return the model function Jacobian.
|
|
||||||
*/
|
|
||||||
public MultivariateMatrixFunction getModelFunctionJacobian() {
|
|
||||||
return jacobian;
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,122 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Comparator;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.exception.NotStrictlyPositiveException;
|
|
||||||
import org.apache.commons.math4.exception.NullArgumentException;
|
|
||||||
import org.apache.commons.math4.linear.ArrayRealVector;
|
|
||||||
import org.apache.commons.math4.linear.RealMatrix;
|
|
||||||
import org.apache.commons.math4.linear.RealVector;
|
|
||||||
import org.apache.commons.math4.optim.BaseMultiStartMultivariateOptimizer;
|
|
||||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
|
||||||
import org.apache.commons.math4.random.RandomVectorGenerator;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Multi-start optimizer for a (vector) model function.
|
|
||||||
*
|
|
||||||
* This class wraps an optimizer in order to use it several times in
|
|
||||||
* turn with different starting points (trying to avoid being trapped
|
|
||||||
* in a local extremum when looking for a global one).
|
|
||||||
*
|
|
||||||
* @since 3.0
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class MultiStartMultivariateVectorOptimizer
|
|
||||||
extends BaseMultiStartMultivariateOptimizer<PointVectorValuePair> {
|
|
||||||
/** Underlying optimizer. */
|
|
||||||
private final MultivariateVectorOptimizer optimizer;
|
|
||||||
/** Found optima. */
|
|
||||||
private final List<PointVectorValuePair> optima = new ArrayList<PointVectorValuePair>();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create a multi-start optimizer from a single-start optimizer.
|
|
||||||
*
|
|
||||||
* @param optimizer Single-start optimizer to wrap.
|
|
||||||
* @param starts Number of starts to perform.
|
|
||||||
* If {@code starts == 1}, the result will be same as if {@code optimizer}
|
|
||||||
* is called directly.
|
|
||||||
* @param generator Random vector generator to use for restarts.
|
|
||||||
* @throws NullArgumentException if {@code optimizer} or {@code generator}
|
|
||||||
* is {@code null}.
|
|
||||||
* @throws NotStrictlyPositiveException if {@code starts < 1}.
|
|
||||||
*/
|
|
||||||
public MultiStartMultivariateVectorOptimizer(final MultivariateVectorOptimizer optimizer,
|
|
||||||
final int starts,
|
|
||||||
final RandomVectorGenerator generator)
|
|
||||||
throws NullArgumentException,
|
|
||||||
NotStrictlyPositiveException {
|
|
||||||
super(optimizer, starts, generator);
|
|
||||||
this.optimizer = optimizer;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public PointVectorValuePair[] getOptima() {
|
|
||||||
Collections.sort(optima, getPairComparator());
|
|
||||||
return optima.toArray(new PointVectorValuePair[0]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected void store(PointVectorValuePair optimum) {
|
|
||||||
optima.add(optimum);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected void clear() {
|
|
||||||
optima.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @return a comparator for sorting the optima.
|
|
||||||
*/
|
|
||||||
private Comparator<PointVectorValuePair> getPairComparator() {
|
|
||||||
return new Comparator<PointVectorValuePair>() {
|
|
||||||
private final RealVector target = new ArrayRealVector(optimizer.getTarget(), false);
|
|
||||||
private final RealMatrix weight = optimizer.getWeight();
|
|
||||||
|
|
||||||
public int compare(final PointVectorValuePair o1,
|
|
||||||
final PointVectorValuePair o2) {
|
|
||||||
if (o1 == null) {
|
|
||||||
return (o2 == null) ? 0 : 1;
|
|
||||||
} else if (o2 == null) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
return Double.compare(weightedResidual(o1),
|
|
||||||
weightedResidual(o2));
|
|
||||||
}
|
|
||||||
|
|
||||||
private double weightedResidual(final PointVectorValuePair pv) {
|
|
||||||
final RealVector v = new ArrayRealVector(pv.getValueRef(), false);
|
|
||||||
final RealVector r = target.subtract(v);
|
|
||||||
return r.dotProduct(weight.operate(r));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,167 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
|
||||||
import org.apache.commons.math4.exception.DimensionMismatchException;
|
|
||||||
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
|
||||||
import org.apache.commons.math4.linear.RealMatrix;
|
|
||||||
import org.apache.commons.math4.optim.BaseMultivariateOptimizer;
|
|
||||||
import org.apache.commons.math4.optim.ConvergenceChecker;
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Base class for a multivariate vector function optimizer.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public abstract class MultivariateVectorOptimizer
|
|
||||||
extends BaseMultivariateOptimizer<PointVectorValuePair> {
|
|
||||||
/** Target values for the model function at optimum. */
|
|
||||||
private double[] target;
|
|
||||||
/** Weight matrix. */
|
|
||||||
private RealMatrix weightMatrix;
|
|
||||||
/** Model function. */
|
|
||||||
private MultivariateVectorFunction model;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param checker Convergence checker.
|
|
||||||
*/
|
|
||||||
protected MultivariateVectorOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
|
|
||||||
super(checker);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes the objective function value.
|
|
||||||
* This method <em>must</em> be called by subclasses to enforce the
|
|
||||||
* evaluation counter limit.
|
|
||||||
*
|
|
||||||
* @param params Point at which the objective function must be evaluated.
|
|
||||||
* @return the objective function value at the specified point.
|
|
||||||
* @throws TooManyEvaluationsException if the maximal number of evaluations
|
|
||||||
* (of the model vector function) is exceeded.
|
|
||||||
*/
|
|
||||||
protected double[] computeObjectiveValue(double[] params) {
|
|
||||||
super.incrementEvaluationCount();
|
|
||||||
return model.value(params);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* @param optData Optimization data. In addition to those documented in
|
|
||||||
* {@link BaseMultivariateOptimizer#parseOptimizationData(OptimizationData[])
|
|
||||||
* BaseMultivariateOptimizer}, this method will register the following data:
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link Target}</li>
|
|
||||||
* <li>{@link Weight}</li>
|
|
||||||
* <li>{@link ModelFunction}</li>
|
|
||||||
* </ul>
|
|
||||||
* @return {@inheritDoc}
|
|
||||||
* @throws TooManyEvaluationsException if the maximal number of
|
|
||||||
* evaluations is exceeded.
|
|
||||||
* @throws DimensionMismatchException if the initial guess, target, and weight
|
|
||||||
* arguments have inconsistent dimensions.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public PointVectorValuePair optimize(OptimizationData... optData)
|
|
||||||
throws TooManyEvaluationsException,
|
|
||||||
DimensionMismatchException {
|
|
||||||
// Set up base class and perform computation.
|
|
||||||
return super.optimize(optData);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the weight matrix of the observations.
|
|
||||||
*
|
|
||||||
* @return the weight matrix.
|
|
||||||
*/
|
|
||||||
public RealMatrix getWeight() {
|
|
||||||
return weightMatrix.copy();
|
|
||||||
}
|
|
||||||
/**
|
|
||||||
* Gets the observed values to be matched by the objective vector
|
|
||||||
* function.
|
|
||||||
*
|
|
||||||
* @return the target values.
|
|
||||||
*/
|
|
||||||
public double[] getTarget() {
|
|
||||||
return target.clone();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the number of observed values.
|
|
||||||
*
|
|
||||||
* @return the length of the target vector.
|
|
||||||
*/
|
|
||||||
public int getTargetSize() {
|
|
||||||
return target.length;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Scans the list of (required and optional) optimization data that
|
|
||||||
* characterize the problem.
|
|
||||||
*
|
|
||||||
* @param optData Optimization data. The following data will be looked for:
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link Target}</li>
|
|
||||||
* <li>{@link Weight}</li>
|
|
||||||
* <li>{@link ModelFunction}</li>
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected void parseOptimizationData(OptimizationData... optData) {
|
|
||||||
// Allow base class to register its own data.
|
|
||||||
super.parseOptimizationData(optData);
|
|
||||||
|
|
||||||
// The existing values (as set by the previous call) are reused if
|
|
||||||
// not provided in the argument list.
|
|
||||||
for (OptimizationData data : optData) {
|
|
||||||
if (data instanceof ModelFunction) {
|
|
||||||
model = ((ModelFunction) data).getModelFunction();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (data instanceof Target) {
|
|
||||||
target = ((Target) data).getTarget();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (data instanceof Weight) {
|
|
||||||
weightMatrix = ((Weight) data).getWeight();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check input consistency.
|
|
||||||
checkParameters();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check parameters consistency.
|
|
||||||
*
|
|
||||||
* @throws DimensionMismatchException if {@link #target} and
|
|
||||||
* {@link #weightMatrix} have inconsistent dimensions.
|
|
||||||
*/
|
|
||||||
private void checkParameters() {
|
|
||||||
if (target.length != weightMatrix.getColumnDimension()) {
|
|
||||||
throw new DimensionMismatchException(target.length,
|
|
||||||
weightMatrix.getColumnDimension());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,54 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Target of the optimization procedure.
|
|
||||||
* They are the values which the objective vector function must reproduce
|
|
||||||
* When the parameters of the model have been optimized.
|
|
||||||
* <br/>
|
|
||||||
* Immutable class.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class Target implements OptimizationData {
|
|
||||||
/** Target values (of the objective vector function). */
|
|
||||||
private final double[] target;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param observations Target values.
|
|
||||||
*/
|
|
||||||
public Target(double[] observations) {
|
|
||||||
target = observations.clone();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the initial guess.
|
|
||||||
*
|
|
||||||
* @return the initial guess.
|
|
||||||
*/
|
|
||||||
public double[] getTarget() {
|
|
||||||
return target.clone();
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,71 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.linear.DiagonalMatrix;
|
|
||||||
import org.apache.commons.math4.linear.NonSquareMatrixException;
|
|
||||||
import org.apache.commons.math4.linear.RealMatrix;
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Weight matrix of the residuals between model and observations.
|
|
||||||
* <br/>
|
|
||||||
* Immutable class.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class Weight implements OptimizationData {
|
|
||||||
/** Weight matrix. */
|
|
||||||
private final RealMatrix weightMatrix;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a diagonal weight matrix.
|
|
||||||
*
|
|
||||||
* @param weight List of the values of the diagonal.
|
|
||||||
*/
|
|
||||||
public Weight(double[] weight) {
|
|
||||||
weightMatrix = new DiagonalMatrix(weight);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param weight Weight matrix.
|
|
||||||
* @throws NonSquareMatrixException if the argument is not
|
|
||||||
* a square matrix.
|
|
||||||
*/
|
|
||||||
public Weight(RealMatrix weight) {
|
|
||||||
if (weight.getColumnDimension() != weight.getRowDimension()) {
|
|
||||||
throw new NonSquareMatrixException(weight.getColumnDimension(),
|
|
||||||
weight.getRowDimension());
|
|
||||||
}
|
|
||||||
|
|
||||||
weightMatrix = weight.copy();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the initial guess.
|
|
||||||
*
|
|
||||||
* @return the initial guess.
|
|
||||||
*/
|
|
||||||
public RealMatrix getWeight() {
|
|
||||||
return weightMatrix.copy();
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,281 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.exception.DimensionMismatchException;
|
|
||||||
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
|
||||||
import org.apache.commons.math4.linear.ArrayRealVector;
|
|
||||||
import org.apache.commons.math4.linear.DecompositionSolver;
|
|
||||||
import org.apache.commons.math4.linear.DiagonalMatrix;
|
|
||||||
import org.apache.commons.math4.linear.EigenDecomposition;
|
|
||||||
import org.apache.commons.math4.linear.MatrixUtils;
|
|
||||||
import org.apache.commons.math4.linear.QRDecomposition;
|
|
||||||
import org.apache.commons.math4.linear.RealMatrix;
|
|
||||||
import org.apache.commons.math4.optim.ConvergenceChecker;
|
|
||||||
import org.apache.commons.math4.optim.OptimizationData;
|
|
||||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
|
||||||
import org.apache.commons.math4.optim.nonlinear.vector.JacobianMultivariateVectorOptimizer;
|
|
||||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
|
||||||
import org.apache.commons.math4.util.FastMath;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Base class for implementing least-squares optimizers.
|
|
||||||
* It provides methods for error estimation.
|
|
||||||
*
|
|
||||||
* @since 3.1
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public abstract class AbstractLeastSquaresOptimizer
|
|
||||||
extends JacobianMultivariateVectorOptimizer {
|
|
||||||
/** Square-root of the weight matrix. */
|
|
||||||
private RealMatrix weightMatrixSqrt;
|
|
||||||
/** Cost value (square root of the sum of the residuals). */
|
|
||||||
private double cost;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param checker Convergence checker.
|
|
||||||
*/
|
|
||||||
protected AbstractLeastSquaresOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
|
|
||||||
super(checker);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes the weighted Jacobian matrix.
|
|
||||||
*
|
|
||||||
* @param params Model parameters at which to compute the Jacobian.
|
|
||||||
* @return the weighted Jacobian: W<sup>1/2</sup> J.
|
|
||||||
* @throws DimensionMismatchException if the Jacobian dimension does not
|
|
||||||
* match problem dimension.
|
|
||||||
*/
|
|
||||||
protected RealMatrix computeWeightedJacobian(double[] params) {
|
|
||||||
return weightMatrixSqrt.multiply(MatrixUtils.createRealMatrix(computeJacobian(params)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes the cost.
|
|
||||||
*
|
|
||||||
* @param residuals Residuals.
|
|
||||||
* @return the cost.
|
|
||||||
* @see #computeResiduals(double[])
|
|
||||||
*/
|
|
||||||
protected double computeCost(double[] residuals) {
|
|
||||||
final ArrayRealVector r = new ArrayRealVector(residuals);
|
|
||||||
return FastMath.sqrt(r.dotProduct(getWeight().operate(r)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the root-mean-square (RMS) value.
|
|
||||||
*
|
|
||||||
* The RMS the root of the arithmetic mean of the square of all weighted
|
|
||||||
* residuals.
|
|
||||||
* This is related to the criterion that is minimized by the optimizer
|
|
||||||
* as follows: If <em>c</em> if the criterion, and <em>n</em> is the
|
|
||||||
* number of measurements, then the RMS is <em>sqrt (c/n)</em>.
|
|
||||||
*
|
|
||||||
* @return the RMS value.
|
|
||||||
*/
|
|
||||||
public double getRMS() {
|
|
||||||
return FastMath.sqrt(getChiSquare() / getTargetSize());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get a Chi-Square-like value assuming the N residuals follow N
|
|
||||||
* distinct normal distributions centered on 0 and whose variances are
|
|
||||||
* the reciprocal of the weights.
|
|
||||||
* @return chi-square value
|
|
||||||
*/
|
|
||||||
public double getChiSquare() {
|
|
||||||
return cost * cost;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gets the square-root of the weight matrix.
|
|
||||||
*
|
|
||||||
* @return the square-root of the weight matrix.
|
|
||||||
*/
|
|
||||||
public RealMatrix getWeightSquareRoot() {
|
|
||||||
return weightMatrixSqrt.copy();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets the cost.
|
|
||||||
*
|
|
||||||
* @param cost Cost value.
|
|
||||||
*/
|
|
||||||
protected void setCost(double cost) {
|
|
||||||
this.cost = cost;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get the covariance matrix of the optimized parameters.
|
|
||||||
* <br/>
|
|
||||||
* Note that this operation involves the inversion of the
|
|
||||||
* <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
|
|
||||||
* Jacobian matrix.
|
|
||||||
* The {@code threshold} parameter is a way for the caller to specify
|
|
||||||
* that the result of this computation should be considered meaningless,
|
|
||||||
* and thus trigger an exception.
|
|
||||||
*
|
|
||||||
* @param params Model parameters.
|
|
||||||
* @param threshold Singularity threshold.
|
|
||||||
* @return the covariance matrix.
|
|
||||||
* @throws org.apache.commons.math4.linear.SingularMatrixException
|
|
||||||
* if the covariance matrix cannot be computed (singular problem).
|
|
||||||
*/
|
|
||||||
public double[][] computeCovariances(double[] params,
|
|
||||||
double threshold) {
|
|
||||||
// Set up the Jacobian.
|
|
||||||
final RealMatrix j = computeWeightedJacobian(params);
|
|
||||||
|
|
||||||
// Compute transpose(J)J.
|
|
||||||
final RealMatrix jTj = j.transpose().multiply(j);
|
|
||||||
|
|
||||||
// Compute the covariances matrix.
|
|
||||||
final DecompositionSolver solver
|
|
||||||
= new QRDecomposition(jTj, threshold).getSolver();
|
|
||||||
return solver.getInverse().getData();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes an estimate of the standard deviation of the parameters. The
|
|
||||||
* returned values are the square root of the diagonal coefficients of the
|
|
||||||
* covariance matrix, {@code sd(a[i]) ~= sqrt(C[i][i])}, where {@code a[i]}
|
|
||||||
* is the optimized value of the {@code i}-th parameter, and {@code C} is
|
|
||||||
* the covariance matrix.
|
|
||||||
*
|
|
||||||
* @param params Model parameters.
|
|
||||||
* @param covarianceSingularityThreshold Singularity threshold (see
|
|
||||||
* {@link #computeCovariances(double[],double) computeCovariances}).
|
|
||||||
* @return an estimate of the standard deviation of the optimized parameters
|
|
||||||
* @throws org.apache.commons.math4.linear.SingularMatrixException
|
|
||||||
* if the covariance matrix cannot be computed.
|
|
||||||
*/
|
|
||||||
public double[] computeSigma(double[] params,
|
|
||||||
double covarianceSingularityThreshold) {
|
|
||||||
final int nC = params.length;
|
|
||||||
final double[] sig = new double[nC];
|
|
||||||
final double[][] cov = computeCovariances(params, covarianceSingularityThreshold);
|
|
||||||
for (int i = 0; i < nC; ++i) {
|
|
||||||
sig[i] = FastMath.sqrt(cov[i][i]);
|
|
||||||
}
|
|
||||||
return sig;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* {@inheritDoc}
|
|
||||||
*
|
|
||||||
* @param optData Optimization data. In addition to those documented in
|
|
||||||
* {@link JacobianMultivariateVectorOptimizer#parseOptimizationData(OptimizationData[])
|
|
||||||
* JacobianMultivariateVectorOptimizer}, this method will register the following data:
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link org.apache.commons.math4.optim.nonlinear.vector.Weight}</li>
|
|
||||||
* </ul>
|
|
||||||
* @return {@inheritDoc}
|
|
||||||
* @throws TooManyEvaluationsException if the maximal number of
|
|
||||||
* evaluations is exceeded.
|
|
||||||
* @throws DimensionMismatchException if the initial guess, target, and weight
|
|
||||||
* arguments have inconsistent dimensions.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public PointVectorValuePair optimize(OptimizationData... optData)
|
|
||||||
throws TooManyEvaluationsException {
|
|
||||||
// Set up base class and perform computation.
|
|
||||||
return super.optimize(optData);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes the residuals.
|
|
||||||
* The residual is the difference between the observed (target)
|
|
||||||
* values and the model (objective function) value.
|
|
||||||
* There is one residual for each element of the vector-valued
|
|
||||||
* function.
|
|
||||||
*
|
|
||||||
* @param objectiveValue Value of the the objective function. This is
|
|
||||||
* the value returned from a call to
|
|
||||||
* {@link #computeObjectiveValue(double[]) computeObjectiveValue}
|
|
||||||
* (whose array argument contains the model parameters).
|
|
||||||
* @return the residuals.
|
|
||||||
* @throws DimensionMismatchException if {@code params} has a wrong
|
|
||||||
* length.
|
|
||||||
*/
|
|
||||||
protected double[] computeResiduals(double[] objectiveValue) {
|
|
||||||
final double[] target = getTarget();
|
|
||||||
if (objectiveValue.length != target.length) {
|
|
||||||
throw new DimensionMismatchException(target.length,
|
|
||||||
objectiveValue.length);
|
|
||||||
}
|
|
||||||
|
|
||||||
final double[] residuals = new double[target.length];
|
|
||||||
for (int i = 0; i < target.length; i++) {
|
|
||||||
residuals[i] = target[i] - objectiveValue[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
return residuals;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Scans the list of (required and optional) optimization data that
|
|
||||||
* characterize the problem.
|
|
||||||
* If the weight matrix is specified, the {@link #weightMatrixSqrt}
|
|
||||||
* field is recomputed.
|
|
||||||
*
|
|
||||||
* @param optData Optimization data. The following data will be looked for:
|
|
||||||
* <ul>
|
|
||||||
* <li>{@link Weight}</li>
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected void parseOptimizationData(OptimizationData... optData) {
|
|
||||||
// Allow base class to register its own data.
|
|
||||||
super.parseOptimizationData(optData);
|
|
||||||
|
|
||||||
// The existing values (as set by the previous call) are reused if
|
|
||||||
// not provided in the argument list.
|
|
||||||
for (OptimizationData data : optData) {
|
|
||||||
if (data instanceof Weight) {
|
|
||||||
weightMatrixSqrt = squareRoot(((Weight) data).getWeight());
|
|
||||||
// If more data must be parsed, this statement _must_ be
|
|
||||||
// changed to "continue".
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Computes the square-root of the weight matrix.
|
|
||||||
*
|
|
||||||
* @param m Symmetric, positive-definite (weight) matrix.
|
|
||||||
* @return the square-root of the weight matrix.
|
|
||||||
*/
|
|
||||||
private RealMatrix squareRoot(RealMatrix m) {
|
|
||||||
if (m instanceof DiagonalMatrix) {
|
|
||||||
final int dim = m.getRowDimension();
|
|
||||||
final RealMatrix sqrtM = new DiagonalMatrix(dim);
|
|
||||||
for (int i = 0; i < dim; i++) {
|
|
||||||
sqrtM.setEntry(i, i, FastMath.sqrt(m.getEntry(i, i)));
|
|
||||||
}
|
|
||||||
return sqrtM;
|
|
||||||
} else {
|
|
||||||
final EigenDecomposition dec = new EigenDecomposition(m);
|
|
||||||
return dec.getSquareRoot();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,183 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
* contributor license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright ownership.
|
|
||||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
* (the "License"); you may not use this file except in compliance with
|
|
||||||
* the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
|
||||||
|
|
||||||
import org.apache.commons.math4.exception.ConvergenceException;
|
|
||||||
import org.apache.commons.math4.exception.MathInternalError;
|
|
||||||
import org.apache.commons.math4.exception.MathUnsupportedOperationException;
|
|
||||||
import org.apache.commons.math4.exception.NullArgumentException;
|
|
||||||
import org.apache.commons.math4.exception.util.LocalizedFormats;
|
|
||||||
import org.apache.commons.math4.linear.ArrayRealVector;
|
|
||||||
import org.apache.commons.math4.linear.BlockRealMatrix;
|
|
||||||
import org.apache.commons.math4.linear.DecompositionSolver;
|
|
||||||
import org.apache.commons.math4.linear.LUDecomposition;
|
|
||||||
import org.apache.commons.math4.linear.QRDecomposition;
|
|
||||||
import org.apache.commons.math4.linear.RealMatrix;
|
|
||||||
import org.apache.commons.math4.linear.SingularMatrixException;
|
|
||||||
import org.apache.commons.math4.optim.ConvergenceChecker;
|
|
||||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Gauss-Newton least-squares solver.
|
|
||||||
* <br/>
|
|
||||||
* Constraints are not supported: the call to
|
|
||||||
* {@link #optimize(OptimizationData[]) optimize} will throw
|
|
||||||
* {@link MathUnsupportedOperationException} if bounds are passed to it.
|
|
||||||
*
|
|
||||||
* <p>
|
|
||||||
* This class solve a least-square problem by solving the normal equations
|
|
||||||
* of the linearized problem at each iteration. Either LU decomposition or
|
|
||||||
* QR decomposition can be used to solve the normal equations. LU decomposition
|
|
||||||
* is faster but QR decomposition is more robust for difficult problems.
|
|
||||||
* </p>
|
|
||||||
*
|
|
||||||
* @since 2.0
|
|
||||||
* @deprecated All classes and interfaces in this package are deprecated.
|
|
||||||
* The optimizers that were provided here were moved to the
|
|
||||||
* {@link org.apache.commons.math4.fitting.leastsquares} package
|
|
||||||
* (cf. MATH-1008).
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer {
|
|
||||||
/** Indicator for using LU decomposition. */
|
|
||||||
private final boolean useLU;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Simple constructor with default settings.
|
|
||||||
* The normal equations will be solved using LU decomposition.
|
|
||||||
*
|
|
||||||
* @param checker Convergence checker.
|
|
||||||
*/
|
|
||||||
public GaussNewtonOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
|
|
||||||
this(true, checker);
|
|
||||||
}
|
|
||||||
|
    /**
     * @param useLU If {@code true}, the normal equations will be solved
     * using LU decomposition, otherwise they will be solved using QR
     * decomposition.
     * @param checker Convergence checker.
     */
    public GaussNewtonOptimizer(final boolean useLU,
                                ConvergenceChecker<PointVectorValuePair> checker) {
        super(checker);
        this.useLU = useLU;
    }

    /** {@inheritDoc} */
    @Override
    public PointVectorValuePair doOptimize() {
        checkParameters();

        final ConvergenceChecker<PointVectorValuePair> checker
            = getConvergenceChecker();

        // Computation will be useless without a checker (see "for-loop").
        if (checker == null) {
            throw new NullArgumentException();
        }

        final double[] targetValues = getTarget();
        final int nR = targetValues.length; // Number of observed data.

        final RealMatrix weightMatrix = getWeight();
        // Diagonal of the weight matrix.
        final double[] residualsWeights = new double[nR];
        for (int i = 0; i < nR; i++) {
            residualsWeights[i] = weightMatrix.getEntry(i, i);
        }

        final double[] currentPoint = getStartPoint();
        final int nC = currentPoint.length;

        // iterate until convergence is reached
        PointVectorValuePair current = null;
        for (boolean converged = false; !converged;) {
            incrementIterationCount();

            // evaluate the objective function and its jacobian
            PointVectorValuePair previous = current;
            // Value of the objective function at "currentPoint".
            final double[] currentObjective = computeObjectiveValue(currentPoint);
            final double[] currentResiduals = computeResiduals(currentObjective);
            final RealMatrix weightedJacobian = computeWeightedJacobian(currentPoint);
            current = new PointVectorValuePair(currentPoint, currentObjective);

            // build the linear problem
            final double[] b = new double[nC];
            final double[][] a = new double[nC][nC];
            for (int i = 0; i < nR; ++i) {

                final double[] grad = weightedJacobian.getRow(i);
                final double weight = residualsWeights[i];
                final double residual = currentResiduals[i];

                // compute the normal equation
                final double wr = weight * residual;
                for (int j = 0; j < nC; ++j) {
                    b[j] += wr * grad[j];
                }

                // build the contribution matrix for measurement i
                for (int k = 0; k < nC; ++k) {
                    double[] ak = a[k];
                    double wgk = weight * grad[k];
                    for (int l = 0; l < nC; ++l) {
                        ak[l] += wgk * grad[l];
                    }
                }
            }

            // Check convergence.
            if (previous != null) {
                converged = checker.converged(getIterations(), previous, current);
                if (converged) {
                    setCost(computeCost(currentResiduals));
                    return current;
                }
            }

            try {
                // solve the linearized least squares problem
                RealMatrix mA = new BlockRealMatrix(a);
                DecompositionSolver solver = useLU ?
                        new LUDecomposition(mA).getSolver() :
                        new QRDecomposition(mA).getSolver();
                final double[] dX = solver.solve(new ArrayRealVector(b, false)).toArray();
                // update the estimated parameters
                for (int i = 0; i < nC; ++i) {
                    currentPoint[i] += dX[i];
                }
            } catch (SingularMatrixException e) {
                throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM);
            }
        }
        // Must never happen.
        throw new MathInternalError();
    }

    /**
     * @throws MathUnsupportedOperationException if bounds were passed to the
     * {@link #optimize(OptimizationData[]) optimize} method.
     */
    private void checkParameters() {
        if (getLowerBound() != null ||
            getUpperBound() != null) {
            throw new MathUnsupportedOperationException(LocalizedFormats.CONSTRAINT);
        }
    }
}
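For reference, the loop in doOptimize above is the textbook Gauss-Newton step: it accumulates the normal equations and then solves them with either LU or QR according to the useLU flag. In notation that is not part of the source (g_i is the i-th row of the weighted Jacobian, w_i the i-th diagonal weight, r_i the i-th residual):

\[
  A = \sum_{i=1}^{n_R} w_i \, g_i g_i^{\mathsf T},
  \qquad
  b = \sum_{i=1}^{n_R} w_i \, r_i \, g_i,
  \qquad
  A \, \Delta x = b,
  \qquad
  x \leftarrow x + \Delta x .
\]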
@@ -1,962 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;

import java.util.Arrays;

import org.apache.commons.math4.exception.ConvergenceException;
import org.apache.commons.math4.exception.MathUnsupportedOperationException;
import org.apache.commons.math4.exception.util.LocalizedFormats;
import org.apache.commons.math4.linear.RealMatrix;
import org.apache.commons.math4.optim.ConvergenceChecker;
import org.apache.commons.math4.optim.PointVectorValuePair;
import org.apache.commons.math4.util.FastMath;
import org.apache.commons.math4.util.Precision;

/**
 * This class solves a least-squares problem using the Levenberg-Marquardt
 * algorithm.
 * <br/>
 * Constraints are not supported: the call to
 * {@link #optimize(OptimizationData[]) optimize} will throw
 * {@link MathUnsupportedOperationException} if bounds are passed to it.
 *
 * <p>This implementation <em>should</em> work even for over-determined systems
 * (i.e. systems having more points than equations). Over-determined systems
 * are solved by ignoring the points which have the smallest impact according
 * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
 * are changed to implement this.</p>
 *
 * <p>The resolution engine is a simple translation of the MINPACK <a
 * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
 * changes. The changes include the over-determined resolution, the use of
 * inherited convergence checker and the Q.R. decomposition which has been
 * rewritten following the algorithm described in the
 * P. Lascaux and R. Theodor book <i>Analyse numérique matricielle
 * appliquée à l'art de l'ingénieur</i>, Masson 1986.</p>
 * <p>The authors of the original fortran version are:
 * <ul>
 *  <li>Argonne National Laboratory. MINPACK project. March 1980</li>
 *  <li>Burton S. Garbow</li>
 *  <li>Kenneth E. Hillstrom</li>
 *  <li>Jorge J. More</li>
 * </ul>
 * The redistribution policy for MINPACK is available <a
 * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
 * is reproduced below.</p>
 *
 * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
 * <tr><td>
 *    Minpack Copyright Notice (1999) University of Chicago.
 *    All rights reserved
 * </td></tr>
 * <tr><td>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * <ol>
 *  <li>Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.</li>
 *  <li>Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.</li>
 *  <li>The end-user documentation included with the redistribution, if any,
 *      must include the following acknowledgment:
 *      <code>This product includes software developed by the University of
 *            Chicago, as Operator of Argonne National Laboratory.</code>
 *      Alternately, this acknowledgment may appear in the software itself,
 *      if and wherever such third-party acknowledgments normally appear.</li>
 *  <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
 *      WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
 *      UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
 *      THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
 *      IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
 *      OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
 *      OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
 *      OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
 *      USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
 *      THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
 *      DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
 *      UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
 *      BE CORRECTED.</strong></li>
 *  <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
 *      HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
 *      ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
 *      INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
 *      ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
 *      PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
 *      SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
 *      (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
 *      EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
 *      POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
 * </ol></td></tr>
 * </table>
 *
 * @since 2.0
 * @deprecated All classes and interfaces in this package are deprecated.
 *     The optimizers that were provided here were moved to the
 *     {@link org.apache.commons.math4.fitting.leastsquares} package
 *     (cf. MATH-1008).
 */
@Deprecated
public class LevenbergMarquardtOptimizer
    extends AbstractLeastSquaresOptimizer {
    /** Twice the "epsilon machine". */
    private static final double TWO_EPS = 2 * Precision.EPSILON;
    /** Number of solved points. */
    private int solvedCols;
    /** Diagonal elements of the R matrix in the Q.R. decomposition. */
    private double[] diagR;
    /** Norms of the columns of the jacobian matrix. */
    private double[] jacNorm;
    /** Coefficients of the Householder transforms vectors. */
    private double[] beta;
    /** Columns permutation array. */
    private int[] permutation;
    /** Rank of the jacobian matrix. */
    private int rank;
    /** Levenberg-Marquardt parameter. */
    private double lmPar;
    /** Parameters evolution direction associated with lmPar. */
    private double[] lmDir;
    /** Positive input variable used in determining the initial step bound. */
    private final double initialStepBoundFactor;
    /** Desired relative error in the sum of squares. */
    private final double costRelativeTolerance;
    /** Desired relative error in the approximate solution parameters. */
    private final double parRelativeTolerance;
    /** Desired max cosine on the orthogonality between the function vector
     * and the columns of the jacobian. */
    private final double orthoTolerance;
    /** Threshold for QR ranking. */
    private final double qrRankingThreshold;
    /** Weighted residuals. */
    private double[] weightedResidual;
    /** Weighted Jacobian. */
    private double[][] weightedJacobian;

    /**
     * Build an optimizer for least squares problems with default values
     * for all the tuning parameters (see the {@link
     * #LevenbergMarquardtOptimizer(double,double,double,double,double)
     * other constructor}).
     * The default values for the algorithm settings are:
     * <ul>
     *  <li>Initial step bound factor: 100</li>
     *  <li>Cost relative tolerance: 1e-10</li>
     *  <li>Parameters relative tolerance: 1e-10</li>
     *  <li>Orthogonality tolerance: 1e-10</li>
     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
     * </ul>
     */
    public LevenbergMarquardtOptimizer() {
        this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
    }

    /**
     * Constructor that allows the specification of a custom convergence
     * checker.
     * Note that all the usual convergence checks will be <em>disabled</em>.
     * The default values for the algorithm settings are:
     * <ul>
     *  <li>Initial step bound factor: 100</li>
     *  <li>Cost relative tolerance: 1e-10</li>
     *  <li>Parameters relative tolerance: 1e-10</li>
     *  <li>Orthogonality tolerance: 1e-10</li>
     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
     * </ul>
     *
     * @param checker Convergence checker.
     */
    public LevenbergMarquardtOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
        this(100, checker, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
    }

    /**
     * Constructor that allows the specification of a custom convergence
     * checker, in addition to the standard ones.
     *
     * @param initialStepBoundFactor Positive input variable used in
     * determining the initial step bound. This bound is set to the
     * product of initialStepBoundFactor and the euclidean norm of
     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
     * itself. In most cases factor should lie in the interval
     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
     * @param checker Convergence checker.
     * @param costRelativeTolerance Desired relative error in the sum of
     * squares.
     * @param parRelativeTolerance Desired relative error in the approximate
     * solution parameters.
     * @param orthoTolerance Desired max cosine on the orthogonality between
     * the function vector and the columns of the Jacobian.
     * @param threshold Desired threshold for QR ranking. If the squared norm
     * of a column vector is smaller or equal to this threshold during QR
     * decomposition, it is considered to be a zero vector and hence the rank
     * of the matrix is reduced.
     */
    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
                                       ConvergenceChecker<PointVectorValuePair> checker,
                                       double costRelativeTolerance,
                                       double parRelativeTolerance,
                                       double orthoTolerance,
                                       double threshold) {
        super(checker);
        this.initialStepBoundFactor = initialStepBoundFactor;
        this.costRelativeTolerance = costRelativeTolerance;
        this.parRelativeTolerance = parRelativeTolerance;
        this.orthoTolerance = orthoTolerance;
        this.qrRankingThreshold = threshold;
    }

    /**
     * Build an optimizer for least squares problems with default values
     * for some of the tuning parameters (see the {@link
     * #LevenbergMarquardtOptimizer(double,double,double,double,double)
     * other constructor}).
     * The default values for the algorithm settings are:
     * <ul>
     *  <li>Initial step bound factor: 100</li>
     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
     * </ul>
     *
     * @param costRelativeTolerance Desired relative error in the sum of
     * squares.
     * @param parRelativeTolerance Desired relative error in the approximate
     * solution parameters.
     * @param orthoTolerance Desired max cosine on the orthogonality between
     * the function vector and the columns of the Jacobian.
     */
    public LevenbergMarquardtOptimizer(double costRelativeTolerance,
                                       double parRelativeTolerance,
                                       double orthoTolerance) {
        this(100,
             costRelativeTolerance, parRelativeTolerance, orthoTolerance,
             Precision.SAFE_MIN);
    }

    /**
     * The arguments control the behaviour of the default convergence checking
     * procedure.
     * Additional criteria can be defined through the setting of a {@link
     * ConvergenceChecker}.
     *
     * @param initialStepBoundFactor Positive input variable used in
     * determining the initial step bound. This bound is set to the
     * product of initialStepBoundFactor and the euclidean norm of
     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
     * itself. In most cases factor should lie in the interval
     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
     * @param costRelativeTolerance Desired relative error in the sum of
     * squares.
     * @param parRelativeTolerance Desired relative error in the approximate
     * solution parameters.
     * @param orthoTolerance Desired max cosine on the orthogonality between
     * the function vector and the columns of the Jacobian.
     * @param threshold Desired threshold for QR ranking. If the squared norm
     * of a column vector is smaller or equal to this threshold during QR
     * decomposition, it is considered to be a zero vector and hence the rank
     * of the matrix is reduced.
     */
    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
                                       double costRelativeTolerance,
                                       double parRelativeTolerance,
                                       double orthoTolerance,
                                       double threshold) {
        super(null); // No custom convergence criterion.
        this.initialStepBoundFactor = initialStepBoundFactor;
        this.costRelativeTolerance = costRelativeTolerance;
        this.parRelativeTolerance = parRelativeTolerance;
        this.orthoTolerance = orthoTolerance;
        this.qrRankingThreshold = threshold;
    }

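    /*
     * Illustrative only (tolerance values below are hypothetical): the
     * three-argument constructor tightens the tolerances while keeping the
     * default step bound factor and QR ranking threshold; the five-argument
     * form spells out every tuning parameter explicitly.
     *
     *   LevenbergMarquardtOptimizer lm =
     *       new LevenbergMarquardtOptimizer(1e-12,   // costRelativeTolerance
     *                                       1e-12,   // parRelativeTolerance
     *                                       1e-12);  // orthoTolerance
     *
     *   LevenbergMarquardtOptimizer lmFull =
     *       new LevenbergMarquardtOptimizer(100,                  // initialStepBoundFactor
     *                                       1e-12, 1e-12, 1e-12,  // the three tolerances
     *                                       Precision.SAFE_MIN);  // QR ranking threshold
     */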
    /** {@inheritDoc} */
    @Override
    protected PointVectorValuePair doOptimize() {
        checkParameters();

        final int nR = getTarget().length; // Number of observed data.
        final double[] currentPoint = getStartPoint();
        final int nC = currentPoint.length; // Number of parameters.

        // arrays shared with the other private methods
        solvedCols = FastMath.min(nR, nC);
        diagR = new double[nC];
        jacNorm = new double[nC];
        beta = new double[nC];
        permutation = new int[nC];
        lmDir = new double[nC];

        // local point
        double delta = 0;
        double xNorm = 0;
        double[] diag = new double[nC];
        double[] oldX = new double[nC];
        double[] oldRes = new double[nR];
        double[] oldObj = new double[nR];
        double[] qtf = new double[nR];
        double[] work1 = new double[nC];
        double[] work2 = new double[nC];
        double[] work3 = new double[nC];

        final RealMatrix weightMatrixSqrt = getWeightSquareRoot();

        // Evaluate the function at the starting point and calculate its norm.
        double[] currentObjective = computeObjectiveValue(currentPoint);
        double[] currentResiduals = computeResiduals(currentObjective);
        PointVectorValuePair current = new PointVectorValuePair(currentPoint, currentObjective);
        double currentCost = computeCost(currentResiduals);

        // Outer loop.
        lmPar = 0;
        boolean firstIteration = true;
        final ConvergenceChecker<PointVectorValuePair> checker = getConvergenceChecker();
        while (true) {
            incrementIterationCount();

            final PointVectorValuePair previous = current;

            // QR decomposition of the jacobian matrix
            qrDecomposition(computeWeightedJacobian(currentPoint));

            weightedResidual = weightMatrixSqrt.operate(currentResiduals);
            for (int i = 0; i < nR; i++) {
                qtf[i] = weightedResidual[i];
            }

            // compute Qt.res
            qTy(qtf);

            // now we don't need Q anymore,
            // so let jacobian contain the R matrix with its diagonal elements
            for (int k = 0; k < solvedCols; ++k) {
                int pk = permutation[k];
                weightedJacobian[k][pk] = diagR[pk];
            }

            if (firstIteration) {
                // scale the point according to the norms of the columns
                // of the initial jacobian
                xNorm = 0;
                for (int k = 0; k < nC; ++k) {
                    double dk = jacNorm[k];
                    if (dk == 0) {
                        dk = 1.0;
                    }
                    double xk = dk * currentPoint[k];
                    xNorm += xk * xk;
                    diag[k] = dk;
                }
                xNorm = FastMath.sqrt(xNorm);

                // initialize the step bound delta
                delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
            }

            // check orthogonality between function vector and jacobian columns
            double maxCosine = 0;
            if (currentCost != 0) {
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    double s = jacNorm[pj];
                    if (s != 0) {
                        double sum = 0;
                        for (int i = 0; i <= j; ++i) {
                            sum += weightedJacobian[i][pj] * qtf[i];
                        }
                        maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
                    }
                }
            }
            if (maxCosine <= orthoTolerance) {
                // Convergence has been reached.
                setCost(currentCost);
                return current;
            }

            // rescale if necessary
            for (int j = 0; j < nC; ++j) {
                diag[j] = FastMath.max(diag[j], jacNorm[j]);
            }

            // Inner loop.
            for (double ratio = 0; ratio < 1.0e-4;) {

                // save the state
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    oldX[pj] = currentPoint[pj];
                }
                final double previousCost = currentCost;
                double[] tmpVec = weightedResidual;
                weightedResidual = oldRes;
                oldRes = tmpVec;
                tmpVec = currentObjective;
                currentObjective = oldObj;
                oldObj = tmpVec;

                // determine the Levenberg-Marquardt parameter
                determineLMParameter(qtf, delta, diag, work1, work2, work3);

                // compute the new point and the norm of the evolution direction
                double lmNorm = 0;
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    lmDir[pj] = -lmDir[pj];
                    currentPoint[pj] = oldX[pj] + lmDir[pj];
                    double s = diag[pj] * lmDir[pj];
                    lmNorm += s * s;
                }
                lmNorm = FastMath.sqrt(lmNorm);
                // on the first iteration, adjust the initial step bound.
                if (firstIteration) {
                    delta = FastMath.min(delta, lmNorm);
                }

                // Evaluate the function at x + p and calculate its norm.
                currentObjective = computeObjectiveValue(currentPoint);
                currentResiduals = computeResiduals(currentObjective);
                current = new PointVectorValuePair(currentPoint, currentObjective);
                currentCost = computeCost(currentResiduals);

                // compute the scaled actual reduction
                double actRed = -1.0;
                if (0.1 * currentCost < previousCost) {
                    double r = currentCost / previousCost;
                    actRed = 1.0 - r * r;
                }

                // compute the scaled predicted reduction
                // and the scaled directional derivative
                for (int j = 0; j < solvedCols; ++j) {
                    int pj = permutation[j];
                    double dirJ = lmDir[pj];
                    work1[j] = 0;
                    for (int i = 0; i <= j; ++i) {
                        work1[i] += weightedJacobian[i][pj] * dirJ;
                    }
                }
                double coeff1 = 0;
                for (int j = 0; j < solvedCols; ++j) {
                    coeff1 += work1[j] * work1[j];
                }
                double pc2 = previousCost * previousCost;
                coeff1 /= pc2;
                double coeff2 = lmPar * lmNorm * lmNorm / pc2;
                double preRed = coeff1 + 2 * coeff2;
                double dirDer = -(coeff1 + coeff2);

                // ratio of the actual to the predicted reduction
                ratio = (preRed == 0) ? 0 : (actRed / preRed);

                // update the step bound
                if (ratio <= 0.25) {
                    double tmp =
                        (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
                    if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
                        tmp = 0.1;
                    }
                    delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
                    lmPar /= tmp;
                } else if ((lmPar == 0) || (ratio >= 0.75)) {
                    delta = 2 * lmNorm;
                    lmPar *= 0.5;
                }

                // test for successful iteration.
                if (ratio >= 1.0e-4) {
                    // successful iteration, update the norm
                    firstIteration = false;
                    xNorm = 0;
                    for (int k = 0; k < nC; ++k) {
                        double xK = diag[k] * currentPoint[k];
                        xNorm += xK * xK;
                    }
                    xNorm = FastMath.sqrt(xNorm);

                    // tests for convergence.
                    if (checker != null && checker.converged(getIterations(), previous, current)) {
                        setCost(currentCost);
                        return current;
                    }
                } else {
                    // failed iteration, reset the previous values
                    currentCost = previousCost;
                    for (int j = 0; j < solvedCols; ++j) {
                        int pj = permutation[j];
                        currentPoint[pj] = oldX[pj];
                    }
                    tmpVec = weightedResidual;
                    weightedResidual = oldRes;
                    oldRes = tmpVec;
                    tmpVec = currentObjective;
                    currentObjective = oldObj;
                    oldObj = tmpVec;
                    // Reset "current" to previous values.
                    current = new PointVectorValuePair(currentPoint, currentObjective);
                }

                // Default convergence criteria.
                if ((FastMath.abs(actRed) <= costRelativeTolerance &&
                     preRed <= costRelativeTolerance &&
                     ratio <= 2.0) ||
                    delta <= parRelativeTolerance * xNorm) {
                    setCost(currentCost);
                    return current;
                }

                // tests for termination and stringent tolerances
                if (FastMath.abs(actRed) <= TWO_EPS &&
                    preRed <= TWO_EPS &&
                    ratio <= 2.0) {
                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
                                                   costRelativeTolerance);
                } else if (delta <= TWO_EPS * xNorm) {
                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
                                                   parRelativeTolerance);
                } else if (maxCosine <= TWO_EPS) {
                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
                                                   orthoTolerance);
                }
            }
        }
    }

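    /*
     * Trust-region bookkeeping used by the inner loop above, restated compactly
     * (notation mine, matching the local variables): with p the proposed step,
     * D = diag and lambda = lmPar,
     *
     *   actRed = 1 - (currentCost / previousCost)^2
     *   preRed = (||J p||^2 + 2 * lambda * ||D p||^2) / previousCost^2
     *   ratio  = actRed / preRed
     *
     * The step is accepted only when ratio >= 1.0e-4; the step bound delta is
     * reduced when ratio <= 0.25 and reset to twice the step norm when
     * ratio >= 0.75 (or when lmPar is zero).
     */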
    /**
     * Determine the Levenberg-Marquardt parameter.
     * <p>This implementation is a translation in Java of the MINPACK
     * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
     * routine.</p>
     * <p>This method sets the lmPar and lmDir attributes.</p>
     * <p>The authors of the original fortran function are:</p>
     * <ul>
     *  <li>Argonne National Laboratory. MINPACK project. March 1980</li>
     *  <li>Burton S. Garbow</li>
     *  <li>Kenneth E. Hillstrom</li>
     *  <li>Jorge J. More</li>
     * </ul>
     * <p>Luc Maisonobe did the Java translation.</p>
     *
     * @param qy array containing qTy
     * @param delta upper bound on the euclidean norm of diagR * lmDir
     * @param diag diagonal matrix
     * @param work1 work array
     * @param work2 work array
     * @param work3 work array
     */
    private void determineLMParameter(double[] qy, double delta, double[] diag,
                                      double[] work1, double[] work2, double[] work3) {
        final int nC = weightedJacobian[0].length;

        // compute and store in x the gauss-newton direction, if the
        // jacobian is rank-deficient, obtain a least squares solution
        for (int j = 0; j < rank; ++j) {
            lmDir[permutation[j]] = qy[j];
        }
        for (int j = rank; j < nC; ++j) {
            lmDir[permutation[j]] = 0;
        }
        for (int k = rank - 1; k >= 0; --k) {
            int pk = permutation[k];
            double ypk = lmDir[pk] / diagR[pk];
            for (int i = 0; i < k; ++i) {
                lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
            }
            lmDir[pk] = ypk;
        }

        // evaluate the function at the origin, and test
        // for acceptance of the Gauss-Newton direction
        double dxNorm = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double s = diag[pj] * lmDir[pj];
            work1[pj] = s;
            dxNorm += s * s;
        }
        dxNorm = FastMath.sqrt(dxNorm);
        double fp = dxNorm - delta;
        if (fp <= 0.1 * delta) {
            lmPar = 0;
            return;
        }

        // if the jacobian is not rank deficient, the Newton step provides
        // a lower bound, parl, for the zero of the function,
        // otherwise set this bound to zero
        double sum2;
        double parl = 0;
        if (rank == solvedCols) {
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] *= diag[pj] / dxNorm;
            }
            sum2 = 0;
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                double sum = 0;
                for (int i = 0; i < j; ++i) {
                    sum += weightedJacobian[i][pj] * work1[permutation[i]];
                }
                double s = (work1[pj] - sum) / diagR[pj];
                work1[pj] = s;
                sum2 += s * s;
            }
            parl = fp / (delta * sum2);
        }

        // calculate an upper bound, paru, for the zero of the function
        sum2 = 0;
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            double sum = 0;
            for (int i = 0; i <= j; ++i) {
                sum += weightedJacobian[i][pj] * qy[i];
            }
            sum /= diag[pj];
            sum2 += sum * sum;
        }
        double gNorm = FastMath.sqrt(sum2);
        double paru = gNorm / delta;
        if (paru == 0) {
            paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
        }

        // if the input par lies outside of the interval (parl,paru),
        // set par to the closer endpoint
        lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
        if (lmPar == 0) {
            lmPar = gNorm / dxNorm;
        }

        for (int countdown = 10; countdown >= 0; --countdown) {

            // evaluate the function at the current value of lmPar
            if (lmPar == 0) {
                lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
            }
            double sPar = FastMath.sqrt(lmPar);
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] = sPar * diag[pj];
            }
            determineLMDirection(qy, work1, work2, work3);

            dxNorm = 0;
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                double s = diag[pj] * lmDir[pj];
                work3[pj] = s;
                dxNorm += s * s;
            }
            dxNorm = FastMath.sqrt(dxNorm);
            double previousFP = fp;
            fp = dxNorm - delta;

            // if the function is small enough, accept the current value
            // of lmPar, also test for the exceptional cases where parl is zero
            if ((FastMath.abs(fp) <= 0.1 * delta) ||
                ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
                return;
            }

            // compute the Newton correction
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] = work3[pj] * diag[pj] / dxNorm;
            }
            for (int j = 0; j < solvedCols; ++j) {
                int pj = permutation[j];
                work1[pj] /= work2[j];
                double tmp = work1[pj];
                for (int i = j + 1; i < solvedCols; ++i) {
                    work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
                }
            }
            sum2 = 0;
            for (int j = 0; j < solvedCols; ++j) {
                double s = work1[permutation[j]];
                sum2 += s * s;
            }
            double correction = fp / (delta * sum2);

            // depending on the sign of the function, update parl or paru.
            if (fp > 0) {
                parl = FastMath.max(parl, lmPar);
            } else if (fp < 0) {
                paru = FastMath.min(paru, lmPar);
            }

            // compute an improved estimate for lmPar
            lmPar = FastMath.max(parl, lmPar + correction);

        }
    }

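    /*
     * What the lmpar translation above computes, stated compactly (notation
     * mine; J is the weighted Jacobian, r the residuals, D = diag,
     * Delta = delta): it returns lmPar = lambda >= 0 and a step p = lmDir
     * solving the damped normal equations
     *
     *   (J^T J + lambda D^T D) p = J^T r     (up to the sign conventions used
     *                                         elsewhere in this class)
     *
     * with lambda = 0 when the plain Gauss-Newton step already satisfies
     * ||D p|| <= Delta, and otherwise lambda chosen so that ||D p|| is close
     * to Delta. The countdown loop is a safeguarded Newton iteration on
     * phi(lambda) = ||D p(lambda)|| - Delta, bracketed by parl and paru.
     */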
    /**
     * Solve a*x = b and d*x = 0 in the least squares sense.
     * <p>This implementation is a translation in Java of the MINPACK
     * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
     * routine.</p>
     * <p>This method sets the lmDir and lmDiag attributes.</p>
     * <p>The authors of the original fortran function are:</p>
     * <ul>
     *  <li>Argonne National Laboratory. MINPACK project. March 1980</li>
     *  <li>Burton S. Garbow</li>
     *  <li>Kenneth E. Hillstrom</li>
     *  <li>Jorge J. More</li>
     * </ul>
     * <p>Luc Maisonobe did the Java translation.</p>
     *
     * @param qy array containing qTy
     * @param diag diagonal matrix
     * @param lmDiag diagonal elements associated with lmDir
     * @param work work array
     */
    private void determineLMDirection(double[] qy, double[] diag,
                                      double[] lmDiag, double[] work) {

        // copy R and Qty to preserve input and initialize s
        // in particular, save the diagonal elements of R in lmDir
        for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            for (int i = j + 1; i < solvedCols; ++i) {
                weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
            }
            lmDir[j] = diagR[pj];
            work[j] = qy[j];
        }

        // eliminate the diagonal matrix d using a Givens rotation
        for (int j = 0; j < solvedCols; ++j) {

            // prepare the row of d to be eliminated, locating the
            // diagonal element using p from the Q.R. factorization
            int pj = permutation[j];
            double dpj = diag[pj];
            if (dpj != 0) {
                Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
            }
            lmDiag[j] = dpj;

            // the transformations to eliminate the row of d
            // modify only a single element of Qty
            // beyond the first n, which is initially zero.
            double qtbpj = 0;
            for (int k = j; k < solvedCols; ++k) {
                int pk = permutation[k];

                // determine a Givens rotation which eliminates the
                // appropriate element in the current row of d
                if (lmDiag[k] != 0) {

                    final double sin;
                    final double cos;
                    double rkk = weightedJacobian[k][pk];
                    if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
                        final double cotan = rkk / lmDiag[k];
                        sin = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
                        cos = sin * cotan;
                    } else {
                        final double tan = lmDiag[k] / rkk;
                        cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
                        sin = cos * tan;
                    }

                    // compute the modified diagonal element of R and
                    // the modified element of (Qty,0)
                    weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
                    final double temp = cos * work[k] + sin * qtbpj;
                    qtbpj = -sin * work[k] + cos * qtbpj;
                    work[k] = temp;

                    // accumulate the transformation in the row of s
                    for (int i = k + 1; i < solvedCols; ++i) {
                        double rik = weightedJacobian[i][pk];
                        final double temp2 = cos * rik + sin * lmDiag[i];
                        lmDiag[i] = -sin * rik + cos * lmDiag[i];
                        weightedJacobian[i][pk] = temp2;
                    }
                }
            }

            // store the diagonal element of s and restore
            // the corresponding diagonal element of R
            lmDiag[j] = weightedJacobian[j][permutation[j]];
            weightedJacobian[j][permutation[j]] = lmDir[j];
        }

        // solve the triangular system for z, if the system is
        // singular, then obtain a least squares solution
        int nSing = solvedCols;
        for (int j = 0; j < solvedCols; ++j) {
            if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
                nSing = j;
            }
            if (nSing < solvedCols) {
                work[j] = 0;
            }
        }
        if (nSing > 0) {
            for (int j = nSing - 1; j >= 0; --j) {
                int pj = permutation[j];
                double sum = 0;
                for (int i = j + 1; i < nSing; ++i) {
                    sum += weightedJacobian[i][pj] * work[i];
                }
                work[j] = (work[j] - sum) / lmDiag[j];
            }
        }

        // permute the components of z back to components of lmDir
        for (int j = 0; j < lmDir.length; ++j) {
            lmDir[permutation[j]] = work[j];
        }
    }

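    /*
     * Equivalent formulation of the qrsolv translation above (notation mine):
     * given the R factor and Q^T y from the QR decomposition, plus the scaling
     * passed in as "diag" (which determineLMParameter sets to
     * sqrt(lmPar) * diag), it computes x = lmDir minimizing
     *
     *   || [ R    ]       [ Q^T y ] ||
     *   || [ diag ] x  -  [   0   ] ||
     *
     * in the least squares sense. Only Givens rotations against the extra
     * diagonal rows are needed, so each new lmPar value costs O(n^2) instead
     * of a full refactorization.
     */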
    /**
     * Decompose a matrix A as A.P = Q.R using Householder transforms.
     * <p>As suggested in the P. Lascaux and R. Theodor book
     * <i>Analyse numérique matricielle appliquée à
     * l'art de l'ingénieur</i> (Masson, 1986), instead of representing
     * the Householder transforms with u<sub>k</sub> unit vectors such that:
     * <pre>
     * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
     * </pre>
     * we use v<sub>k</sub> non-unit vectors such that:
     * <pre>
     * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
     * </pre>
     * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
     * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
     * them from the v<sub>k</sub> vectors would be costly.</p>
     * <p>This decomposition handles rank deficient cases since the transformations
     * are performed in non-increasing column norms order thanks to column
     * pivoting. The diagonal elements of the R matrix are therefore also in
     * non-increasing absolute values order.</p>
     *
     * @param jacobian Weighted Jacobian matrix at the current point.
     * @exception ConvergenceException if the decomposition cannot be performed
     */
    private void qrDecomposition(RealMatrix jacobian) throws ConvergenceException {
        // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
        // hence the multiplication by -1.
        weightedJacobian = jacobian.scalarMultiply(-1).getData();

        final int nR = weightedJacobian.length;
        final int nC = weightedJacobian[0].length;

        // initializations
        for (int k = 0; k < nC; ++k) {
            permutation[k] = k;
            double norm2 = 0;
            for (int i = 0; i < nR; ++i) {
                double akk = weightedJacobian[i][k];
                norm2 += akk * akk;
            }
            jacNorm[k] = FastMath.sqrt(norm2);
        }

        // transform the matrix column after column
        for (int k = 0; k < nC; ++k) {

            // select the column with the greatest norm on active components
            int nextColumn = -1;
            double ak2 = Double.NEGATIVE_INFINITY;
            for (int i = k; i < nC; ++i) {
                double norm2 = 0;
                for (int j = k; j < nR; ++j) {
                    double aki = weightedJacobian[j][permutation[i]];
                    norm2 += aki * aki;
                }
                if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
                    throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
                                                   nR, nC);
                }
                if (norm2 > ak2) {
                    nextColumn = i;
                    ak2 = norm2;
                }
            }
            if (ak2 <= qrRankingThreshold) {
                rank = k;
                return;
            }
            int pk = permutation[nextColumn];
            permutation[nextColumn] = permutation[k];
            permutation[k] = pk;

            // choose alpha such that Hk.u = alpha ek
            double akk = weightedJacobian[k][pk];
            double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
            double betak = 1.0 / (ak2 - akk * alpha);
            beta[pk] = betak;

            // transform the current column
            diagR[pk] = alpha;
            weightedJacobian[k][pk] -= alpha;

            // transform the remaining columns
            for (int dk = nC - 1 - k; dk > 0; --dk) {
                double gamma = 0;
                for (int j = k; j < nR; ++j) {
                    gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
                }
                gamma *= betak;
                for (int j = k; j < nR; ++j) {
                    weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
                }
            }
        }
        rank = solvedCols;
    }

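    /*
     * The Javadoc formulas above in one place, using the same quantities as
     * the code (a_k is the active part of column k, akk its pivot element,
     * ak2 = ||a_k||^2):
     *
     *   alpha_k = -sign(akk) * sqrt(ak2)
     *   v_k     = a_k - alpha_k * e_k
     *   beta_k  = 1 / (ak2 - akk * alpha_k)
     *   H_k     = I - beta_k * v_k * v_k^T
     *
     * The reflector vectors v_k are stored in place in weightedJacobian and
     * the beta_k coefficients in the beta array, which is what qTy below
     * relies on.
     */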
    /**
     * Compute the product Qt.y for some Q.R. decomposition.
     *
     * @param y vector to multiply (will be overwritten with the result)
     */
    private void qTy(double[] y) {
        final int nR = weightedJacobian.length;
        final int nC = weightedJacobian[0].length;

        for (int k = 0; k < nC; ++k) {
            int pk = permutation[k];
            double gamma = 0;
            for (int i = k; i < nR; ++i) {
                gamma += weightedJacobian[i][pk] * y[i];
            }
            gamma *= beta[pk];
            for (int i = k; i < nR; ++i) {
                y[i] -= gamma * weightedJacobian[i][pk];
            }
        }
    }

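    /*
     * qTy never forms Q explicitly: it applies the stored Householder
     * reflectors in order, one rank-one update per column,
     *
     *   y <- y - beta_k * (v_k^T y) * v_k,    k = 0 ... nC-1,
     *
     * where v_k occupies rows k..nR-1 of column permutation[k] of
     * weightedJacobian.
     */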
    /**
     * @throws MathUnsupportedOperationException if bounds were passed to the
     * {@link #optimize(OptimizationData[]) optimize} method.
     */
    private void checkParameters() {
        if (getLowerBound() != null ||
            getUpperBound() != null) {
            throw new MathUnsupportedOperationException(LocalizedFormats.CONSTRAINT);
        }
    }
}
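Since this file is deleted, callers have to migrate to the fitting.leastsquares package named in the deprecation note. A minimal sketch, assuming the math4 API keeps the shape of its commons-math3 counterpart (the builder and optimizer names below come from that counterpart, not from this commit, and model, jacobian, observed and start are hypothetical user-supplied objects):

    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresBuilder;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
    import org.apache.commons.math4.fitting.leastsquares.LeastSquaresProblem;
    import org.apache.commons.math4.fitting.leastsquares.LevenbergMarquardtOptimizer;

    // model: MultivariateVectorFunction, jacobian: MultivariateMatrixFunction,
    // observed and start: double[] (all assumed to exist in the calling code).
    LeastSquaresProblem problem = new LeastSquaresBuilder()
            .model(model, jacobian)   // value and Jacobian of the fitted model
            .target(observed)         // observed values
            .start(start)             // initial guess
            .maxEvaluations(1000)
            .maxIterations(100)
            .build();

    Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
    double[] fitted = optimum.getPoint().toArray();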
@@ -1,26 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This package provides optimization algorithms that require derivatives.
 *
 * @deprecated All classes and interfaces in this package are deprecated.
 *     The optimizers that were provided here were moved to the
 *     {@link org.apache.commons.math4.fitting.leastsquares} package
 *     (cf. MATH-1008).
 */
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
@@ -1,26 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Algorithms for optimizing a vector function.
 *
 * @deprecated All classes and interfaces in this package are deprecated.
 *     The optimizers that were provided here were moved to the
 *     {@link org.apache.commons.math4.fitting.leastsquares} package
 *     (cf. MATH-1008).
 */
package org.apache.commons.math4.optim.nonlinear.vector;
@@ -45,9 +45,9 @@ public class MultiStartUnivariateOptimizer
     /** Number of evaluations already performed for all starts. */
     private int totalEvaluations;
     /** Number of starts to go. */
-    private int starts;
+    private final int starts;
     /** Random generator for multi-start. */
-    private RandomGenerator generator;
+    private final RandomGenerator generator;
     /** Found optima. */
     private UnivariatePointValuePair[] optima;
     /** Optimization data. */
@@ -211,6 +211,7 @@ public class MultiStartUnivariateOptimizer
      */
     private void sortPairs(final GoalType goal) {
         Arrays.sort(optima, new Comparator<UnivariatePointValuePair>() {
+            @Override
             public int compare(final UnivariatePointValuePair o1,
                                final UnivariatePointValuePair o2) {
                 if (o1 == null) {