From c37f06ed3ad2de8d49a80ac46aae3ab7748598a4 Mon Sep 17 00:00:00 2001
From: Luc Maisonobe
Date: Sun, 15 Mar 2009 19:11:02 +0000
Subject: [PATCH] adapted old Levenberg-Marquardt estimator to new top level
optimizers API
git-svn-id: https://svn.apache.org/repos/asf/commons/proper/math/trunk@754727 13f79535-47bb-0310-9956-ffa450edef68
---
.../VectorialDifferentiableOptimizer.java | 12 +-
.../general/AbstractEstimator.java | 312 -------
.../AbstractLeastSquaresOptimizer.java | 36 +-
.../general/EstimatedParameter.java | 124 ---
.../general/EstimationProblem.java | 65 --
.../math/optimization/general/Estimator.java | 93 --
.../general/GaussNewtonEstimator.java | 227 -----
.../general/LevenbergMarquardtEstimator.java | 873 ------------------
.../general/LevenbergMarquardtOptimizer.java | 838 +++++++++++++++++
.../general/SimpleEstimationProblem.java | 108 ---
.../general/WeightedMeasurement.java | 170 ----
.../general/EstimatedParameterTest.java | 75 --
.../LevenbergMarquardtEstimatorTest.java | 846 -----------------
.../LevenbergMarquardtOptimizerTest.java | 649 +++++++++++++
.../optimization/general/MinpackTest.java | 589 ++++++------
.../general/WeightedMeasurementTest.java | 124 ---
16 files changed, 1811 insertions(+), 3330 deletions(-)
delete mode 100644 src/java/org/apache/commons/math/optimization/general/AbstractEstimator.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/EstimatedParameter.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/EstimationProblem.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/Estimator.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/GaussNewtonEstimator.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimator.java
create mode 100644 src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/SimpleEstimationProblem.java
delete mode 100644 src/java/org/apache/commons/math/optimization/general/WeightedMeasurement.java
delete mode 100644 src/test/org/apache/commons/math/optimization/general/EstimatedParameterTest.java
delete mode 100644 src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimatorTest.java
create mode 100644 src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizerTest.java
delete mode 100644 src/test/org/apache/commons/math/optimization/general/WeightedMeasurementTest.java
diff --git a/src/java/org/apache/commons/math/optimization/VectorialDifferentiableOptimizer.java b/src/java/org/apache/commons/math/optimization/VectorialDifferentiableOptimizer.java
index c79113189..95f1d66df 100644
--- a/src/java/org/apache/commons/math/optimization/VectorialDifferentiableOptimizer.java
+++ b/src/java/org/apache/commons/math/optimization/VectorialDifferentiableOptimizer.java
@@ -60,7 +60,17 @@ public interface VectorialDifferentiableOptimizer extends Serializable {
*
* @return number of evaluations of the objective function
*/
- int getEvaluations();
+ int getEvaluations();
+
+ /** Get the number of evaluations of the objective function jacobian.
+ *
+ * The number of evaluations corresponds to the last call to the
+ * {@link #optimize(ObjectiveFunction, GoalType, double[]) optimize}
+ * method. It is 0 if the method has not been called yet.
+ *
+ * @return number of evaluations of the objective function jacobian
+ */
+ int getJacobianEvaluations();
/** Set the convergence checker.
* @param checker object to use to check for convergence
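
For callers of the new API, a hedged usage sketch (not part of this patch): "fitFunction",
"target", "weights" and "start" are placeholder names, and the optimize(...) argument list is
only assumed from the target/weights/startPoint setup visible later in this patch.

    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    optimizer.optimize(fitFunction, target, weights, start);
    int objectiveCalls = optimizer.getEvaluations();          // objective function evaluations
    int jacobianCalls  = optimizer.getJacobianEvaluations();  // new counter introduced above
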
diff --git a/src/java/org/apache/commons/math/optimization/general/AbstractEstimator.java b/src/java/org/apache/commons/math/optimization/general/AbstractEstimator.java
deleted file mode 100644
index 01ed15cc1..000000000
--- a/src/java/org/apache/commons/math/optimization/general/AbstractEstimator.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import java.util.Arrays;
-
-import org.apache.commons.math.linear.InvalidMatrixException;
-import org.apache.commons.math.linear.MatrixUtils;
-import org.apache.commons.math.linear.RealMatrix;
-import org.apache.commons.math.linear.decomposition.LUDecompositionImpl;
-import org.apache.commons.math.optimization.OptimizationException;
-
-/**
- * Base class for implementing estimators.
- * This base class handles the boilerplates methods associated to thresholds
- * settings, jacobian and error estimation.
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-public abstract class AbstractEstimator implements Estimator {
-
- /** Default maximal number of cost evaluations allowed. */
- public static final int DEFAULT_MAX_COST_EVALUATIONS = 100;
-
- /**
- * Build an abstract estimator for least squares problems.
- * The maximal number of cost evaluations allowed is set
- * to its default value {@link #DEFAULT_MAX_COST_EVALUATIONS}.
- */
- protected AbstractEstimator() {
- setMaxCostEval(DEFAULT_MAX_COST_EVALUATIONS);
- }
-
- /**
- * Set the maximal number of cost evaluations allowed.
- *
- * @param maxCostEval maximal number of cost evaluations allowed
- * @see #estimate
- */
- public final void setMaxCostEval(int maxCostEval) {
- this.maxCostEval = maxCostEval;
- }
-
- /**
- * Get the number of cost evaluations.
- *
- * @return number of cost evaluations
- * */
- public final int getCostEvaluations() {
- return costEvaluations;
- }
-
- /**
- * Get the number of jacobian evaluations.
- *
- * @return number of jacobian evaluations
- * */
- public final int getJacobianEvaluations() {
- return jacobianEvaluations;
- }
-
- /**
- * Update the jacobian matrix.
- */
- protected void updateJacobian() {
- incrementJacobianEvaluationsCounter();
- Arrays.fill(jacobian, 0);
- for (int i = 0, index = 0; i < rows; i++) {
- WeightedMeasurement wm = measurements[i];
- double factor = -Math.sqrt(wm.getWeight());
- for (int j = 0; j < cols; ++j) {
- jacobian[index++] = factor * wm.getPartial(parameters[j]);
- }
- }
- }
-
- /**
- * Increment the jacobian evaluations counter.
- */
- protected final void incrementJacobianEvaluationsCounter() {
- ++jacobianEvaluations;
- }
-
- /**
- * Update the residuals array and cost function value.
- * @exception OptimizationException if the number of cost evaluations
- * exceeds the maximum allowed
- */
- protected void updateResidualsAndCost()
- throws OptimizationException {
-
- if (++costEvaluations > maxCostEval) {
- throw new OptimizationException("maximal number of evaluations exceeded ({0})",
- maxCostEval);
- }
-
- cost = 0;
- for (int i = 0, index = 0; i < rows; i++, index += cols) {
- WeightedMeasurement wm = measurements[i];
- double residual = wm.getResidual();
- residuals[i] = Math.sqrt(wm.getWeight()) * residual;
- cost += wm.getWeight() * residual * residual;
- }
- cost = Math.sqrt(cost);
-
- }
-
- /**
- * Get the Root Mean Square value.
- * Get the Root Mean Square value, i.e. the root of the arithmetic
- * mean of the square of all weighted residuals. This is related to the
- * criterion that is minimized by the estimator as follows: if
- * c if the criterion, and n is the number of
- * measurements, then the RMS is sqrt (c/n).
- *
- * @param problem estimation problem
- * @return RMS value
- */
- public double getRMS(EstimationProblem problem) {
- WeightedMeasurement[] wm = problem.getMeasurements();
- double criterion = 0;
- for (int i = 0; i < wm.length; ++i) {
- double residual = wm[i].getResidual();
- criterion += wm[i].getWeight() * residual * residual;
- }
- return Math.sqrt(criterion / wm.length);
- }
-
- /**
- * Get the Chi-Square value.
- * @param problem estimation problem
- * @return chi-square value
- */
- public double getChiSquare(EstimationProblem problem) {
- WeightedMeasurement[] wm = problem.getMeasurements();
- double chiSquare = 0;
- for (int i = 0; i < wm.length; ++i) {
- double residual = wm[i].getResidual();
- chiSquare += residual * residual / wm[i].getWeight();
- }
- return chiSquare;
- }
-
- /**
- * Get the covariance matrix of unbound estimated parameters.
- * @param problem estimation problem
- * @return covariance matrix
- * @exception OptimizationException if the covariance matrix
- * cannot be computed (singular problem)
- */
- public double[][] getCovariances(EstimationProblem problem)
- throws OptimizationException {
-
- // set up the jacobian
- updateJacobian();
-
- // compute transpose(J).J, avoiding building big intermediate matrices
- final int rows = problem.getMeasurements().length;
- final int cols = problem.getUnboundParameters().length;
- final int max = cols * rows;
- double[][] jTj = new double[cols][cols];
- for (int i = 0; i < cols; ++i) {
- for (int j = i; j < cols; ++j) {
- double sum = 0;
- for (int k = 0; k < max; k += cols) {
- sum += jacobian[k + i] * jacobian[k + j];
- }
- jTj[i][j] = sum;
- jTj[j][i] = sum;
- }
- }
-
- try {
- // compute the covariances matrix
- RealMatrix inverse =
- new LUDecompositionImpl(MatrixUtils.createRealMatrix(jTj)).getSolver().getInverse();
- return inverse.getData();
- } catch (InvalidMatrixException ime) {
- throw new OptimizationException("unable to compute covariances: singular problem");
- }
-
- }
-
- /**
- * Guess the errors in unbound estimated parameters.
- * Guessing is covariance-based, it only gives rough order of magnitude.
- * @param problem estimation problem
- * @return errors in estimated parameters
- * @exception OptimizationException if the covariances matrix cannot be computed
- * or the number of degrees of freedom is not positive (number of measurements
- * lesser or equal to number of parameters)
- */
- public double[] guessParametersErrors(EstimationProblem problem)
- throws OptimizationException {
- int m = problem.getMeasurements().length;
- int p = problem.getUnboundParameters().length;
- if (m <= p) {
- throw new OptimizationException(
- "no degrees of freedom ({0} measurements, {1} parameters)",
- m, p);
- }
- double[] errors = new double[problem.getUnboundParameters().length];
- final double c = Math.sqrt(getChiSquare(problem) / (m - p));
- double[][] covar = getCovariances(problem);
- for (int i = 0; i < errors.length; ++i) {
- errors[i] = Math.sqrt(covar[i][i]) * c;
- }
- return errors;
- }
-
- /**
- * Initialization of the common parts of the estimation.
- * This method must be called at the start
- * of the {@link #estimate(EstimationProblem) estimate}
- * method.
- * @param problem estimation problem to solve
- */
- protected void initializeEstimate(EstimationProblem problem) {
-
- // reset counters
- costEvaluations = 0;
- jacobianEvaluations = 0;
-
- // retrieve the equations and the parameters
- measurements = problem.getMeasurements();
- parameters = problem.getUnboundParameters();
-
- // arrays shared with the other private methods
- rows = measurements.length;
- cols = parameters.length;
- jacobian = new double[rows * cols];
- residuals = new double[rows];
-
- cost = Double.POSITIVE_INFINITY;
-
- }
-
- /**
- * Solve an estimation problem.
- *
- * The method should set the parameters of the problem to several
- * trial values until it reaches convergence. If this method returns
- * normally (i.e. without throwing an exception), then the best
- * estimate of the parameters can be retrieved from the problem
- * itself, through the {@link EstimationProblem#getAllParameters
- * EstimationProblem.getAllParameters} method.
- *
- * @param problem estimation problem to solve
- * @exception OptimizationException if the problem cannot be solved
- *
- */
- public abstract void estimate(EstimationProblem problem)
- throws OptimizationException;
-
- /** Array of measurements. */
- protected WeightedMeasurement[] measurements;
-
- /** Array of parameters. */
- protected EstimatedParameter[] parameters;
-
- /**
- * Jacobian matrix.
- * This matrix is in canonical form just after the calls to
- * {@link #updateJacobian()}, but may be modified by the solver
- * in the derived class (the {@link LevenbergMarquardtEstimator
- * Levenberg-Marquardt estimator} does this).
- */
- protected double[] jacobian;
-
- /** Number of columns of the jacobian matrix. */
- protected int cols;
-
- /** Number of rows of the jacobian matrix. */
- protected int rows;
-
- /** Residuals array.
- * This array is in canonical form just after the calls to
- * {@link #updateJacobian()}, but may be modified by the solver
- * in the derived class (the {@link LevenbergMarquardtEstimator
- * Levenberg-Marquardt estimator} does this).
- */
- protected double[] residuals;
-
- /** Cost value (square root of the sum of the residuals). */
- protected double cost;
-
- /** Maximal allowed number of cost evaluations. */
- private int maxCostEval;
-
- /** Number of cost evaluations. */
- private int costEvaluations;
-
- /** Number of jacobian evaluations. */
- private int jacobianEvaluations;
-
-}
\ No newline at end of file
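
The class removed above exposed cost, RMS and chi-square statistics computed from the weighted
residuals. For readers mapping old code onto the new optimizer, here is a self-contained sketch
of the same formulas the deleted methods used (cost = sqrt(sum w*r^2), RMS = sqrt(sum(w*r^2)/n),
chi2 = sum(r^2/w)); the residuals and weights are illustrative data only.

    public final class ResidualStatsDemo {
        public static void main(String[] args) {
            double[] residuals = { 0.3, -0.1, 0.2, -0.4 };   // measured - theoretical values
            double[] weights   = { 1.0,  2.0, 1.0,  0.5 };
            double criterion = 0;
            double chiSquare = 0;
            for (int i = 0; i < residuals.length; ++i) {
                criterion += weights[i] * residuals[i] * residuals[i];
                chiSquare += residuals[i] * residuals[i] / weights[i];
            }
            double cost = Math.sqrt(criterion);                      // value minimized by the estimator
            double rms  = Math.sqrt(criterion / residuals.length);   // what getRMS() returned
            System.out.println("cost=" + cost + " rms=" + rms + " chi2=" + chiSquare);
        }
    }
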
diff --git a/src/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java b/src/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java
index 8728c7bff..394462634 100644
--- a/src/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java
+++ b/src/java/org/apache/commons/math/optimization/general/AbstractLeastSquaresOptimizer.java
@@ -30,8 +30,8 @@ import org.apache.commons.math.optimization.VectorialDifferentiableOptimizer;
import org.apache.commons.math.optimization.VectorialPointValuePair;
/**
- * Base class for implementing estimators.
- * This base class handles the boilerplates methods associated to thresholds
+ * Base class for implementing least squares optimizers.
+ *
+ * This base class handles the boilerplate methods associated to thresholds
* settings, jacobian and error estimation.
* @version $Revision$ $Date$
* @since 1.2
@@ -61,8 +61,8 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
* Jacobian matrix.
* This matrix is in canonical form just after the calls to
* {@link #updateJacobian()}, but may be modified by the solver
- * in the derived class (the {@link LevenbergMarquardtEstimator
- * Levenberg-Marquardt estimator} does this).
+ * in the derived class (the {@link LevenbergMarquardtOptimizer
+ * Levenberg-Marquardt optimizer} does this).
*/
protected double[][] jacobian;
@@ -87,6 +87,9 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
/** Current objective function value. */
protected double[] objective;
+ /** Current residuals. */
+ protected double[] residuals;
+
/** Cost value (square root of the sum of the residuals). */
protected double cost;
@@ -114,6 +117,11 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
return objectiveEvaluations;
}
+ /** {@inheritDoc} */
+ public int getJacobianEvaluations() {
+ return jacobianEvaluations;
+ }
+
/** {@inheritDoc} */
public void setConvergenceChecker(VectorialConvergenceChecker checker) {
this.checker = checker;
@@ -175,7 +183,8 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
}
cost = 0;
for (int i = 0, index = 0; i < rows; i++, index += cols) {
- final double residual = objective[i] - target[i];
+ final double residual = target[i] - objective[i];
+ residuals[i] = residual;
cost += weights[i] * residual * residual;
}
cost = Math.sqrt(cost);
@@ -186,7 +195,7 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
* Get the Root Mean Square value.
* Get the Root Mean Square value, i.e. the root of the arithmetic
* mean of the square of all weighted residuals. This is related to the
- * criterion that is minimized by the estimator as follows: if
+ * criterion that is minimized by the optimizer as follows: if
* c if the criterion, and n is the number of
* measurements, then the RMS is sqrt (c/n).
*
@@ -195,7 +204,7 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
public double getRMS() {
double criterion = 0;
for (int i = 0; i < rows; ++i) {
- final double residual = objective[i] - target[i];
+ final double residual = residuals[i];
criterion += weights[i] * residual * residual;
}
return Math.sqrt(criterion / rows);
@@ -208,14 +217,14 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
public double getChiSquare() {
double chiSquare = 0;
for (int i = 0; i < rows; ++i) {
- final double residual = objective[i] - target[i];
+ final double residual = residuals[i];
chiSquare += residual * residual / weights[i];
}
return chiSquare;
}
/**
- * Get the covariance matrix of unbound estimated parameters.
+ * Get the covariance matrix of optimized parameters.
* @return covariance matrix
* @exception ObjectiveException if the function jacobian cannot
* be evaluated
@@ -231,12 +240,10 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
// compute transpose(J).J, avoiding building big intermediate matrices
double[][] jTj = new double[cols][cols];
for (int i = 0; i < cols; ++i) {
- final double[] ji = jacobian[i];
for (int j = i; j < cols; ++j) {
- final double[] jj = jacobian[j];
double sum = 0;
for (int k = 0; k < rows; ++k) {
- sum += ji[k] * jj[k];
+ sum += jacobian[k][i] * jacobian[k][j];
}
jTj[i][j] = sum;
jTj[j][i] = sum;
@@ -255,9 +262,9 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
}
/**
- * Guess the errors in unbound estimated parameters.
+ * Guess the errors in optimized parameters.
* Guessing is covariance-based, it only gives rough order of magnitude.
- * @return errors in estimated parameters
+ * @return errors in optimized parameters
* @exception ObjectiveException if the function jacobian cannot be evaluated
* @exception OptimizationException if the covariances matrix cannot be computed
* or the number of degrees of freedom is not positive (number of measurements
@@ -299,6 +306,7 @@ public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferen
this.target = target;
this.weights = weights;
this.variables = startPoint.clone();
+ this.residuals = new double[target.length];
// arrays shared with the other private methods
rows = target.length;
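
The covariance hunk above switches the accumulation of transpose(J).J to
jacobian[k][i] * jacobian[k][j] because the jacobian is now stored as a rows x cols array
indexed [measurement][parameter]. A standalone illustration of that accumulation, with
made-up numbers:

    public final class NormalMatrixDemo {
        public static void main(String[] args) {
            double[][] jacobian = { { 1.0, 2.0 }, { 0.5, -1.0 }, { 2.0, 0.0 } };  // 3 rows, 2 cols
            int rows = jacobian.length;
            int cols = jacobian[0].length;
            double[][] jTj = new double[cols][cols];
            for (int i = 0; i < cols; ++i) {
                for (int j = i; j < cols; ++j) {
                    double sum = 0;
                    for (int k = 0; k < rows; ++k) {
                        sum += jacobian[k][i] * jacobian[k][j];   // column dot product over rows
                    }
                    jTj[i][j] = sum;
                    jTj[j][i] = sum;                              // transpose(J).J is symmetric
                }
            }
            System.out.println(java.util.Arrays.deepToString(jTj));
        }
    }
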
diff --git a/src/java/org/apache/commons/math/optimization/general/EstimatedParameter.java b/src/java/org/apache/commons/math/optimization/general/EstimatedParameter.java
deleted file mode 100644
index 0b3d7d253..000000000
--- a/src/java/org/apache/commons/math/optimization/general/EstimatedParameter.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import java.io.Serializable;
-
-/** This class represents the estimated parameters of an estimation problem.
- *
- * The parameters of an estimation problem have a name, a value and
- * a bound flag. The value of bound parameters is considered trusted
- * and the solvers should not adjust them. On the other hand, the
- * solvers should adjust the value of unbounds parameters until they
- * satisfy convergence criterions specific to each solver.
- *
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-
-public class EstimatedParameter
- implements Serializable {
-
- /** Simple constructor.
- * Build an instance from a first estimate of the parameter,
- * initially considered unbound.
- * @param name name of the parameter
- * @param firstEstimate first estimate of the parameter
- */
- public EstimatedParameter(String name, double firstEstimate) {
- this.name = name;
- estimate = firstEstimate;
- bound = false;
- }
-
- /** Simple constructor.
- * Build an instance from a first estimate of the parameter and a
- * bound flag
- * @param name name of the parameter
- * @param firstEstimate first estimate of the parameter
- * @param bound flag, should be true if the parameter is bound
- */
- public EstimatedParameter(String name,
- double firstEstimate,
- boolean bound) {
- this.name = name;
- estimate = firstEstimate;
- this.bound = bound;
- }
-
- /** Copy constructor.
- * Build a copy of a parameter
- * @param parameter instance to copy
- */
- public EstimatedParameter(EstimatedParameter parameter) {
- name = parameter.name;
- estimate = parameter.estimate;
- bound = parameter.bound;
- }
-
- /** Set a new estimated value for the parameter.
- * @param estimate new estimate for the parameter
- */
- public void setEstimate(double estimate) {
- this.estimate = estimate;
- }
-
- /** Get the current estimate of the parameter
- * @return current estimate
- */
- public double getEstimate() {
- return estimate;
- }
-
- /** get the name of the parameter
- * @return parameter name
- */
- public String getName() {
- return name;
- }
-
- /** Set the bound flag of the parameter
- * @param bound this flag should be set to true if the parameter is
- * bound (i.e. if it should not be adjusted by the solver).
- */
- public void setBound(boolean bound) {
- this.bound = bound;
- }
-
- /** Check if the parameter is bound
- * @return true if the parameter is bound */
- public boolean isBound() {
- return bound;
- }
-
- /** Name of the parameter */
- private String name;
-
- /** Current value of the parameter */
- protected double estimate;
-
- /** Indicator for bound parameters
- * (ie parameters that should not be estimated)
- */
- private boolean bound;
-
- /** Serializable version identifier */
- private static final long serialVersionUID = -555440800213416949L;
-
-}
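
For old client code being migrated, a short usage sketch of the removed class above; it only
compiles against a pre-change release that still ships EstimatedParameter.

    public final class ParameterDemo {
        public static void main(String[] args) {
            EstimatedParameter a = new EstimatedParameter("a", 1.0);        // unbound, adjusted by solvers
            EstimatedParameter b = new EstimatedParameter("b", 2.5, true);  // bound, left untouched
            a.setEstimate(1.2);                                             // solver-style update
            System.out.println(a.getName() + " = " + a.getEstimate() + ", b bound: " + b.isBound());
        }
    }
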
diff --git a/src/java/org/apache/commons/math/optimization/general/EstimationProblem.java b/src/java/org/apache/commons/math/optimization/general/EstimationProblem.java
deleted file mode 100644
index ab3d6c1db..000000000
--- a/src/java/org/apache/commons/math/optimization/general/EstimationProblem.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-/**
- * This interface represents an estimation problem.
- *
- * This interface should be implemented by all real estimation
- * problems before they can be handled by the estimators through the
- * {@link Estimator#estimate Estimator.estimate} method.
- *
- * An estimation problem, as seen by a solver is a set of
- * parameters and a set of measurements. The parameters are adjusted
- * during the estimation through the {@link #getUnboundParameters
- * getUnboundParameters} and {@link EstimatedParameter#setEstimate
- * EstimatedParameter.setEstimate} methods. The measurements both have
- * a measured value which is generally fixed at construction and a
- * theoretical value which depends on the model and hence varies as
- * the parameters are adjusted. The purpose of the solver is to reduce
- * the residual between these values, it can retrieve the measurements
- * through the {@link #getMeasurements getMeasurements} method.
- *
- * @see Estimator
- * @see WeightedMeasurement
- *
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-
-public interface EstimationProblem {
- /**
- * Get the measurements of an estimation problem.
- * @return measurements
- */
- WeightedMeasurement[] getMeasurements();
-
- /**
- * Get the unbound parameters of the problem.
- * @return unbound parameters
- */
- EstimatedParameter[] getUnboundParameters();
-
- /**
- * Get all the parameters of the problem.
- * @return parameters
- */
- EstimatedParameter[] getAllParameters();
-
-}
diff --git a/src/java/org/apache/commons/math/optimization/general/Estimator.java b/src/java/org/apache/commons/math/optimization/general/Estimator.java
deleted file mode 100644
index d6df92b7d..000000000
--- a/src/java/org/apache/commons/math/optimization/general/Estimator.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import org.apache.commons.math.optimization.OptimizationException;
-
-/**
- * This interface represents solvers for estimation problems.
- *
- * The classes which are devoted to solve estimation problems
- * should implement this interface. The problems which can be handled
- * should implement the {@link EstimationProblem} interface which
- * gather all the information needed by the solver.
- *
- * The interface is composed only of the {@link #estimate estimate}
- * method.
- *
- * @see EstimationProblem
- *
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-
-public interface Estimator {
-
- /**
- * Solve an estimation problem.
- *
- * The method should set the parameters of the problem to several
- * trial values until it reaches convergence. If this method returns
- * normally (i.e. without throwing an exception), then the best
- * estimate of the parameters can be retrieved from the problem
- * itself, through the {@link EstimationProblem#getAllParameters
- * EstimationProblem.getAllParameters} method.
- *
- * @param problem estimation problem to solve
- * @exception OptimizationException if the problem cannot be solved
- *
- */
- void estimate(EstimationProblem problem)
- throws OptimizationException;
-
- /**
- * Get the Root Mean Square value.
- * Get the Root Mean Square value, i.e. the root of the arithmetic
- * mean of the square of all weighted residuals. This is related to the
- * criterion that is minimized by the estimator as follows: if
- * c is the criterion, and n is the number of
- * measurements, then the RMS is sqrt (c/n).
- * @see #guessParametersErrors(EstimationProblem)
- *
- * @param problem estimation problem
- * @return RMS value
- */
- double getRMS(EstimationProblem problem);
-
- /**
- * Get the covariance matrix of estimated parameters.
- * @param problem estimation problem
- * @return covariance matrix
- * @exception OptimizationException if the covariance matrix
- * cannot be computed (singular problem)
- */
- double[][] getCovariances(EstimationProblem problem)
- throws OptimizationException;
-
- /**
- * Guess the errors in estimated parameters.
- * @see #getRMS(EstimationProblem)
- * @param problem estimation problem
- * @return errors in estimated parameters
- * @exception OptimizationException if the error cannot be guessed
- */
- double[] guessParametersErrors(EstimationProblem problem)
- throws OptimizationException;
-
-}
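
The guessParametersErrors contract removed above (and its implementation in AbstractEstimator,
also deleted by this patch) scales the covariance diagonal by sqrt(chi2 / (m - p)). A
self-contained numeric sketch of that formula; the covariance diagonal and chi-square values
below are illustrative only.

    public final class ErrorGuessDemo {
        public static void main(String[] args) {
            double[] covarianceDiagonal = { 0.04, 0.09 };  // diagonal of inverse(J^T.J)
            double chiSquare = 3.2;
            int m = 10;                                    // number of measurements
            int p = 2;                                     // number of parameters
            double c = Math.sqrt(chiSquare / (m - p));
            for (int i = 0; i < covarianceDiagonal.length; ++i) {
                System.out.println("error[" + i + "] ~ " + Math.sqrt(covarianceDiagonal[i]) * c);
            }
        }
    }
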
diff --git a/src/java/org/apache/commons/math/optimization/general/GaussNewtonEstimator.java b/src/java/org/apache/commons/math/optimization/general/GaussNewtonEstimator.java
deleted file mode 100644
index eba1675a3..000000000
--- a/src/java/org/apache/commons/math/optimization/general/GaussNewtonEstimator.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import java.io.Serializable;
-
-import org.apache.commons.math.linear.InvalidMatrixException;
-import org.apache.commons.math.linear.MatrixUtils;
-import org.apache.commons.math.linear.RealMatrix;
-import org.apache.commons.math.linear.RealVector;
-import org.apache.commons.math.linear.RealVectorImpl;
-import org.apache.commons.math.linear.decomposition.LUDecompositionImpl;
-import org.apache.commons.math.optimization.OptimizationException;
-
-/**
- * This class implements a solver for estimation problems.
- *
- * This class solves estimation problems using a weighted least
- * squares criterion on the measurement residuals. It uses a
- * Gauss-Newton algorithm.
- *
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-
-public class GaussNewtonEstimator extends AbstractEstimator implements Serializable {
-
- /** Serializable version identifier */
- private static final long serialVersionUID = 5485001826076289109L;
-
- /** Default threshold for cost steady state detection. */
- private static final double DEFAULT_STEADY_STATE_THRESHOLD = 1.0e-6;
-
- /** Default threshold for cost convergence. */
- private static final double DEFAULT_CONVERGENCE = 1.0e-6;
-
- /** Threshold for cost steady state detection. */
- private double steadyStateThreshold;
-
- /** Threshold for cost convergence. */
- private double convergence;
-
- /** Simple constructor with default settings.
- *
- * The estimator is built with default values for all settings.
- *
- * @see #DEFAULT_STEADY_STATE_THRESHOLD
- * @see #DEFAULT_CONVERGENCE
- * @see AbstractEstimator#DEFAULT_MAX_COST_EVALUATIONS
- */
- public GaussNewtonEstimator() {
- this.steadyStateThreshold = DEFAULT_STEADY_STATE_THRESHOLD;
- this.convergence = DEFAULT_CONVERGENCE;
- }
-
- /**
- * Simple constructor.
- *
- * This constructor builds an estimator and stores its convergence
- * characteristics.
- *
- * An estimator is considered to have converged whenever either
- * the criterion goes below a physical threshold under which
- * improvements are considered useless or when the algorithm is
- * unable to improve it (even if it is still high). The first
- * condition that is met stops the iterations.
- *
- * The fact an estimator has converged does not mean that the
- * model accurately fits the measurements. It only means no better
- * solution can be found, it does not mean this one is good. Such an
- * analysis is left to the caller.
- *
- * If neither conditions are fulfilled before a given number of
- * iterations, the algorithm is considered to have failed and an
- * {@link OptimizationException} is thrown.
- *
- * @param maxCostEval maximal number of cost evaluations allowed
- * @param convergence criterion threshold below which we do not need
- * to improve the criterion anymore
- * @param steadyStateThreshold steady state detection threshold, the
- * problem has converged has reached a steady state if
- * Math.abs(Jn - Jn-1) <
- * Jn × convergence, where Jn
- * and Jn-1 are the current and preceding criterion
- * values (square sum of the weighted residuals of considered measurements).
- */
- public GaussNewtonEstimator(final int maxCostEval, final double convergence,
- final double steadyStateThreshold) {
- setMaxCostEval(maxCostEval);
- this.steadyStateThreshold = steadyStateThreshold;
- this.convergence = convergence;
- }
-
- /**
- * Set the convergence criterion threshold.
- * @param convergence criterion threshold below which we do not need
- * to improve the criterion anymore
- */
- public void setConvergence(final double convergence) {
- this.convergence = convergence;
- }
-
- /**
- * Set the steady state detection threshold.
- *
- * The problem has converged has reached a steady state if
- * Math.abs(Jn - Jn-1) <
- * Jn × convergence, where Jn
- * and Jn-1 are the current and preceding criterion
- * values (square sum of the weighted residuals of considered measurements).
- *
- * @param steadyStateThreshold steady state detection threshold
- */
- public void setSteadyStateThreshold(final double steadyStateThreshold) {
- this.steadyStateThreshold = steadyStateThreshold;
- }
-
- /**
- * Solve an estimation problem using a least squares criterion.
- *
- * This method set the unbound parameters of the given problem
- * starting from their current values through several iterations. At
- * each step, the unbound parameters are changed in order to
- * minimize a weighted least square criterion based on the
- * measurements of the problem.
- *
- * The iterations are stopped either when the criterion goes
- * below a physical threshold under which improvement are considered
- * useless or when the algorithm is unable to improve it (even if it
- * is still high). The first condition that is met stops the
- * iterations. If the convergence it not reached before the maximum
- * number of iterations, an {@link OptimizationException} is
- * thrown.
- *
- * @param problem estimation problem to solve
- * @exception OptimizationException if the problem cannot be solved
- *
- * @see EstimationProblem
- *
- */
- public void estimate(EstimationProblem problem)
- throws OptimizationException {
-
- initializeEstimate(problem);
-
- // work matrices
- double[] grad = new double[parameters.length];
- RealVectorImpl bDecrement = new RealVectorImpl(parameters.length);
- double[] bDecrementData = bDecrement.getDataRef();
- RealMatrix wGradGradT = MatrixUtils.createRealMatrix(parameters.length, parameters.length);
-
- // iterate until convergence is reached
- double previous = Double.POSITIVE_INFINITY;
- do {
-
- // build the linear problem
- incrementJacobianEvaluationsCounter();
- RealVector b = new RealVectorImpl(parameters.length);
- RealMatrix a = MatrixUtils.createRealMatrix(parameters.length, parameters.length);
- for (int i = 0; i < measurements.length; ++i) {
- if (! measurements [i].isIgnored()) {
-
- double weight = measurements[i].getWeight();
- double residual = measurements[i].getResidual();
-
- // compute the normal equation
- for (int j = 0; j < parameters.length; ++j) {
- grad[j] = measurements[i].getPartial(parameters[j]);
- bDecrementData[j] = weight * residual * grad[j];
- }
-
- // build the contribution matrix for measurement i
- for (int k = 0; k < parameters.length; ++k) {
- double gk = grad[k];
- for (int l = 0; l < parameters.length; ++l) {
- wGradGradT.setEntry(k, l, weight * gk * grad[l]);
- }
- }
-
- // update the matrices
- a = a.add(wGradGradT);
- b = b.add(bDecrement);
-
- }
- }
-
- try {
-
- // solve the linearized least squares problem
- RealVector dX = new LUDecompositionImpl(a).getSolver().solve(b);
-
- // update the estimated parameters
- for (int i = 0; i < parameters.length; ++i) {
- parameters[i].setEstimate(parameters[i].getEstimate() + dX.getEntry(i));
- }
-
- } catch(InvalidMatrixException e) {
- throw new OptimizationException("unable to solve: singular problem");
- }
-
-
- previous = cost;
- updateResidualsAndCost();
-
- } while ((getCostEvaluations() < 2) ||
- (Math.abs(previous - cost) > (cost * steadyStateThreshold) &&
- (Math.abs(cost) > convergence)));
-
- }
-
-}
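
The estimator removed above builds, at each iteration, the normal equations A = sum(w * g * g^T)
and b = sum(w * r * g) from the measurement gradients g and residuals r, then solves A.dx = b.
A self-contained single-step sketch for the linear model y = p0 + p1 * x (a 2x2 Cramer solve
stands in for the LU solver used by the deleted code; the data is illustrative):

    public final class GaussNewtonStepDemo {
        public static void main(String[] args) {
            double[] x = { 0, 1, 2, 3 };
            double[] y = { 1.1, 2.9, 5.2, 6.8 };            // roughly y = 1 + 2x
            double[] w = { 1, 1, 1, 1 };                    // measurement weights
            double[] p = { 0.0, 0.0 };                      // current estimates of p0, p1
            double[][] a = new double[2][2];
            double[] b = new double[2];
            for (int i = 0; i < x.length; ++i) {
                double[] g = { 1.0, x[i] };                 // partial derivatives of the model
                double r = y[i] - (p[0] + p[1] * x[i]);     // residual: measured - theoretical
                for (int k = 0; k < 2; ++k) {
                    b[k] += w[i] * r * g[k];
                    for (int l = 0; l < 2; ++l) {
                        a[k][l] += w[i] * g[k] * g[l];
                    }
                }
            }
            double det = a[0][0] * a[1][1] - a[0][1] * a[1][0];
            p[0] += (b[0] * a[1][1] - b[1] * a[0][1]) / det;
            p[1] += (a[0][0] * b[1] - a[1][0] * b[0]) / det;
            System.out.println("updated parameters: p0=" + p[0] + ", p1=" + p[1]);
        }
    }
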
diff --git a/src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimator.java b/src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimator.java
deleted file mode 100644
index 90f648dfc..000000000
--- a/src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimator.java
+++ /dev/null
@@ -1,873 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.commons.math.optimization.general;
-
-import java.io.Serializable;
-import java.util.Arrays;
-
-import org.apache.commons.math.optimization.OptimizationException;
-
-
-/**
- * This class solves a least squares problem.
- *
- * This implementation should work even for over-determined systems
- * (i.e. systems having more variables than equations). Over-determined systems
- * are solved by ignoring the variables which have the smallest impact according
- * to their jacobian column norm. Only the rank of the matrix and some loop bounds
- * are changed to implement this.
- *
- * The resolution engine is a simple translation of the MINPACK lmder routine with minor
- * changes. The changes include the over-determined resolution and the Q.R.
- * decomposition which has been rewritten following the algorithm described in the
- * P. Lascaux and R. Theodor book Analyse numérique matricielle
- * appliquée à l'art de l'ingénieur, Masson 1986. The
- * redistribution policy for MINPACK is available here, for convenience, it
- * is reproduced below.
- *
- *
- *
- * Minpack Copyright Notice (1999) University of Chicago.
- * All rights reserved
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * - The end-user documentation included with the redistribution, if any,
- * must include the following acknowledgment:
- *
- * This product includes software developed by the University of
- * Chicago, as Operator of Argonne National Laboratory.
- * Alternately, this acknowledgment may appear in the software itself,
- * if and wherever such third-party acknowledgments normally appear.
- * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- * BE CORRECTED.
- * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- * POSSIBILITY OF SUCH LOSS OR DAMAGES.
- *
- *
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran)
- * @author Burton S. Garbow (original fortran)
- * @author Kenneth E. Hillstrom (original fortran)
- * @author Jorge J. More (original fortran)
-
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-public class LevenbergMarquardtEstimator extends AbstractEstimator implements Serializable {
-
- /**
- * Build an estimator for least squares problems.
- * The default values for the algorithm settings are:
- *
- * - {@link #setInitialStepBoundFactor initial step bound factor}: 100.0
- * - {@link #setMaxCostEval maximal cost evaluations}: 1000
- * - {@link #setCostRelativeTolerance cost relative tolerance}: 1.0e-10
- * - {@link #setParRelativeTolerance parameters relative tolerance}: 1.0e-10
- * - {@link #setOrthoTolerance orthogonality tolerance}: 1.0e-10
- *
- *
- */
- public LevenbergMarquardtEstimator() {
-
- // set up the superclass with a default max cost evaluations setting
- setMaxCostEval(1000);
-
- // default values for the tuning parameters
- setInitialStepBoundFactor(100.0);
- setCostRelativeTolerance(1.0e-10);
- setParRelativeTolerance(1.0e-10);
- setOrthoTolerance(1.0e-10);
-
- }
-
- /**
- * Set the positive input variable used in determining the initial step bound.
- * This bound is set to the product of initialStepBoundFactor and the euclidean norm of diag*x if nonzero,
- * or else to initialStepBoundFactor itself. In most cases factor should lie
- * in the interval (0.1, 100.0). 100.0 is a generally recommended value
- *
- * @param initialStepBoundFactor initial step bound factor
- * @see #estimate
- */
- public void setInitialStepBoundFactor(double initialStepBoundFactor) {
- this.initialStepBoundFactor = initialStepBoundFactor;
- }
-
- /**
- * Set the desired relative error in the sum of squares.
- *
- * @param costRelativeTolerance desired relative error in the sum of squares
- * @see #estimate
- */
- public void setCostRelativeTolerance(double costRelativeTolerance) {
- this.costRelativeTolerance = costRelativeTolerance;
- }
-
- /**
- * Set the desired relative error in the approximate solution parameters.
- *
- * @param parRelativeTolerance desired relative error
- * in the approximate solution parameters
- * @see #estimate
- */
- public void setParRelativeTolerance(double parRelativeTolerance) {
- this.parRelativeTolerance = parRelativeTolerance;
- }
-
- /**
- * Set the desired max cosine on the orthogonality.
- *
- * @param orthoTolerance desired max cosine on the orthogonality
- * between the function vector and the columns of the jacobian
- * @see #estimate
- */
- public void setOrthoTolerance(double orthoTolerance) {
- this.orthoTolerance = orthoTolerance;
- }
-
- /**
- * Solve an estimation problem using the Levenberg-Marquardt algorithm.
- * The algorithm used is a modified Levenberg-Marquardt one, based
- * on the MINPACK lmder
- * routine. The algorithm settings must have been set up before this method
- * is called with the {@link #setInitialStepBoundFactor},
- * {@link #setMaxCostEval}, {@link #setCostRelativeTolerance},
- * {@link #setParRelativeTolerance} and {@link #setOrthoTolerance} methods.
- * If these methods have not been called, the default values set up by the
- * {@link #LevenbergMarquardtEstimator() constructor} will be used.
- * The authors of the original fortran function are:
- *
- * - Argonne National Laboratory. MINPACK project. March 1980
- * - Burton S. Garbow
- * - Kenneth E. Hillstrom
- * - Jorge J. More
- *
- * Luc Maisonobe did the Java translation.
- *
- * @param problem estimation problem to solve
- * @exception OptimizationException if convergence cannot be
- * reached with the specified algorithm settings or if there are more variables
- * than equations
- * @see #setInitialStepBoundFactor
- * @see #setCostRelativeTolerance
- * @see #setParRelativeTolerance
- * @see #setOrthoTolerance
- */
- public void estimate(EstimationProblem problem)
- throws OptimizationException {
-
- initializeEstimate(problem);
-
- // arrays shared with the other private methods
- solvedCols = Math.min(rows, cols);
- diagR = new double[cols];
- jacNorm = new double[cols];
- beta = new double[cols];
- permutation = new int[cols];
- lmDir = new double[cols];
-
- // local variables
- double delta = 0, xNorm = 0;
- double[] diag = new double[cols];
- double[] oldX = new double[cols];
- double[] oldRes = new double[rows];
- double[] work1 = new double[cols];
- double[] work2 = new double[cols];
- double[] work3 = new double[cols];
-
- // evaluate the function at the starting point and calculate its norm
- updateResidualsAndCost();
-
- // outer loop
- lmPar = 0;
- boolean firstIteration = true;
- while (true) {
-
- // compute the Q.R. decomposition of the jacobian matrix
- updateJacobian();
- qrDecomposition();
-
- // compute Qt.res
- qTy(residuals);
-
- // now we don't need Q anymore,
- // so let jacobian contain the R matrix with its diagonal elements
- for (int k = 0; k < solvedCols; ++k) {
- int pk = permutation[k];
- jacobian[k * cols + pk] = diagR[pk];
- }
-
- if (firstIteration) {
-
- // scale the variables according to the norms of the columns
- // of the initial jacobian
- xNorm = 0;
- for (int k = 0; k < cols; ++k) {
- double dk = jacNorm[k];
- if (dk == 0) {
- dk = 1.0;
- }
- double xk = dk * parameters[k].getEstimate();
- xNorm += xk * xk;
- diag[k] = dk;
- }
- xNorm = Math.sqrt(xNorm);
-
- // initialize the step bound delta
- delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
-
- }
-
- // check orthogonality between function vector and jacobian columns
- double maxCosine = 0;
- if (cost != 0) {
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- double s = jacNorm[pj];
- if (s != 0) {
- double sum = 0;
- for (int i = 0, index = pj; i <= j; ++i, index += cols) {
- sum += jacobian[index] * residuals[i];
- }
- maxCosine = Math.max(maxCosine, Math.abs(sum) / (s * cost));
- }
- }
- }
- if (maxCosine <= orthoTolerance) {
- return;
- }
-
- // rescale if necessary
- for (int j = 0; j < cols; ++j) {
- diag[j] = Math.max(diag[j], jacNorm[j]);
- }
-
- // inner loop
- for (double ratio = 0; ratio < 1.0e-4;) {
-
- // save the state
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- oldX[pj] = parameters[pj].getEstimate();
- }
- double previousCost = cost;
- double[] tmpVec = residuals;
- residuals = oldRes;
- oldRes = tmpVec;
-
- // determine the Levenberg-Marquardt parameter
- determineLMParameter(oldRes, delta, diag, work1, work2, work3);
-
- // compute the new point and the norm of the evolution direction
- double lmNorm = 0;
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- lmDir[pj] = -lmDir[pj];
- parameters[pj].setEstimate(oldX[pj] + lmDir[pj]);
- double s = diag[pj] * lmDir[pj];
- lmNorm += s * s;
- }
- lmNorm = Math.sqrt(lmNorm);
-
- // on the first iteration, adjust the initial step bound.
- if (firstIteration) {
- delta = Math.min(delta, lmNorm);
- }
-
- // evaluate the function at x + p and calculate its norm
- updateResidualsAndCost();
-
- // compute the scaled actual reduction
- double actRed = -1.0;
- if (0.1 * cost < previousCost) {
- double r = cost / previousCost;
- actRed = 1.0 - r * r;
- }
-
- // compute the scaled predicted reduction
- // and the scaled directional derivative
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- double dirJ = lmDir[pj];
- work1[j] = 0;
- for (int i = 0, index = pj; i <= j; ++i, index += cols) {
- work1[i] += jacobian[index] * dirJ;
- }
- }
- double coeff1 = 0;
- for (int j = 0; j < solvedCols; ++j) {
- coeff1 += work1[j] * work1[j];
- }
- double pc2 = previousCost * previousCost;
- coeff1 = coeff1 / pc2;
- double coeff2 = lmPar * lmNorm * lmNorm / pc2;
- double preRed = coeff1 + 2 * coeff2;
- double dirDer = -(coeff1 + coeff2);
-
- // ratio of the actual to the predicted reduction
- ratio = (preRed == 0) ? 0 : (actRed / preRed);
-
- // update the step bound
- if (ratio <= 0.25) {
- double tmp =
- (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
- if ((0.1 * cost >= previousCost) || (tmp < 0.1)) {
- tmp = 0.1;
- }
- delta = tmp * Math.min(delta, 10.0 * lmNorm);
- lmPar /= tmp;
- } else if ((lmPar == 0) || (ratio >= 0.75)) {
- delta = 2 * lmNorm;
- lmPar *= 0.5;
- }
-
- // test for successful iteration.
- if (ratio >= 1.0e-4) {
- // successful iteration, update the norm
- firstIteration = false;
- xNorm = 0;
- for (int k = 0; k < cols; ++k) {
- double xK = diag[k] * parameters[k].getEstimate();
- xNorm += xK * xK;
- }
- xNorm = Math.sqrt(xNorm);
- } else {
- // failed iteration, reset the previous values
- cost = previousCost;
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- parameters[pj].setEstimate(oldX[pj]);
- }
- tmpVec = residuals;
- residuals = oldRes;
- oldRes = tmpVec;
- }
-
- // tests for convergence.
- if (((Math.abs(actRed) <= costRelativeTolerance) &&
- (preRed <= costRelativeTolerance) &&
- (ratio <= 2.0)) ||
- (delta <= parRelativeTolerance * xNorm)) {
- return;
- }
-
- // tests for termination and stringent tolerances
- // (2.2204e-16 is the machine epsilon for IEEE754)
- if ((Math.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
- throw new OptimizationException("cost relative tolerance is too small ({0})," +
- " no further reduction in the" +
- " sum of squares is possible",
- costRelativeTolerance);
- } else if (delta <= 2.2204e-16 * xNorm) {
- throw new OptimizationException("parameters relative tolerance is too small" +
- " ({0}), no further improvement in" +
- " the approximate solution is possible",
- parRelativeTolerance);
- } else if (maxCosine <= 2.2204e-16) {
- throw new OptimizationException("orthogonality tolerance is too small ({0})," +
- " solution is orthogonal to the jacobian",
- orthoTolerance);
- }
-
- }
-
- }
-
- }
-
- /**
- * Determine the Levenberg-Marquardt parameter.
- * This implementation is a translation in Java of the MINPACK
- * lmpar
- * routine.
- * This method sets the lmPar and lmDir attributes.
- * The authors of the original fortran function are:
- *
- * - Argonne National Laboratory. MINPACK project. March 1980
- * - Burton S. Garbow
- * - Kenneth E. Hillstrom
- * - Jorge J. More
- *
- * Luc Maisonobe did the Java translation.
- *
- * @param qy array containing qTy
- * @param delta upper bound on the euclidean norm of diagR * lmDir
- * @param diag diagonal matrix
- * @param work1 work array
- * @param work2 work array
- * @param work3 work array
- */
- private void determineLMParameter(double[] qy, double delta, double[] diag,
- double[] work1, double[] work2, double[] work3) {
-
- // compute and store in x the gauss-newton direction, if the
- // jacobian is rank-deficient, obtain a least squares solution
- for (int j = 0; j < rank; ++j) {
- lmDir[permutation[j]] = qy[j];
- }
- for (int j = rank; j < cols; ++j) {
- lmDir[permutation[j]] = 0;
- }
- for (int k = rank - 1; k >= 0; --k) {
- int pk = permutation[k];
- double ypk = lmDir[pk] / diagR[pk];
- for (int i = 0, index = pk; i < k; ++i, index += cols) {
- lmDir[permutation[i]] -= ypk * jacobian[index];
- }
- lmDir[pk] = ypk;
- }
-
- // evaluate the function at the origin, and test
- // for acceptance of the Gauss-Newton direction
- double dxNorm = 0;
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- double s = diag[pj] * lmDir[pj];
- work1[pj] = s;
- dxNorm += s * s;
- }
- dxNorm = Math.sqrt(dxNorm);
- double fp = dxNorm - delta;
- if (fp <= 0.1 * delta) {
- lmPar = 0;
- return;
- }
-
- // if the jacobian is not rank deficient, the Newton step provides
- // a lower bound, parl, for the zero of the function,
- // otherwise set this bound to zero
- double sum2, parl = 0;
- if (rank == solvedCols) {
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- work1[pj] *= diag[pj] / dxNorm;
- }
- sum2 = 0;
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- double sum = 0;
- for (int i = 0, index = pj; i < j; ++i, index += cols) {
- sum += jacobian[index] * work1[permutation[i]];
- }
- double s = (work1[pj] - sum) / diagR[pj];
- work1[pj] = s;
- sum2 += s * s;
- }
- parl = fp / (delta * sum2);
- }
-
- // calculate an upper bound, paru, for the zero of the function
- sum2 = 0;
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- double sum = 0;
- for (int i = 0, index = pj; i <= j; ++i, index += cols) {
- sum += jacobian[index] * qy[i];
- }
- sum /= diag[pj];
- sum2 += sum * sum;
- }
- double gNorm = Math.sqrt(sum2);
- double paru = gNorm / delta;
- if (paru == 0) {
- // 2.2251e-308 is the smallest positive real for IEE754
- paru = 2.2251e-308 / Math.min(delta, 0.1);
- }
-
- // if the input par lies outside of the interval (parl,paru),
- // set par to the closer endpoint
- lmPar = Math.min(paru, Math.max(lmPar, parl));
- if (lmPar == 0) {
- lmPar = gNorm / dxNorm;
- }
-
- for (int countdown = 10; countdown >= 0; --countdown) {
-
- // evaluate the function at the current value of lmPar
- if (lmPar == 0) {
- lmPar = Math.max(2.2251e-308, 0.001 * paru);
- }
- double sPar = Math.sqrt(lmPar);
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- work1[pj] = sPar * diag[pj];
- }
- determineLMDirection(qy, work1, work2, work3);
-
- dxNorm = 0;
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- double s = diag[pj] * lmDir[pj];
- work3[pj] = s;
- dxNorm += s * s;
- }
- dxNorm = Math.sqrt(dxNorm);
- double previousFP = fp;
- fp = dxNorm - delta;
-
- // if the function is small enough, accept the current value
- // of lmPar, also test for the exceptional cases where parl is zero
- if ((Math.abs(fp) <= 0.1 * delta) ||
- ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
- return;
- }
-
- // compute the Newton correction
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- work1[pj] = work3[pj] * diag[pj] / dxNorm;
- }
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- work1[pj] /= work2[j];
- double tmp = work1[pj];
- for (int i = j + 1; i < solvedCols; ++i) {
- work1[permutation[i]] -= jacobian[i * cols + pj] * tmp;
- }
- }
- sum2 = 0;
- for (int j = 0; j < solvedCols; ++j) {
- double s = work1[permutation[j]];
- sum2 += s * s;
- }
- double correction = fp / (delta * sum2);
-
- // depending on the sign of the function, update parl or paru.
- if (fp > 0) {
- parl = Math.max(parl, lmPar);
- } else if (fp < 0) {
- paru = Math.min(paru, lmPar);
- }
-
- // compute an improved estimate for lmPar
- lmPar = Math.max(parl, lmPar + correction);
-
- }
- }
-
- /**
- * Solve a*x = b and d*x = 0 in the least squares sense.
- * This implementation is a translation in Java of the MINPACK
- * qrsolv
- * routine.
- * This method sets the lmDir and lmDiag attributes.
- * The authors of the original fortran function are:
- *
- * - Argonne National Laboratory. MINPACK project. March 1980
- * - Burton S. Garbow
- * - Kenneth E. Hillstrom
- * - Jorge J. More
- *
- * Luc Maisonobe did the Java translation.
- *
- * @param qy array containing qTy
- * @param diag diagonal matrix
- * @param lmDiag diagonal elements associated with lmDir
- * @param work work array
- */
- private void determineLMDirection(double[] qy, double[] diag,
- double[] lmDiag, double[] work) {
-
- // copy R and Qty to preserve input and initialize s
- // in particular, save the diagonal elements of R in lmDir
- for (int j = 0; j < solvedCols; ++j) {
- int pj = permutation[j];
- for (int i = j + 1; i < solvedCols; ++i) {
- jacobian[i * cols + pj] = jacobian[j * cols + permutation[i]];
- }
- lmDir[j] = diagR[pj];
- work[j] = qy[j];
- }
-
- // eliminate the diagonal matrix d using a Givens rotation
- for (int j = 0; j < solvedCols; ++j) {
-
- // prepare the row of d to be eliminated, locating the
- // diagonal element using p from the Q.R. factorization
- int pj = permutation[j];
- double dpj = diag[pj];
- if (dpj != 0) {
- Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
- }
- lmDiag[j] = dpj;
-
- // the transformations to eliminate the row of d
- // modify only a single element of Qty
- // beyond the first n, which is initially zero.
- double qtbpj = 0;
- for (int k = j; k < solvedCols; ++k) {
- int pk = permutation[k];
-
- // determine a Givens rotation which eliminates the
- // appropriate element in the current row of d
- if (lmDiag[k] != 0) {
-
- double sin, cos;
- double rkk = jacobian[k * cols + pk];
- if (Math.abs(rkk) < Math.abs(lmDiag[k])) {
- double cotan = rkk / lmDiag[k];
- sin = 1.0 / Math.sqrt(1.0 + cotan * cotan);
- cos = sin * cotan;
- } else {
- double tan = lmDiag[k] / rkk;
- cos = 1.0 / Math.sqrt(1.0 + tan * tan);
- sin = cos * tan;
- }
-
- // compute the modified diagonal element of R and
- // the modified element of (Qty,0)
- jacobian[k * cols + pk] = cos * rkk + sin * lmDiag[k];
- double temp = cos * work[k] + sin * qtbpj;
- qtbpj = -sin * work[k] + cos * qtbpj;
- work[k] = temp;
-
- // accumulate the tranformation in the row of s
- for (int i = k + 1; i < solvedCols; ++i) {
- double rik = jacobian[i * cols + pk];
- temp = cos * rik + sin * lmDiag[i];
- lmDiag[i] = -sin * rik + cos * lmDiag[i];
- jacobian[i * cols + pk] = temp;
- }
-
- }
- }
-
- // store the diagonal element of s and restore
- // the corresponding diagonal element of R
- int index = j * cols + permutation[j];
- lmDiag[j] = jacobian[index];
- jacobian[index] = lmDir[j];
-
- }
-
- // solve the triangular system for z, if the system is
- // singular, then obtain a least squares solution
- int nSing = solvedCols;
- for (int j = 0; j < solvedCols; ++j) {
- if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
- nSing = j;
- }
- if (nSing < solvedCols) {
- work[j] = 0;
- }
- }
- if (nSing > 0) {
- for (int j = nSing - 1; j >= 0; --j) {
- int pj = permutation[j];
- double sum = 0;
- for (int i = j + 1; i < nSing; ++i) {
- sum += jacobian[i * cols + pj] * work[i];
- }
- work[j] = (work[j] - sum) / lmDiag[j];
- }
- }
-
- // permute the components of z back to components of lmDir
- for (int j = 0; j < lmDir.length; ++j) {
- lmDir[permutation[j]] = work[j];
- }
-
- }
-
- /**
- * Decompose a matrix A as A.P = Q.R using Householder transforms.
- * As suggested in the P. Lascaux and R. Theodor book
- * Analyse numérique matricielle appliquée à
- * l'art de l'ingénieur (Masson, 1986), instead of representing
- * the Householder transforms with unit vectors u_k such that:
- *
- *   H_k = I - 2 u_k.u_k^T
- *
- * we use non-unit vectors v_k such that:
- *
- *   H_k = I - beta_k v_k.v_k^T
- *
- * where v_k = a_k - alpha_k e_k.
- * The betak coefficients are provided upon exit as recomputing
- * them from the vk vectors would be costly.
- * This decomposition handles rank deficient cases since the tranformations
- * are performed in non-increasing columns norms order thanks to columns
- * pivoting. The diagonal elements of the R matrix are therefore also in
- * non-increasing absolute values order.
- * @exception OptimizationException if the decomposition cannot be performed
- */
- private void qrDecomposition() throws OptimizationException {
-
- // initializations
- for (int k = 0; k < cols; ++k) {
- permutation[k] = k;
- double norm2 = 0;
- for (int index = k; index < jacobian.length; index += cols) {
- double akk = jacobian[index];
- norm2 += akk * akk;
- }
- jacNorm[k] = Math.sqrt(norm2);
- }
-
- // transform the matrix column after column
- for (int k = 0; k < cols; ++k) {
-
- // select the column with the greatest norm on active components
- int nextColumn = -1;
- double ak2 = Double.NEGATIVE_INFINITY;
- for (int i = k; i < cols; ++i) {
- double norm2 = 0;
- int iDiag = k * cols + permutation[i];
- for (int index = iDiag; index < jacobian.length; index += cols) {
- double aki = jacobian[index];
- norm2 += aki * aki;
- }
- if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
- throw new OptimizationException(
- "unable to perform Q.R decomposition on the {0}x{1} jacobian matrix",
- rows, cols);
- }
- if (norm2 > ak2) {
- nextColumn = i;
- ak2 = norm2;
- }
- }
- if (ak2 == 0) {
- rank = k;
- return;
- }
- int pk = permutation[nextColumn];
- permutation[nextColumn] = permutation[k];
- permutation[k] = pk;
-
- // choose alpha such that Hk.u = alpha ek
- int kDiag = k * cols + pk;
- double akk = jacobian[kDiag];
- double alpha = (akk > 0) ? -Math.sqrt(ak2) : Math.sqrt(ak2);
- double betak = 1.0 / (ak2 - akk * alpha);
- beta[pk] = betak;
-
- // transform the current column
- diagR[pk] = alpha;
- jacobian[kDiag] -= alpha;
-
- // transform the remaining columns
- for (int dk = cols - 1 - k; dk > 0; --dk) {
- int dkp = permutation[k + dk] - pk;
- double gamma = 0;
- for (int index = kDiag; index < jacobian.length; index += cols) {
- gamma += jacobian[index] * jacobian[index + dkp];
- }
- gamma *= betak;
- for (int index = kDiag; index < jacobian.length; index += cols) {
- jacobian[index + dkp] -= gamma * jacobian[index];
- }
- }
-
- }
-
- rank = solvedCols;
-
- }
-
- /**
- * Compute the product Qt.y for some Q.R. decomposition.
- *
- * @param y vector to multiply (will be overwritten with the result)
- */
- private void qTy(double[] y) {
- for (int k = 0; k < cols; ++k) {
- int pk = permutation[k];
- int kDiag = k * cols + pk;
- double gamma = 0;
- for (int i = k, index = kDiag; i < rows; ++i, index += cols) {
- gamma += jacobian[index] * y[i];
- }
- gamma *= beta[pk];
- for (int i = k, index = kDiag; i < rows; ++i, index += cols) {
- y[i] -= gamma * jacobian[index];
- }
- }
- }
-
- /** Number of solved variables. */
- private int solvedCols;
-
- /** Diagonal elements of the R matrix in the Q.R. decomposition. */
- private double[] diagR;
-
- /** Norms of the columns of the jacobian matrix. */
- private double[] jacNorm;
-
- /** Coefficients of the Householder transforms vectors. */
- private double[] beta;
-
- /** Columns permutation array. */
- private int[] permutation;
-
- /** Rank of the jacobian matrix. */
- private int rank;
-
- /** Levenberg-Marquardt parameter. */
- private double lmPar;
-
- /** Parameters evolution direction associated with lmPar. */
- private double[] lmDir;
-
- /** Positive input variable used in determining the initial step bound. */
- private double initialStepBoundFactor;
-
- /** Desired relative error in the sum of squares. */
- private double costRelativeTolerance;
-
- /** Desired relative error in the approximate solution parameters. */
- private double parRelativeTolerance;
-
- /** Desired max cosine on the orthogonality between the function vector
- * and the columns of the jacobian. */
- private double orthoTolerance;
-
- /** Serializable version identifier */
- private static final long serialVersionUID = -5705952631533171019L;
-
-}
diff --git a/src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java b/src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java
new file mode 100644
index 000000000..232901aaf
--- /dev/null
+++ b/src/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java
@@ -0,0 +1,838 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math.optimization.general;
+
+import java.util.Arrays;
+
+import org.apache.commons.math.optimization.ObjectiveException;
+import org.apache.commons.math.optimization.OptimizationException;
+import org.apache.commons.math.optimization.VectorialPointValuePair;
+
+
+/**
+ * This class solves a least squares problem using the Levenberg-Marquardt algorithm.
+ *
+ * This implementation should work even for under-determined systems
+ * (i.e. systems having more variables than equations). Such systems
+ * are solved by ignoring the variables which have the smallest impact according
+ * to their jacobian column norm. Only the rank of the matrix and some loop bounds
+ * are changed to implement this.
+ *
+ * The resolution engine is a simple translation of the MINPACK lmder routine with minor
+ * changes. The changes include the handling of under-determined systems and the Q.R.
+ * decomposition which has been rewritten following the algorithm described in the
+ * P. Lascaux and R. Theodor book Analyse numérique matricielle
+ * appliquée à l'art de l'ingénieur, Masson 1986. The
+ * redistribution policy for MINPACK is, for convenience, reproduced below.
+ *
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ * This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ *
+ *
+
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran)
+ * @author Burton S. Garbow (original fortran)
+ * @author Kenneth E. Hillstrom (original fortran)
+ * @author Jorge J. More (original fortran)
+
+ * @version $Revision$ $Date$
+ * @since 2.0
+ *
+ */
+public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer {
+
+ /** Serializable version identifier */
+ private static final long serialVersionUID = 8851282236194244323L;
+
+ /** Number of solved variables. */
+ private int solvedCols;
+
+ /** Diagonal elements of the R matrix in the Q.R. decomposition. */
+ private double[] diagR;
+
+ /** Norms of the columns of the jacobian matrix. */
+ private double[] jacNorm;
+
+ /** Coefficients of the Householder transforms vectors. */
+ private double[] beta;
+
+ /** Columns permutation array. */
+ private int[] permutation;
+
+ /** Rank of the jacobian matrix. */
+ private int rank;
+
+ /** Levenberg-Marquardt parameter. */
+ private double lmPar;
+
+ /** Parameters evolution direction associated with lmPar. */
+ private double[] lmDir;
+
+ /** Positive input variable used in determining the initial step bound. */
+ private double initialStepBoundFactor;
+
+ /** Desired relative error in the sum of squares. */
+ private double costRelativeTolerance;
+
+ /** Desired relative error in the approximate solution parameters. */
+ private double parRelativeTolerance;
+
+ /** Desired max cosine on the orthogonality between the function vector
+ * and the columns of the jacobian. */
+ private double orthoTolerance;
+
+ /**
+ * Build an optimizer for least squares problems.
+ * The default values for the algorithm settings are:
+ *
+ * - {@link #setInitialStepBoundFactor initial step bound factor}: 100.0
+ * - {@link #setMaxCostEval maximal cost evaluations}: 1000
+ * - {@link #setCostRelativeTolerance cost relative tolerance}: 1.0e-10
+ * - {@link #setParRelativeTolerance parameters relative tolerance}: 1.0e-10
+ * - {@link #setOrthoTolerance orthogonality tolerance}: 1.0e-10
+ *
+ *
+ */
+ public LevenbergMarquardtOptimizer() {
+
+ // set up the superclass with a default max cost evaluations setting
+ setMaxEvaluations(1000);
+
+ // default values for the tuning parameters
+ setInitialStepBoundFactor(100.0);
+ setCostRelativeTolerance(1.0e-10);
+ setParRelativeTolerance(1.0e-10);
+ setOrthoTolerance(1.0e-10);
+
+ }
+
+ /**
+ * Set the positive input variable used in determining the initial step bound.
+ * This bound is set to the product of initialStepBoundFactor and the euclidean
+ * norm of diag*x if nonzero, or else to initialStepBoundFactor itself. In most
+ * cases the factor should lie in the interval (0.1, 100.0); 100.0 is a generally
+ * recommended value.
+ *
+ * @param initialStepBoundFactor initial step bound factor
+ */
+ public void setInitialStepBoundFactor(double initialStepBoundFactor) {
+ this.initialStepBoundFactor = initialStepBoundFactor;
+ }
+
+ /**
+ * Set the desired relative error in the sum of squares.
+ *
+ * @param costRelativeTolerance desired relative error in the sum of squares
+ */
+ public void setCostRelativeTolerance(double costRelativeTolerance) {
+ this.costRelativeTolerance = costRelativeTolerance;
+ }
+
+ /**
+ * Set the desired relative error in the approximate solution parameters.
+ *
+ * @param parRelativeTolerance desired relative error
+ * in the approximate solution parameters
+ */
+ public void setParRelativeTolerance(double parRelativeTolerance) {
+ this.parRelativeTolerance = parRelativeTolerance;
+ }
+
+ /**
+ * Set the desired max cosine on the orthogonality.
+ *
+ * @param orthoTolerance desired max cosine on the orthogonality
+ * between the function vector and the columns of the jacobian
+ */
+ public void setOrthoTolerance(double orthoTolerance) {
+ this.orthoTolerance = orthoTolerance;
+ }
+
+ /** {@inheritDoc} */
+ protected VectorialPointValuePair doOptimize()
+ throws ObjectiveException, OptimizationException, IllegalArgumentException {
+
+ // arrays shared with the other private methods
+ solvedCols = Math.min(rows, cols);
+ diagR = new double[cols];
+ jacNorm = new double[cols];
+ beta = new double[cols];
+ permutation = new int[cols];
+ lmDir = new double[cols];
+
+ // local variables
+ double delta = 0, xNorm = 0;
+ double[] diag = new double[cols];
+ double[] oldX = new double[cols];
+ double[] oldRes = new double[rows];
+ double[] work1 = new double[cols];
+ double[] work2 = new double[cols];
+ double[] work3 = new double[cols];
+
+ // evaluate the function at the starting point and calculate its norm
+ updateResidualsAndCost();
+
+ // outer loop
+ lmPar = 0;
+ boolean firstIteration = true;
+ while (true) {
+
+ // compute the Q.R. decomposition of the jacobian matrix
+ updateJacobian();
+ qrDecomposition();
+
+ // compute Qt.res
+ qTy(residuals);
+
+ // now we don't need Q anymore,
+ // so let jacobian contain the R matrix with its diagonal elements
+ for (int k = 0; k < solvedCols; ++k) {
+ int pk = permutation[k];
+ jacobian[k][pk] = diagR[pk];
+ }
+
+ if (firstIteration) {
+
+ // scale the variables according to the norms of the columns
+ // of the initial jacobian
+ xNorm = 0;
+ for (int k = 0; k < cols; ++k) {
+ double dk = jacNorm[k];
+ if (dk == 0) {
+ dk = 1.0;
+ }
+ double xk = dk * variables[k];
+ xNorm += xk * xk;
+ diag[k] = dk;
+ }
+ xNorm = Math.sqrt(xNorm);
+
+ // initialize the step bound delta
+ delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
+
+ }
+
+ // check orthogonality between function vector and jacobian columns
+ double maxCosine = 0;
+ if (cost != 0) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = jacNorm[pj];
+ if (s != 0) {
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += jacobian[i][pj] * residuals[i];
+ }
+ maxCosine = Math.max(maxCosine, Math.abs(sum) / (s * cost));
+ }
+ }
+ }
+ if (maxCosine <= orthoTolerance) {
+ // convergence has been reached
+ return new VectorialPointValuePair(variables, objective);
+ }
+
+ // rescale if necessary
+ for (int j = 0; j < cols; ++j) {
+ diag[j] = Math.max(diag[j], jacNorm[j]);
+ }
+
+ // inner loop
+ for (double ratio = 0; ratio < 1.0e-4;) {
+
+ // save the state
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ oldX[pj] = variables[pj];
+ }
+ double previousCost = cost;
+ double[] tmpVec = residuals;
+ residuals = oldRes;
+ oldRes = tmpVec;
+
+ // determine the Levenberg-Marquardt parameter
+ determineLMParameter(oldRes, delta, diag, work1, work2, work3);
+
+ // compute the new point and the norm of the evolution direction
+ double lmNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ lmDir[pj] = -lmDir[pj];
+ variables[pj] = oldX[pj] + lmDir[pj];
+ double s = diag[pj] * lmDir[pj];
+ lmNorm += s * s;
+ }
+ lmNorm = Math.sqrt(lmNorm);
+
+ // on the first iteration, adjust the initial step bound.
+ if (firstIteration) {
+ delta = Math.min(delta, lmNorm);
+ }
+
+ // evaluate the function at x + p and calculate its norm
+ updateResidualsAndCost();
+
+ // compute the scaled actual reduction
+ double actRed = -1.0;
+ if (0.1 * cost < previousCost) {
+ double r = cost / previousCost;
+ actRed = 1.0 - r * r;
+ }
+
+ // compute the scaled predicted reduction
+ // and the scaled directional derivative
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double dirJ = lmDir[pj];
+ work1[j] = 0;
+ for (int i = 0; i <= j; ++i) {
+ work1[i] += jacobian[i][pj] * dirJ;
+ }
+ }
+ double coeff1 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ coeff1 += work1[j] * work1[j];
+ }
+ double pc2 = previousCost * previousCost;
+ coeff1 = coeff1 / pc2;
+ double coeff2 = lmPar * lmNorm * lmNorm / pc2;
+ double preRed = coeff1 + 2 * coeff2;
+ double dirDer = -(coeff1 + coeff2);
+
+ // ratio of the actual to the predicted reduction
+ ratio = (preRed == 0) ? 0 : (actRed / preRed);
+
+ // update the step bound
+ if (ratio <= 0.25) {
+ double tmp =
+ (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
+ if ((0.1 * cost >= previousCost) || (tmp < 0.1)) {
+ tmp = 0.1;
+ }
+ delta = tmp * Math.min(delta, 10.0 * lmNorm);
+ lmPar /= tmp;
+ } else if ((lmPar == 0) || (ratio >= 0.75)) {
+ delta = 2 * lmNorm;
+ lmPar *= 0.5;
+ }
+
+ // test for successful iteration.
+ if (ratio >= 1.0e-4) {
+ // successful iteration, update the norm
+ firstIteration = false;
+ xNorm = 0;
+ for (int k = 0; k < cols; ++k) {
+ double xK = diag[k] * variables[k];
+ xNorm += xK * xK;
+ }
+ xNorm = Math.sqrt(xNorm);
+ } else {
+ // failed iteration, reset the previous values
+ cost = previousCost;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ variables[pj] = oldX[pj];
+ }
+ tmpVec = residuals;
+ residuals = oldRes;
+ oldRes = tmpVec;
+ }
+
+ // tests for convergence.
+ if (((Math.abs(actRed) <= costRelativeTolerance) &&
+ (preRed <= costRelativeTolerance) &&
+ (ratio <= 2.0)) ||
+ (delta <= parRelativeTolerance * xNorm)) {
+ return new VectorialPointValuePair(variables, objective);
+ }
+
+ // tests for termination and stringent tolerances
+ // (2.2204e-16 is the machine epsilon for IEEE754)
+ if ((Math.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
+ throw new OptimizationException("cost relative tolerance is too small ({0})," +
+ " no further reduction in the" +
+ " sum of squares is possible",
+ costRelativeTolerance);
+ } else if (delta <= 2.2204e-16 * xNorm) {
+ throw new OptimizationException("parameters relative tolerance is too small" +
+ " ({0}), no further improvement in" +
+ " the approximate solution is possible",
+ parRelativeTolerance);
+ } else if (maxCosine <= 2.2204e-16) {
+ throw new OptimizationException("orthogonality tolerance is too small ({0})," +
+ " solution is orthogonal to the jacobian",
+ orthoTolerance);
+ }
+
+ }
+
+ }
+
+ }
+
+ /**
+ * Determine the Levenberg-Marquardt parameter.
+ * This implementation is a translation in Java of the MINPACK
+ * lmpar
+ * routine.
+ * This method sets the lmPar and lmDir attributes.
+ * The authors of the original fortran function are:
+ *
+ * - Argonne National Laboratory. MINPACK project. March 1980
+ * - Burton S. Garbow
+ * - Kenneth E. Hillstrom
+ * - Jorge J. More
+ *
+ * Luc Maisonobe did the Java translation.
+ *
+ * @param qy array containing qTy
+ * @param delta upper bound on the euclidean norm of diag * lmDir
+ * @param diag diagonal matrix
+ * @param work1 work array
+ * @param work2 work array
+ * @param work3 work array
+ */
+ private void determineLMParameter(double[] qy, double delta, double[] diag,
+ double[] work1, double[] work2, double[] work3) {
+
+ // compute and store in x the gauss-newton direction, if the
+ // jacobian is rank-deficient, obtain a least squares solution
+ for (int j = 0; j < rank; ++j) {
+ lmDir[permutation[j]] = qy[j];
+ }
+ for (int j = rank; j < cols; ++j) {
+ lmDir[permutation[j]] = 0;
+ }
+ for (int k = rank - 1; k >= 0; --k) {
+ int pk = permutation[k];
+ double ypk = lmDir[pk] / diagR[pk];
+ for (int i = 0; i < k; ++i) {
+ lmDir[permutation[i]] -= ypk * jacobian[i][pk];
+ }
+ lmDir[pk] = ypk;
+ }
+
+ // evaluate the function at the origin, and test
+ // for acceptance of the Gauss-Newton direction
+ double dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work1[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = Math.sqrt(dxNorm);
+ double fp = dxNorm - delta;
+ if (fp <= 0.1 * delta) {
+ lmPar = 0;
+ return;
+ }
+
+ // if the jacobian is not rank deficient, the Newton step provides
+ // a lower bound, parl, for the zero of the function,
+ // otherwise set this bound to zero
+ double sum2, parl = 0;
+ if (rank == solvedCols) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] *= diag[pj] / dxNorm;
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i < j; ++i) {
+ sum += jacobian[i][pj] * work1[permutation[i]];
+ }
+ double s = (work1[pj] - sum) / diagR[pj];
+ work1[pj] = s;
+ sum2 += s * s;
+ }
+ parl = fp / (delta * sum2);
+ }
+
+ // calculate an upper bound, paru, for the zero of the function
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += jacobian[i][pj] * qy[i];
+ }
+ sum /= diag[pj];
+ sum2 += sum * sum;
+ }
+ double gNorm = Math.sqrt(sum2);
+ double paru = gNorm / delta;
+ if (paru == 0) {
+ // 2.2251e-308 is the smallest positive normalized IEEE754 double
+ paru = 2.2251e-308 / Math.min(delta, 0.1);
+ }
+
+ // if the input par lies outside of the interval (parl,paru),
+ // set par to the closer endpoint
+ lmPar = Math.min(paru, Math.max(lmPar, parl));
+ if (lmPar == 0) {
+ lmPar = gNorm / dxNorm;
+ }
+
+ for (int countdown = 10; countdown >= 0; --countdown) {
+
+ // evaluate the function at the current value of lmPar
+ if (lmPar == 0) {
+ lmPar = Math.max(2.2251e-308, 0.001 * paru);
+ }
+ double sPar = Math.sqrt(lmPar);
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = sPar * diag[pj];
+ }
+ determineLMDirection(qy, work1, work2, work3);
+
+ dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work3[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = Math.sqrt(dxNorm);
+ double previousFP = fp;
+ fp = dxNorm - delta;
+
+ // if the function is small enough, accept the current value
+ // of lmPar, also test for the exceptional cases where parl is zero
+ if ((Math.abs(fp) <= 0.1 * delta) ||
+ ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
+ return;
+ }
+
+ // compute the Newton correction
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = work3[pj] * diag[pj] / dxNorm;
+ }
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] /= work2[j];
+ double tmp = work1[pj];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ work1[permutation[i]] -= jacobian[i][pj] * tmp;
+ }
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ double s = work1[permutation[j]];
+ sum2 += s * s;
+ }
+ double correction = fp / (delta * sum2);
+
+ // depending on the sign of the function, update parl or paru.
+ if (fp > 0) {
+ parl = Math.max(parl, lmPar);
+ } else if (fp < 0) {
+ paru = Math.min(paru, lmPar);
+ }
+
+ // compute an improved estimate for lmPar
+ lmPar = Math.max(parl, lmPar + correction);
+
+ }
+ }
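+
+ // In short (a descriptive sketch of what the routine above computes, stated with the
+ // field names of this class): determineLMParameter seeks lmPar >= 0 such that the
+ // direction lmDir solving the regularized normal equations
+ //   (Jt.J + lmPar.diag^2).lmDir = Jt.residuals
+ // satisfies ||diag.lmDir|| ~ delta to within 10% of delta. When the plain Gauss-Newton
+ // direction already meets this bound, lmPar is simply set to 0; otherwise lmPar is
+ // bracketed between parl and paru and refined by a bounded number of Newton-like
+ // corrections applied to fp(lmPar) = ||diag.lmDir(lmPar)|| - delta.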
+
+ /**
+ * Solve a*x = b and d*x = 0 in the least squares sense.
+ * This implementation is a translation in Java of the MINPACK
+ * qrsolv
+ * routine.
+ * This method sets the lmDir and lmDiag attributes.
+ * The authors of the original fortran function are:
+ *
+ * - Argonne National Laboratory. MINPACK project. March 1980
+ * - Burton S. Garbow
+ * - Kenneth E. Hillstrom
+ * - Jorge J. More
+ *
+ * Luc Maisonobe did the Java translation.
+ *
+ * @param qy array containing qTy
+ * @param diag diagonal matrix
+ * @param lmDiag diagonal elements associated with lmDir
+ * @param work work array
+ */
+ private void determineLMDirection(double[] qy, double[] diag,
+ double[] lmDiag, double[] work) {
+
+ // copy R and Qty to preserve input and initialize s
+ // in particular, save the diagonal elements of R in lmDir
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ jacobian[i][pj] = jacobian[j][permutation[i]];
+ }
+ lmDir[j] = diagR[pj];
+ work[j] = qy[j];
+ }
+
+ // eliminate the diagonal matrix d using a Givens rotation
+ for (int j = 0; j < solvedCols; ++j) {
+
+ // prepare the row of d to be eliminated, locating the
+ // diagonal element using p from the Q.R. factorization
+ int pj = permutation[j];
+ double dpj = diag[pj];
+ if (dpj != 0) {
+ Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
+ }
+ lmDiag[j] = dpj;
+
+ // the transformations to eliminate the row of d
+ // modify only a single element of Qty
+ // beyond the first n, which is initially zero.
+ double qtbpj = 0;
+ for (int k = j; k < solvedCols; ++k) {
+ int pk = permutation[k];
+
+ // determine a Givens rotation which eliminates the
+ // appropriate element in the current row of d
+ if (lmDiag[k] != 0) {
+
+ double sin, cos;
+ double rkk = jacobian[k][pk];
+ if (Math.abs(rkk) < Math.abs(lmDiag[k])) {
+ double cotan = rkk / lmDiag[k];
+ sin = 1.0 / Math.sqrt(1.0 + cotan * cotan);
+ cos = sin * cotan;
+ } else {
+ double tan = lmDiag[k] / rkk;
+ cos = 1.0 / Math.sqrt(1.0 + tan * tan);
+ sin = cos * tan;
+ }
+
+ // compute the modified diagonal element of R and
+ // the modified element of (Qty,0)
+ jacobian[k][pk] = cos * rkk + sin * lmDiag[k];
+ double temp = cos * work[k] + sin * qtbpj;
+ qtbpj = -sin * work[k] + cos * qtbpj;
+ work[k] = temp;
+
+ // accumulate the transformation in the row of s
+ for (int i = k + 1; i < solvedCols; ++i) {
+ double rik = jacobian[i][pk];
+ temp = cos * rik + sin * lmDiag[i];
+ lmDiag[i] = -sin * rik + cos * lmDiag[i];
+ jacobian[i][pk] = temp;
+ }
+
+ }
+ }
+
+ // store the diagonal element of s and restore
+ // the corresponding diagonal element of R
+ lmDiag[j] = jacobian[j][permutation[j]];
+ jacobian[j][permutation[j]] = lmDir[j];
+
+ }
+
+ // solve the triangular system for z, if the system is
+ // singular, then obtain a least squares solution
+ int nSing = solvedCols;
+ for (int j = 0; j < solvedCols; ++j) {
+ if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
+ nSing = j;
+ }
+ if (nSing < solvedCols) {
+ work[j] = 0;
+ }
+ }
+ if (nSing > 0) {
+ for (int j = nSing - 1; j >= 0; --j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = j + 1; i < nSing; ++i) {
+ sum += jacobian[i][pj] * work[i];
+ }
+ work[j] = (work[j] - sum) / lmDiag[j];
+ }
+ }
+
+ // permute the components of z back to components of lmDir
+ for (int j = 0; j < lmDir.length; ++j) {
+ lmDir[permutation[j]] = work[j];
+ }
+
+ }
+
+ /**
+ * Decompose a matrix A as A.P = Q.R using Householder transforms.
+ * As suggested in the P. Lascaux and R. Theodor book
+ * Analyse numérique matricielle appliquée à
+ * l'art de l'ingénieur (Masson, 1986), instead of representing
+ * the Householder transforms with unit vectors u_k such that:
+ *
+ *   H_k = I - 2 u_k.u_k^T
+ *
+ * we use non-unit vectors v_k such that:
+ *
+ *   H_k = I - beta_k v_k.v_k^T
+ *
+ * where v_k = a_k - alpha_k e_k.
+ * The betak coefficients are provided upon exit as recomputing
+ * them from the vk vectors would be costly.
+ * This decomposition handles rank-deficient cases since the transformations
+ * are performed in order of non-increasing column norms thanks to column
+ * pivoting. The diagonal elements of the R matrix are therefore also in
+ * order of non-increasing absolute values.
+ * @exception OptimizationException if the decomposition cannot be performed
+ */
+ private void qrDecomposition() throws OptimizationException {
+
+ // initializations
+ for (int k = 0; k < cols; ++k) {
+ permutation[k] = k;
+ double norm2 = 0;
+ for (int i = 0; i < jacobian.length; ++i) {
+ double akk = jacobian[i][k];
+ norm2 += akk * akk;
+ }
+ jacNorm[k] = Math.sqrt(norm2);
+ }
+
+ // transform the matrix column after column
+ for (int k = 0; k < cols; ++k) {
+
+ // select the column with the greatest norm on active components
+ int nextColumn = -1;
+ double ak2 = Double.NEGATIVE_INFINITY;
+ for (int i = k; i < cols; ++i) {
+ double norm2 = 0;
+ for (int j = k; j < jacobian.length; ++j) {
+ double aki = jacobian[j][permutation[i]];
+ norm2 += aki * aki;
+ }
+ if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
+ throw new OptimizationException(
+ "unable to perform Q.R decomposition on the {0}x{1} jacobian matrix",
+ rows, cols);
+ }
+ if (norm2 > ak2) {
+ nextColumn = i;
+ ak2 = norm2;
+ }
+ }
+ if (ak2 == 0) {
+ rank = k;
+ return;
+ }
+ int pk = permutation[nextColumn];
+ permutation[nextColumn] = permutation[k];
+ permutation[k] = pk;
+
+ // choose alpha such that Hk.u = alpha ek
+ double akk = jacobian[k][pk];
+ double alpha = (akk > 0) ? -Math.sqrt(ak2) : Math.sqrt(ak2);
+ double betak = 1.0 / (ak2 - akk * alpha);
+ beta[pk] = betak;
+
+ // transform the current column
+ diagR[pk] = alpha;
+ jacobian[k][pk] -= alpha;
+
+ // transform the remaining columns
+ for (int dk = cols - 1 - k; dk > 0; --dk) {
+ double gamma = 0;
+ for (int j = k; j < jacobian.length; ++j) {
+ gamma += jacobian[j][pk] * jacobian[j][permutation[k + dk]];
+ }
+ gamma *= betak;
+ for (int j = k; j < jacobian.length; ++j) {
+ jacobian[j][permutation[k + dk]] -= gamma * jacobian[j][pk];
+ }
+ }
+
+ }
+
+ rank = solvedCols;
+
+ }
+
+ /**
+ * Compute the product Qt.y for some Q.R. decomposition.
+ *
+ * @param y vector to multiply (will be overwritten with the result)
+ */
+ private void qTy(double[] y) {
+ for (int k = 0; k < cols; ++k) {
+ int pk = permutation[k];
+ double gamma = 0;
+ for (int i = k; i < rows; ++i) {
+ gamma += jacobian[i][pk] * y[i];
+ }
+ gamma *= beta[pk];
+ for (int i = k; i < rows; ++i) {
+ y[i] -= gamma * jacobian[i][pk];
+ }
+ }
+ }
+
+}
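
For reference, a minimal configuration sketch for the new class, restricted to the constructor and setters shown in the hunk above (the inherited optimize entry point is not part of this hunk, so the sketch stops at configuration; the values simply restate the documented defaults):

    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    optimizer.setMaxEvaluations(1000);           // also set by the constructor
    optimizer.setInitialStepBoundFactor(100.0);  // recommended range is (0.1, 100.0)
    optimizer.setCostRelativeTolerance(1.0e-10);
    optimizer.setParRelativeTolerance(1.0e-10);
    optimizer.setOrthoTolerance(1.0e-10);
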
diff --git a/src/java/org/apache/commons/math/optimization/general/SimpleEstimationProblem.java b/src/java/org/apache/commons/math/optimization/general/SimpleEstimationProblem.java
deleted file mode 100644
index dcf7d5f38..000000000
--- a/src/java/org/apache/commons/math/optimization/general/SimpleEstimationProblem.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Simple implementation of the {@link EstimationProblem
- * EstimationProblem} interface for boilerplate data handling.
- * This class only handles parameters and measurements
- * storage and unbound parameters filtering. It does not compute
- * anything by itself. It should either be used with measurements
- * implementation that are smart enough to know about the
- * various parameters in order to compute the partial derivatives
- * appropriately. Since the problem-specific logic is mainly related to
- * the various measurements models, the simplest way to use this class
- * is by extending it and using one internal class extending
- * {@link WeightedMeasurement WeightedMeasurement} for each measurement
- * type. The instances of the internal classes would have access to the
- * various parameters and their current estimate.
-
- * @version $Revision$ $Date$
- * @since 1.2
-
- */
-public class SimpleEstimationProblem implements EstimationProblem {
-
- /**
- * Build an empty instance without parameters nor measurements.
- */
- public SimpleEstimationProblem() {
- parameters = new ArrayList();
- measurements = new ArrayList();
- }
-
- /**
- * Get all the parameters of the problem.
- * @return parameters
- */
- public EstimatedParameter[] getAllParameters() {
- return (EstimatedParameter[]) parameters.toArray(new EstimatedParameter[parameters.size()]);
- }
-
- /**
- * Get the unbound parameters of the problem.
- * @return unbound parameters
- */
- public EstimatedParameter[] getUnboundParameters() {
-
- // filter the unbound parameters
- List unbound = new ArrayList(parameters.size());
- for (EstimatedParameter p : parameters) {
- if (! p.isBound()) {
- unbound.add(p);
- }
- }
-
- // convert to an array
- return (EstimatedParameter[]) unbound.toArray(new EstimatedParameter[unbound.size()]);
-
- }
-
- /**
- * Get the measurements of an estimation problem.
- * @return measurements
- */
- public WeightedMeasurement[] getMeasurements() {
- return (WeightedMeasurement[]) measurements.toArray(new WeightedMeasurement[measurements.size()]);
- }
-
- /** Add a parameter to the problem.
- * @param p parameter to add
- */
- protected void addParameter(EstimatedParameter p) {
- parameters.add(p);
- }
-
- /**
- * Add a new measurement to the set.
- * @param m measurement to add
- */
- protected void addMeasurement(WeightedMeasurement m) {
- measurements.add(m);
- }
-
- /** Estimated parameters. */
- private final List parameters;
-
- /** Measurements. */
- private final List measurements;
-
-}
diff --git a/src/java/org/apache/commons/math/optimization/general/WeightedMeasurement.java b/src/java/org/apache/commons/math/optimization/general/WeightedMeasurement.java
deleted file mode 100644
index aa7b3f612..000000000
--- a/src/java/org/apache/commons/math/optimization/general/WeightedMeasurement.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import java.io.Serializable;
-
-/**
- * This class represents measurements in estimation problems.
- *
- * This abstract class implements all the methods needed to handle
- * measurements in a general way. It defines neither the {@link
- * #getTheoreticalValue getTheoreticalValue} nor the {@link
- * #getPartial getPartial} methods, which should be defined by
- * sub-classes according to the specific problem.
- *
- * The {@link #getTheoreticalValue getTheoreticalValue} and {@link
- * #getPartial getPartial} methods must always use the current
- * estimate of the parameters set by the solver in the problem. These
- * parameters can be retrieved through the {@link
- * EstimationProblem#getAllParameters
- * EstimationProblem.getAllParameters} method if the measurements are
- * independent of the problem, or directly if they are implemented as
- * inner classes of the problem.
- *
- * The instances for which the ignored flag is set
- * through the {@link #setIgnored setIgnored} method are ignored by the
- * solvers. This can be used to reject wrong measurements at some
- * steps of the estimation.
- *
- * @see EstimationProblem
- *
- * @version $Revision$ $Date$
- * @since 1.2
- *
- */
-
-public abstract class WeightedMeasurement implements Serializable {
-
- /** Serializable version identifier. */
- private static final long serialVersionUID = 4360046376796901941L;
-
- /**
- * Simple constructor.
- * Build a measurement with the given parameters, and set its ignore
- * flag to false.
- * @param weight weight of the measurement in the least squares problem
- * (two common choices are either to use 1.0 for all measurements, or to
- * use a value proportional to the inverse of the variance of the measurement
- * type)
- *
- * @param measuredValue measured value
- */
- public WeightedMeasurement(double weight, double measuredValue) {
- this.weight = weight;
- this.measuredValue = measuredValue;
- ignored = false;
- }
-
- /** Simple constructor.
- *
- * Build a measurement with the given parameters
- *
- * @param weight weight of the measurement in the least squares problem
- * @param measuredValue measured value
- * @param ignored true if the measurement should be ignored
- */
- public WeightedMeasurement(double weight, double measuredValue,
- boolean ignored) {
- this.weight = weight;
- this.measuredValue = measuredValue;
- this.ignored = ignored;
- }
-
- /**
- * Get the weight of the measurement in the least squares problem
- *
- * @return weight
- */
- public double getWeight() {
- return weight;
- }
-
- /**
- * Get the measured value
- *
- * @return measured value
- */
- public double getMeasuredValue() {
- return measuredValue;
- }
-
- /**
- * Get the residual for this measurement
- * The residual is the measured value minus the theoretical value.
- *
- * @return residual
- */
- public double getResidual() {
- return measuredValue - getTheoreticalValue();
- }
-
- /**
- * Get the theoretical value expected for this measurement
- * The theoretical value is the value expected for this measurement
- * if the model and its parameter were all perfectly known.
- * The value must be computed using the current estimate of the parameters
- * set by the solver in the problem.
- *
- * @return theoretical value
- */
- public abstract double getTheoreticalValue();
-
- /**
- * Get the partial derivative of the {@link #getTheoreticalValue
- * theoretical value} according to the parameter.
- * The value must be computed using the current estimate of the parameters
- * set by the solver in the problem.
- *
- * @param parameter parameter against which the partial derivative
- * should be computed
- * @return partial derivative of the {@link #getTheoreticalValue
- * theoretical value}
- */
- public abstract double getPartial(EstimatedParameter parameter);
-
- /**
- * Set the ignore flag to the specified value
- * Setting the ignore flag to true allow to reject wrong
- * measurements, which sometimes can be detected only rather late.
- *
- * @param ignored value for the ignore flag
- */
- public void setIgnored(boolean ignored) {
- this.ignored = ignored;
- }
-
- /**
- * Check if this measurement should be ignored
- *
- * @return true if the measurement should be ignored
- */
- public boolean isIgnored() {
- return ignored;
- }
-
- /** Measurement weight. */
- private final double weight;
-
- /** Value of the measurements. */
- private final double measuredValue;
-
- /** Ignore measurement indicator. */
- private boolean ignored;
-
-}
diff --git a/src/test/org/apache/commons/math/optimization/general/EstimatedParameterTest.java b/src/test/org/apache/commons/math/optimization/general/EstimatedParameterTest.java
deleted file mode 100644
index 693a2f321..000000000
--- a/src/test/org/apache/commons/math/optimization/general/EstimatedParameterTest.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-
-import junit.framework.*;
-
-public class EstimatedParameterTest
- extends TestCase {
-
- public EstimatedParameterTest(String name) {
- super(name);
- }
-
- public void testConstruction() {
-
- EstimatedParameter p1 = new EstimatedParameter("p1", 1.0);
- assertTrue(p1.getName().equals("p1"));
- checkValue(p1.getEstimate(), 1.0);
- assertTrue(! p1.isBound());
-
- EstimatedParameter p2 = new EstimatedParameter("p2", 2.0, true);
- assertTrue(p2.getName().equals("p2"));
- checkValue(p2.getEstimate(), 2.0);
- assertTrue(p2.isBound());
-
- }
-
- public void testBound() {
-
- EstimatedParameter p = new EstimatedParameter("p", 0.0);
- assertTrue(! p.isBound());
- p.setBound(true);
- assertTrue(p.isBound());
- p.setBound(false);
- assertTrue(! p.isBound());
-
- }
-
- public void testEstimate() {
-
- EstimatedParameter p = new EstimatedParameter("p", 0.0);
- checkValue(p.getEstimate(), 0.0);
-
- for (double e = 0.0; e < 10.0; e += 0.5) {
- p.setEstimate(e);
- checkValue(p.getEstimate(), e);
- }
-
- }
-
- public static Test suite() {
- return new TestSuite(EstimatedParameterTest.class);
- }
-
- private void checkValue(double value, double expected) {
- assertTrue(Math.abs(value - expected) < 1.0e-10);
- }
-
-}
diff --git a/src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimatorTest.java b/src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimatorTest.java
deleted file mode 100644
index 9ebc84283..000000000
--- a/src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtEstimatorTest.java
+++ /dev/null
@@ -1,846 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-
-import org.apache.commons.math.optimization.OptimizationException;
-
-
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-
-/**
- * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
- * The redistribution policy for MINPACK is available here, for
- * convenience, it is reproduced below.
-
- *
- *
- * Minpack Copyright Notice (1999) University of Chicago.
- * All rights reserved
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * - The end-user documentation included with the redistribution, if any,
- * must include the following acknowledgment:
- *
- * This product includes software developed by the University of
- * Chicago, as Operator of Argonne National Laboratory.
- * Alternately, this acknowledgment may appear in the software itself,
- * if and wherever such third-party acknowledgments normally appear.
- * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
- * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
- * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
- * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
- * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
- * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
- * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
- * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
- * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
- * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
- * BE CORRECTED.
- * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
- * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
- * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
- * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
- * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
- * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
- * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
- * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
- * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
- * POSSIBILITY OF SUCH LOSS OR DAMAGES.
- *
- *
- *
-
- * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
- * @author Burton S. Garbow (original fortran minpack tests)
- * @author Kenneth E. Hillstrom (original fortran minpack tests)
- * @author Jorge J. More (original fortran minpack tests)
- * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
- */
-public class LevenbergMarquardtEstimatorTest
- extends TestCase {
-
- public LevenbergMarquardtEstimatorTest(String name) {
- super(name);
- }
-
- public void testTrivial() throws OptimizationException {
- LinearProblem problem =
- new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] {2},
- new EstimatedParameter[] {
- new EstimatedParameter("p0", 0)
- }, 3.0)
- });
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- try {
- estimator.guessParametersErrors(problem);
- fail("an exception should have been thrown");
- } catch (OptimizationException ee) {
- // expected behavior
- } catch (Exception e) {
- fail("wrong exception caught");
- }
- assertEquals(1.5,
- problem.getUnboundParameters()[0].getEstimate(),
- 1.0e-10);
- }
-
- public void testQRColumnsPermutation() throws OptimizationException {
-
- EstimatedParameter[] x = {
- new EstimatedParameter("p0", 0), new EstimatedParameter("p1", 0)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 1.0, -1.0 },
- new EstimatedParameter[] { x[0], x[1] },
- 4.0),
- new LinearMeasurement(new double[] { 2.0 },
- new EstimatedParameter[] { x[1] },
- 6.0),
- new LinearMeasurement(new double[] { 1.0, -2.0 },
- new EstimatedParameter[] { x[0], x[1] },
- 1.0)
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- assertEquals(7.0, x[0].getEstimate(), 1.0e-10);
- assertEquals(3.0, x[1].getEstimate(), 1.0e-10);
-
- }
-
- public void testNoDependency() throws OptimizationException {
- EstimatedParameter[] p = new EstimatedParameter[] {
- new EstimatedParameter("p0", 0),
- new EstimatedParameter("p1", 0),
- new EstimatedParameter("p2", 0),
- new EstimatedParameter("p3", 0),
- new EstimatedParameter("p4", 0),
- new EstimatedParameter("p5", 0)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[0] }, 0.0),
- new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[1] }, 1.1),
- new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[2] }, 2.2),
- new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[3] }, 3.3),
- new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[4] }, 4.4),
- new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[5] }, 5.5)
- });
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- for (int i = 0; i < p.length; ++i) {
- assertEquals(0.55 * i, p[i].getEstimate(), 1.0e-10);
- }
-}
-
- public void testOneSet() throws OptimizationException {
-
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 0),
- new EstimatedParameter("p1", 0),
- new EstimatedParameter("p2", 0)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 1.0 },
- new EstimatedParameter[] { p[0] },
- 1.0),
- new LinearMeasurement(new double[] { -1.0, 1.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 1.0),
- new LinearMeasurement(new double[] { -1.0, 1.0 },
- new EstimatedParameter[] { p[1], p[2] },
- 1.0)
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- assertEquals(1.0, p[0].getEstimate(), 1.0e-10);
- assertEquals(2.0, p[1].getEstimate(), 1.0e-10);
- assertEquals(3.0, p[2].getEstimate(), 1.0e-10);
-
- }
-
- public void testTwoSets() throws OptimizationException {
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 0),
- new EstimatedParameter("p1", 1),
- new EstimatedParameter("p2", 2),
- new EstimatedParameter("p3", 3),
- new EstimatedParameter("p4", 4),
- new EstimatedParameter("p5", 5)
- };
-
- double epsilon = 1.0e-7;
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
-
- // 4 elements sub-problem
- new LinearMeasurement(new double[] { 2.0, 1.0, 4.0 },
- new EstimatedParameter[] { p[0], p[1], p[3] },
- 2.0),
- new LinearMeasurement(new double[] { -4.0, -2.0, 3.0, -7.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- -9.0),
- new LinearMeasurement(new double[] { 4.0, 1.0, -2.0, 8.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 2.0),
- new LinearMeasurement(new double[] { -3.0, -12.0, -1.0 },
- new EstimatedParameter[] { p[1], p[2], p[3] },
- 2.0),
-
- // 2 elements sub-problem
- new LinearMeasurement(new double[] { epsilon, 1.0 },
- new EstimatedParameter[] { p[4], p[5] },
- 1.0 + epsilon * epsilon),
- new LinearMeasurement(new double[] { 1.0, 1.0 },
- new EstimatedParameter[] { p[4], p[5] },
- 2.0)
-
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- assertEquals( 3.0, p[0].getEstimate(), 1.0e-10);
- assertEquals( 4.0, p[1].getEstimate(), 1.0e-10);
- assertEquals(-1.0, p[2].getEstimate(), 1.0e-10);
- assertEquals(-2.0, p[3].getEstimate(), 1.0e-10);
- assertEquals( 1.0 + epsilon, p[4].getEstimate(), 1.0e-10);
- assertEquals( 1.0 - epsilon, p[5].getEstimate(), 1.0e-10);
-
- }
-
- public void testNonInversible() throws OptimizationException {
-
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 0),
- new EstimatedParameter("p1", 0),
- new EstimatedParameter("p2", 0)
- };
- LinearMeasurement[] m = new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 1.0, 2.0, -3.0 },
- new EstimatedParameter[] { p[0], p[1], p[2] },
- 1.0),
- new LinearMeasurement(new double[] { 2.0, 1.0, 3.0 },
- new EstimatedParameter[] { p[0], p[1], p[2] },
- 1.0),
- new LinearMeasurement(new double[] { -3.0, -9.0 },
- new EstimatedParameter[] { p[0], p[2] },
- 1.0)
- };
- LinearProblem problem = new LinearProblem(m);
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- double initialCost = estimator.getRMS(problem);
- estimator.estimate(problem);
- assertTrue(estimator.getRMS(problem) < initialCost);
- assertTrue(Math.sqrt(m.length) * estimator.getRMS(problem) > 0.6);
- try {
- estimator.getCovariances(problem);
- fail("an exception should have been thrown");
- } catch (OptimizationException ee) {
- // expected behavior
- } catch (Exception e) {
- fail("wrong exception caught");
- }
- double dJ0 = 2 * (m[0].getResidual() * m[0].getPartial(p[0])
- + m[1].getResidual() * m[1].getPartial(p[0])
- + m[2].getResidual() * m[2].getPartial(p[0]));
- double dJ1 = 2 * (m[0].getResidual() * m[0].getPartial(p[1])
- + m[1].getResidual() * m[1].getPartial(p[1]));
- double dJ2 = 2 * (m[0].getResidual() * m[0].getPartial(p[2])
- + m[1].getResidual() * m[1].getPartial(p[2])
- + m[2].getResidual() * m[2].getPartial(p[2]));
- assertEquals(0, dJ0, 1.0e-10);
- assertEquals(0, dJ1, 1.0e-10);
- assertEquals(0, dJ2, 1.0e-10);
-
- }
-
- public void testIllConditioned() throws OptimizationException {
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 0),
- new EstimatedParameter("p1", 1),
- new EstimatedParameter("p2", 2),
- new EstimatedParameter("p3", 3)
- };
-
- LinearProblem problem1 = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 10.0, 7.0, 8.0, 7.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 32.0),
- new LinearMeasurement(new double[] { 7.0, 5.0, 6.0, 5.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 23.0),
- new LinearMeasurement(new double[] { 8.0, 6.0, 10.0, 9.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 33.0),
- new LinearMeasurement(new double[] { 7.0, 5.0, 9.0, 10.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 31.0)
- });
- LevenbergMarquardtEstimator estimator1 = new LevenbergMarquardtEstimator();
- estimator1.estimate(problem1);
- assertEquals(0, estimator1.getRMS(problem1), 1.0e-10);
- assertEquals(1.0, p[0].getEstimate(), 1.0e-10);
- assertEquals(1.0, p[1].getEstimate(), 1.0e-10);
- assertEquals(1.0, p[2].getEstimate(), 1.0e-10);
- assertEquals(1.0, p[3].getEstimate(), 1.0e-10);
-
- LinearProblem problem2 = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 10.0, 7.0, 8.1, 7.2 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 32.0),
- new LinearMeasurement(new double[] { 7.08, 5.04, 6.0, 5.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 23.0),
- new LinearMeasurement(new double[] { 8.0, 5.98, 9.89, 9.0 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 33.0),
- new LinearMeasurement(new double[] { 6.99, 4.99, 9.0, 9.98 },
- new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
- 31.0)
- });
- LevenbergMarquardtEstimator estimator2 = new LevenbergMarquardtEstimator();
- estimator2.estimate(problem2);
- assertEquals(0, estimator2.getRMS(problem2), 1.0e-10);
- assertEquals(-81.0, p[0].getEstimate(), 1.0e-8);
- assertEquals(137.0, p[1].getEstimate(), 1.0e-8);
- assertEquals(-34.0, p[2].getEstimate(), 1.0e-8);
- assertEquals( 22.0, p[3].getEstimate(), 1.0e-8);
-
- }
-
- public void testMoreEstimatedParametersSimple() throws OptimizationException {
-
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 7),
- new EstimatedParameter("p1", 6),
- new EstimatedParameter("p2", 5),
- new EstimatedParameter("p3", 4)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 3.0, 2.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 7.0),
- new LinearMeasurement(new double[] { 1.0, -1.0, 1.0 },
- new EstimatedParameter[] { p[1], p[2], p[3] },
- 3.0),
- new LinearMeasurement(new double[] { 2.0, 1.0 },
- new EstimatedParameter[] { p[0], p[2] },
- 5.0)
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
-
- }
-
- public void testMoreEstimatedParametersUnsorted() throws OptimizationException {
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 2),
- new EstimatedParameter("p1", 2),
- new EstimatedParameter("p2", 2),
- new EstimatedParameter("p3", 2),
- new EstimatedParameter("p4", 2),
- new EstimatedParameter("p5", 2)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 1.0, 1.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 3.0),
- new LinearMeasurement(new double[] { 1.0, 1.0, 1.0 },
- new EstimatedParameter[] { p[2], p[3], p[4] },
- 12.0),
- new LinearMeasurement(new double[] { 1.0, -1.0 },
- new EstimatedParameter[] { p[4], p[5] },
- -1.0),
- new LinearMeasurement(new double[] { 1.0, -1.0, 1.0 },
- new EstimatedParameter[] { p[3], p[2], p[5] },
- 7.0),
- new LinearMeasurement(new double[] { 1.0, -1.0 },
- new EstimatedParameter[] { p[4], p[3] },
- 1.0)
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- assertEquals(3.0, p[2].getEstimate(), 1.0e-10);
- assertEquals(4.0, p[3].getEstimate(), 1.0e-10);
- assertEquals(5.0, p[4].getEstimate(), 1.0e-10);
- assertEquals(6.0, p[5].getEstimate(), 1.0e-10);
-
- }
-
- public void testRedundantEquations() throws OptimizationException {
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 1),
- new EstimatedParameter("p1", 1)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 1.0, 1.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 3.0),
- new LinearMeasurement(new double[] { 1.0, -1.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 1.0),
- new LinearMeasurement(new double[] { 1.0, 3.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 5.0)
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertEquals(0, estimator.getRMS(problem), 1.0e-10);
- assertEquals(2.0, p[0].getEstimate(), 1.0e-10);
- assertEquals(1.0, p[1].getEstimate(), 1.0e-10);
-
- }
-
- public void testInconsistentEquations() throws OptimizationException {
- EstimatedParameter[] p = {
- new EstimatedParameter("p0", 1),
- new EstimatedParameter("p1", 1)
- };
- LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
- new LinearMeasurement(new double[] { 1.0, 1.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 3.0),
- new LinearMeasurement(new double[] { 1.0, -1.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 1.0),
- new LinearMeasurement(new double[] { 1.0, 3.0 },
- new EstimatedParameter[] { p[0], p[1] },
- 4.0)
- });
-
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(problem);
- assertTrue(estimator.getRMS(problem) > 0.1);
-
- }
-
- public void testControlParameters() throws OptimizationException {
- Circle circle = new Circle(98.680, 47.345);
- circle.addPoint( 30.0, 68.0);
- circle.addPoint( 50.0, -6.0);
- circle.addPoint(110.0, -20.0);
- circle.addPoint( 35.0, 15.0);
- circle.addPoint( 45.0, 97.0);
- checkEstimate(circle, 0.1, 10, 1.0e-14, 1.0e-16, 1.0e-10, false);
- checkEstimate(circle, 0.1, 10, 1.0e-15, 1.0e-17, 1.0e-10, true);
- checkEstimate(circle, 0.1, 5, 1.0e-15, 1.0e-16, 1.0e-10, true);
- circle.addPoint(300, -300);
- checkEstimate(circle, 0.1, 20, 1.0e-18, 1.0e-16, 1.0e-10, true);
- }
-
- private void checkEstimate(EstimationProblem problem,
- double initialStepBoundFactor, int maxCostEval,
- double costRelativeTolerance, double parRelativeTolerance,
- double orthoTolerance, boolean shouldFail) {
- try {
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.setInitialStepBoundFactor(initialStepBoundFactor);
- estimator.setMaxCostEval(maxCostEval);
- estimator.setCostRelativeTolerance(costRelativeTolerance);
- estimator.setParRelativeTolerance(parRelativeTolerance);
- estimator.setOrthoTolerance(orthoTolerance);
- estimator.estimate(problem);
- assertTrue(! shouldFail);
- } catch (OptimizationException ee) {
- assertTrue(shouldFail);
- } catch (Exception e) {
- fail("wrong exception type caught");
- }
- }
-
- public void testCircleFitting() throws OptimizationException {
- Circle circle = new Circle(98.680, 47.345);
- circle.addPoint( 30.0, 68.0);
- circle.addPoint( 50.0, -6.0);
- circle.addPoint(110.0, -20.0);
- circle.addPoint( 35.0, 15.0);
- circle.addPoint( 45.0, 97.0);
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(circle);
- assertTrue(estimator.getCostEvaluations() < 10);
- assertTrue(estimator.getJacobianEvaluations() < 10);
- double rms = estimator.getRMS(circle);
- assertEquals(1.768262623567235, Math.sqrt(circle.getM()) * rms, 1.0e-10);
- assertEquals(69.96016176931406, circle.getRadius(), 1.0e-10);
- assertEquals(96.07590211815305, circle.getX(), 1.0e-10);
- assertEquals(48.13516790438953, circle.getY(), 1.0e-10);
- double[][] cov = estimator.getCovariances(circle);
- assertEquals(1.839, cov[0][0], 0.001);
- assertEquals(0.731, cov[0][1], 0.001);
- assertEquals(cov[0][1], cov[1][0], 1.0e-14);
- assertEquals(0.786, cov[1][1], 0.001);
- double[] errors = estimator.guessParametersErrors(circle);
- assertEquals(1.384, errors[0], 0.001);
- assertEquals(0.905, errors[1], 0.001);
-
- // add perfect measurements and check errors are reduced
- double cx = circle.getX();
- double cy = circle.getY();
- double r = circle.getRadius();
- for (double d= 0; d < 2 * Math.PI; d += 0.01) {
- circle.addPoint(cx + r * Math.cos(d), cy + r * Math.sin(d));
- }
- estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(circle);
- cov = estimator.getCovariances(circle);
- assertEquals(0.004, cov[0][0], 0.001);
- assertEquals(6.40e-7, cov[0][1], 1.0e-9);
- assertEquals(cov[0][1], cov[1][0], 1.0e-14);
- assertEquals(0.003, cov[1][1], 0.001);
- errors = estimator.guessParametersErrors(circle);
- assertEquals(0.004, errors[0], 0.001);
- assertEquals(0.004, errors[1], 0.001);
-
- }
-
- public void testCircleFittingBadInit() throws OptimizationException {
- Circle circle = new Circle(-12, -12);
- double[][] points = new double[][] {
- {-0.312967, 0.072366}, {-0.339248, 0.132965}, {-0.379780, 0.202724},
- {-0.390426, 0.260487}, {-0.361212, 0.328325}, {-0.346039, 0.392619},
- {-0.280579, 0.444306}, {-0.216035, 0.470009}, {-0.149127, 0.493832},
- {-0.075133, 0.483271}, {-0.007759, 0.452680}, { 0.060071, 0.410235},
- { 0.103037, 0.341076}, { 0.118438, 0.273884}, { 0.131293, 0.192201},
- { 0.115869, 0.129797}, { 0.072223, 0.058396}, { 0.022884, 0.000718},
- {-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
- {-0.278592, -0.005008}, {-0.337655, 0.056658}, {-0.385899, 0.112526},
- {-0.405517, 0.186957}, {-0.415374, 0.262071}, {-0.387482, 0.343398},
- {-0.347322, 0.397943}, {-0.287623, 0.458425}, {-0.223502, 0.475513},
- {-0.135352, 0.478186}, {-0.061221, 0.483371}, { 0.003711, 0.422737},
- { 0.065054, 0.375830}, { 0.108108, 0.297099}, { 0.123882, 0.222850},
- { 0.117729, 0.134382}, { 0.085195, 0.056820}, { 0.029800, -0.019138},
- {-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
- {-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561, 0.014926},
- {-0.471036, 0.074716}, {-0.488638, 0.182508}, {-0.485990, 0.254068},
- {-0.463943, 0.338438}, {-0.406453, 0.404704}, {-0.334287, 0.466119},
- {-0.254244, 0.503188}, {-0.161548, 0.495769}, {-0.075733, 0.495560},
- { 0.001375, 0.434937}, { 0.082787, 0.385806}, { 0.115490, 0.323807},
- { 0.141089, 0.223450}, { 0.138693, 0.131703}, { 0.126415, 0.049174},
- { 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
- {-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
- {-0.405195, -0.000895}, {-0.444937, 0.085456}, {-0.484357, 0.175597},
- {-0.472453, 0.248681}, {-0.438580, 0.347463}, {-0.402304, 0.422428},
- {-0.326777, 0.479438}, {-0.247797, 0.505581}, {-0.152676, 0.519380},
- {-0.071754, 0.516264}, { 0.015942, 0.472802}, { 0.076608, 0.419077},
- { 0.127673, 0.330264}, { 0.159951, 0.262150}, { 0.153530, 0.172681},
- { 0.140653, 0.089229}, { 0.078666, 0.024981}, { 0.023807, -0.037022},
- {-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
- };
- for (int i = 0; i < points.length; ++i) {
- circle.addPoint(points[i][0], points[i][1]);
- }
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.estimate(circle);
- assertTrue(estimator.getCostEvaluations() < 15);
- assertTrue(estimator.getJacobianEvaluations() < 10);
- assertEquals( 0.030184491196225207, estimator.getRMS(circle), 1.0e-9);
- assertEquals( 0.2922350065939634, circle.getRadius(), 1.0e-9);
- assertEquals(-0.15173845023862165, circle.getX(), 1.0e-8);
- assertEquals( 0.20750021499570379, circle.getY(), 1.0e-8);
- }
-
- public void testMath199() {
- try {
- QuadraticProblem problem = new QuadraticProblem();
- problem.addPoint (0, -3.182591015485607, 0.0);
- problem.addPoint (1, -2.5581184967730577, 4.4E-323);
- problem.addPoint (2, -2.1488478161387325, 1.0);
- problem.addPoint (3, -1.9122489313410047, 4.4E-323);
- problem.addPoint (4, 1.7785661310051026, 0.0);
- new LevenbergMarquardtEstimator().estimate(problem);
- fail("an exception should have been thrown");
- } catch (OptimizationException ee) {
- // expected behavior
- }
-
- }
-
- private static class LinearProblem implements EstimationProblem {
-
- public LinearProblem(LinearMeasurement[] measurements) {
- this.measurements = measurements;
- }
-
- public WeightedMeasurement[] getMeasurements() {
- return measurements;
- }
-
- public EstimatedParameter[] getUnboundParameters() {
- return getAllParameters();
- }
-
- public EstimatedParameter[] getAllParameters() {
- HashSet set = new HashSet();
- for (int i = 0; i < measurements.length; ++i) {
- EstimatedParameter[] parameters = measurements[i].getParameters();
- for (int j = 0; j < parameters.length; ++j) {
- set.add(parameters[j]);
- }
- }
- return (EstimatedParameter[]) set.toArray(new EstimatedParameter[set.size()]);
- }
-
- private LinearMeasurement[] measurements;
-
- }
-
- private static class LinearMeasurement extends WeightedMeasurement {
-
- public LinearMeasurement(double[] factors, EstimatedParameter[] parameters,
- double setPoint) {
- super(1.0, setPoint);
- this.factors = factors;
- this.parameters = parameters;
- }
-
- public double getTheoreticalValue() {
- double v = 0;
- for (int i = 0; i < factors.length; ++i) {
- v += factors[i] * parameters[i].getEstimate();
- }
- return v;
- }
-
- public double getPartial(EstimatedParameter parameter) {
- for (int i = 0; i < parameters.length; ++i) {
- if (parameters[i] == parameter) {
- return factors[i];
- }
- }
- return 0;
- }
-
- public EstimatedParameter[] getParameters() {
- return parameters;
- }
-
- private double[] factors;
- private EstimatedParameter[] parameters;
- private static final long serialVersionUID = -3922448707008868580L;
-
- }
-
- private static class Circle implements EstimationProblem {
-
- public Circle(double cx, double cy) {
- this.cx = new EstimatedParameter("cx", cx);
- this.cy = new EstimatedParameter("cy", cy);
- points = new ArrayList();
- }
-
- public void addPoint(double px, double py) {
- points.add(new PointModel(px, py));
- }
-
- public int getM() {
- return points.size();
- }
-
- public WeightedMeasurement[] getMeasurements() {
- return (WeightedMeasurement[]) points.toArray(new PointModel[points.size()]);
- }
-
- public EstimatedParameter[] getAllParameters() {
- return new EstimatedParameter[] { cx, cy };
- }
-
- public EstimatedParameter[] getUnboundParameters() {
- return new EstimatedParameter[] { cx, cy };
- }
-
- public double getPartialRadiusX() {
- double dRdX = 0;
- for (PointModel point : points) {
- dRdX += point.getPartialDiX();
- }
- return dRdX / points.size();
- }
-
- public double getPartialRadiusY() {
- double dRdY = 0;
- for (PointModel point : points) {
- dRdY += point.getPartialDiY();
- }
- return dRdY / points.size();
- }
-
- public double getRadius() {
- double r = 0;
- for (PointModel point : points) {
- r += point.getCenterDistance();
- }
- return r / points.size();
- }
-
- public double getX() {
- return cx.getEstimate();
- }
-
- public double getY() {
- return cy.getEstimate();
- }
-
- private class PointModel extends WeightedMeasurement {
-
- public PointModel(double px, double py) {
- super(1.0, 0.0);
- this.px = px;
- this.py = py;
- }
-
- public double getPartial(EstimatedParameter parameter) {
- if (parameter == cx) {
- return getPartialDiX() - getPartialRadiusX();
- } else if (parameter == cy) {
- return getPartialDiY() - getPartialRadiusY();
- }
- return 0;
- }
-
- public double getCenterDistance() {
- double dx = px - cx.getEstimate();
- double dy = py - cy.getEstimate();
- return Math.sqrt(dx * dx + dy * dy);
- }
-
- public double getPartialDiX() {
- return (cx.getEstimate() - px) / getCenterDistance();
- }
-
- public double getPartialDiY() {
- return (cy.getEstimate() - py) / getCenterDistance();
- }
-
- public double getTheoreticalValue() {
- return getCenterDistance() - getRadius();
- }
-
- private double px;
- private double py;
- private static final long serialVersionUID = 1L;
-
- }
-
- private EstimatedParameter cx;
- private EstimatedParameter cy;
- private ArrayList points;
-
- }
-
- private static class QuadraticProblem extends SimpleEstimationProblem {
-
- private EstimatedParameter a;
- private EstimatedParameter b;
- private EstimatedParameter c;
-
- public QuadraticProblem() {
- a = new EstimatedParameter("a", 0.0);
- b = new EstimatedParameter("b", 0.0);
- c = new EstimatedParameter("c", 0.0);
- addParameter(a);
- addParameter(b);
- addParameter(c);
- }
-
- public void addPoint(double x, double y, double w) {
- addMeasurement(new LocalMeasurement(x, y, w));
- }
-
- public double getA() {
- return a.getEstimate();
- }
-
- public double getB() {
- return b.getEstimate();
- }
-
- public double getC() {
- return c.getEstimate();
- }
-
- public double theoreticalValue(double x) {
- return ( (a.getEstimate() * x + b.getEstimate() ) * x + c.getEstimate());
- }
-
- private double partial(double x, EstimatedParameter parameter) {
- if (parameter == a) {
- return x * x;
- } else if (parameter == b) {
- return x;
- } else {
- return 1.0;
- }
- }
-
- private class LocalMeasurement extends WeightedMeasurement {
-
- private static final long serialVersionUID = 1555043155023729130L;
- private final double x;
-
- // constructor
- public LocalMeasurement(double x, double y, double w) {
- super(w, y);
- this.x = x;
- }
-
- public double getTheoreticalValue() {
- return theoreticalValue(x);
- }
-
- public double getPartial(EstimatedParameter parameter) {
- return partial(x, parameter);
- }
-
- }
- }
-
- public static Test suite() {
- return new TestSuite(LevenbergMarquardtEstimatorTest.class);
- }
-
-}
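The Estimator-based tests removed above are re-expressed against the new optimizer API in the test file added below. As a rough orientation only — the class names MigrationSketch and OneEquation are illustrative stand-ins, not part of the patch — the mapping between the two call styles looks like this:

// Sketch only: how a test written for the removed Estimator API maps onto the
// new optimizer API exercised in the file added below. "OneEquation" plays the
// role of the LinearProblem/Circle fixtures defined in the new test file.
import org.apache.commons.math.optimization.ObjectiveException;
import org.apache.commons.math.optimization.OptimizationException;
import org.apache.commons.math.optimization.VectorialDifferentiableObjectiveFunction;
import org.apache.commons.math.optimization.VectorialPointValuePair;
import org.apache.commons.math.optimization.general.LevenbergMarquardtOptimizer;

public class MigrationSketch {

    // models the single equation 2 x = 3, as in the new testTrivial case
    private static class OneEquation implements VectorialDifferentiableObjectiveFunction {
        private static final long serialVersionUID = 1L;
        public double[] objective(double[] variables) {
            return new double[] { 2 * variables[0] };
        }
        public double[][] jacobian(double[] variables, double[] value) {
            return new double[][] { { 2 } };
        }
    }

    public static void main(String[] args) throws ObjectiveException, OptimizationException {
        // before: estimator.estimate(problem) updated EstimatedParameter instances in place
        // after:  the optimizer returns the optimum; target, weights and start point are explicit
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        VectorialPointValuePair optimum =
            optimizer.optimize(new OneEquation(),
                               new double[] { 3.0 },   // target values (formerly measurements)
                               new double[] { 1.0 },   // weights
                               new double[] { 0.0 });  // start point (formerly initial estimates)
        System.out.println("x = " + optimum.getPoint()[0] + ", rms = " + optimizer.getRMS());
    }
}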
diff --git a/src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizerTest.java b/src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizerTest.java
new file mode 100644
index 000000000..221f76179
--- /dev/null
+++ b/src/test/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizerTest.java
@@ -0,0 +1,649 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math.optimization.general;
+
+import java.awt.geom.Point2D;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.apache.commons.math.linear.DenseRealMatrix;
+import org.apache.commons.math.linear.RealMatrix;
+import org.apache.commons.math.optimization.ObjectiveException;
+import org.apache.commons.math.optimization.OptimizationException;
+import org.apache.commons.math.optimization.SimpleVectorialValueChecker;
+import org.apache.commons.math.optimization.VectorialDifferentiableObjectiveFunction;
+import org.apache.commons.math.optimization.VectorialPointValuePair;
+
+/**
+ * Some of the unit tests are re-implementations of the MINPACK file17 and file22 test files.
+ * The redistribution policy for MINPACK is reproduced below for
+ * convenience.
+ *
+ *
+ *
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * - The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ *
+ *    This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.
+ * - WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.
+ * - LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.
+ *
+ *
+ *
+ *
+ * @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
+ * @author Burton S. Garbow (original fortran minpack tests)
+ * @author Kenneth E. Hillstrom (original fortran minpack tests)
+ * @author Jorge J. More (original fortran minpack tests)
+ * @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
+ */
+public class LevenbergMarquardtOptimizerTest
+ extends TestCase {
+
+ public LevenbergMarquardtOptimizerTest(String name) {
+ super(name);
+ }
+
+ public void testTrivial() throws ObjectiveException, OptimizationException {
+ LinearProblem problem =
+ new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1 }, new double[] { 0 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ try {
+ optimizer.guessParametersErrors();
+ fail("an exception should have been thrown");
+ } catch (OptimizationException ee) {
+ // expected behavior
+ } catch (Exception e) {
+ fail("wrong exception caught");
+ }
+ assertEquals(1.5, optimum.getPoint()[0], 1.0e-10);
+ }
+
+ public void testQRColumnsPermutation() throws ObjectiveException, OptimizationException {
+
+ LinearProblem problem =
+ new LinearProblem(new double[][] { { 1.0, -1.0 }, { 0.0, 2.0 }, { 1.0, -2.0 } },
+ new double[] { 4.0, 6.0, 1.0 });
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(7.0, optimum.getPoint()[0], 1.0e-10);
+ assertEquals(3.0, optimum.getPoint()[1], 1.0e-10);
+
+ }
+
+ public void testNoDependency() throws ObjectiveException, OptimizationException {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 2, 0, 0, 0, 0, 0 },
+ { 0, 2, 0, 0, 0, 0 },
+ { 0, 0, 2, 0, 0, 0 },
+ { 0, 0, 0, 2, 0, 0 },
+ { 0, 0, 0, 0, 2, 0 },
+ { 0, 0, 0, 0, 0, 2 }
+ }, new double[] { 0.0, 1.1, 2.2, 3.3, 4.4, 5.5 });
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1, 1, 1, 1 },
+ new double[] { 0, 0, 0, 0, 0, 0 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ for (int i = 0; i < problem.target.length; ++i) {
+ assertEquals(0.55 * i, optimum.getPoint()[i], 1.0e-10);
+ }
+ }
+
+ public void testOneSet() throws ObjectiveException, OptimizationException {
+
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 0, 0 },
+ { -1, 1, 0 },
+ { 0, -1, 1 }
+ }, new double[] { 1, 1, 1});
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(1.0, optimum.getPoint()[0], 1.0e-10);
+ assertEquals(2.0, optimum.getPoint()[1], 1.0e-10);
+ assertEquals(3.0, optimum.getPoint()[2], 1.0e-10);
+
+ }
+
+ public void testTwoSets() throws ObjectiveException, OptimizationException {
+ double epsilon = 1.0e-7;
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 2, 1, 0, 4, 0, 0 },
+ { -4, -2, 3, -7, 0, 0 },
+ { 4, 1, -2, 8, 0, 0 },
+ { 0, -3, -12, -1, 0, 0 },
+ { 0, 0, 0, 0, epsilon, 1 },
+ { 0, 0, 0, 0, 1, 1 }
+ }, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1, 1, 1, 1 },
+ new double[] { 0, 0, 0, 0, 0, 0 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals( 3.0, optimum.getPoint()[0], 1.0e-10);
+ assertEquals( 4.0, optimum.getPoint()[1], 1.0e-10);
+ assertEquals(-1.0, optimum.getPoint()[2], 1.0e-10);
+ assertEquals(-2.0, optimum.getPoint()[3], 1.0e-10);
+ assertEquals( 1.0 + epsilon, optimum.getPoint()[4], 1.0e-10);
+ assertEquals( 1.0 - epsilon, optimum.getPoint()[5], 1.0e-10);
+
+ }
+
+ public void testNonInversible() throws ObjectiveException, OptimizationException {
+
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1, 2, -3 },
+ { 2, 1, 3 },
+ { -3, 0, -9 }
+ }, new double[] { 1, 1, 1 });
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
+ assertTrue(Math.sqrt(problem.target.length) * optimizer.getRMS() > 0.6);
+ try {
+ optimizer.getCovariances();
+ fail("an exception should have been thrown");
+ } catch (OptimizationException ee) {
+ // expected behavior
+ } catch (Exception e) {
+ fail("wrong exception caught");
+ }
+
+ }
+
+ public void testIllConditioned() throws ObjectiveException, OptimizationException {
+ LinearProblem problem1 = new LinearProblem(new double[][] {
+ { 10.0, 7.0, 8.0, 7.0 },
+ { 7.0, 5.0, 6.0, 5.0 },
+ { 8.0, 6.0, 10.0, 9.0 },
+ { 7.0, 5.0, 9.0, 10.0 }
+ }, new double[] { 32, 23, 33, 31 });
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum1 =
+ optimizer.optimize(problem1, problem1.target, new double[] { 1, 1, 1, 1 },
+ new double[] { 0, 1, 2, 3 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(1.0, optimum1.getPoint()[0], 1.0e-10);
+ assertEquals(1.0, optimum1.getPoint()[1], 1.0e-10);
+ assertEquals(1.0, optimum1.getPoint()[2], 1.0e-10);
+ assertEquals(1.0, optimum1.getPoint()[3], 1.0e-10);
+
+ LinearProblem problem2 = new LinearProblem(new double[][] {
+ { 10.00, 7.00, 8.10, 7.20 },
+ { 7.08, 5.04, 6.00, 5.00 },
+ { 8.00, 5.98, 9.89, 9.00 },
+ { 6.99, 4.99, 9.00, 9.98 }
+ }, new double[] { 32, 23, 33, 31 });
+ VectorialPointValuePair optimum2 =
+ optimizer.optimize(problem2, problem2.target, new double[] { 1, 1, 1, 1 },
+ new double[] { 0, 1, 2, 3 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(-81.0, optimum2.getPoint()[0], 1.0e-8);
+ assertEquals(137.0, optimum2.getPoint()[1], 1.0e-8);
+ assertEquals(-34.0, optimum2.getPoint()[2], 1.0e-8);
+ assertEquals( 22.0, optimum2.getPoint()[3], 1.0e-8);
+
+ }
+
+ public void testMoreEstimatedParametersSimple() throws ObjectiveException, OptimizationException {
+
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 3.0, 2.0, 0.0, 0.0 },
+ { 0.0, 1.0, -1.0, 1.0 },
+ { 2.0, 0.0, 1.0, 0.0 }
+ }, new double[] { 7.0, 3.0, 5.0 });
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 },
+ new double[] { 7, 6, 5, 4 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+
+ }
+
+ public void testMoreEstimatedParametersUnsorted() throws ObjectiveException, OptimizationException {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
+ { 0.0, 0.0, 1.0, 1.0, 1.0, 0.0 },
+ { 0.0, 0.0, 0.0, 0.0, 1.0, -1.0 },
+ { 0.0, 0.0, -1.0, 1.0, 0.0, 1.0 },
+ { 0.0, 0.0, 0.0, -1.0, 1.0, 0.0 }
+ }, new double[] { 3.0, 12.0, -1.0, 7.0, 1.0 });
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1, 1, 1 },
+ new double[] { 2, 2, 2, 2, 2, 2 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(3.0, optimum.getPointRef()[2], 1.0e-10);
+ assertEquals(4.0, optimum.getPointRef()[3], 1.0e-10);
+ assertEquals(5.0, optimum.getPointRef()[4], 1.0e-10);
+ assertEquals(6.0, optimum.getPointRef()[5], 1.0e-10);
+
+ }
+
+ public void testRedundantEquations() throws ObjectiveException, OptimizationException {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1.0, 1.0 },
+ { 1.0, -1.0 },
+ { 1.0, 3.0 }
+ }, new double[] { 3.0, 1.0, 5.0 });
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 },
+ new double[] { 1, 1 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(2.0, optimum.getPointRef()[0], 1.0e-10);
+ assertEquals(1.0, optimum.getPointRef()[1], 1.0e-10);
+
+ }
+
+ public void testInconsistentEquations() throws ObjectiveException, OptimizationException {
+ LinearProblem problem = new LinearProblem(new double[][] {
+ { 1.0, 1.0 },
+ { 1.0, -1.0 },
+ { 1.0, 3.0 }
+ }, new double[] { 3.0, 1.0, 4.0 });
+
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 1, 1 });
+ assertTrue(optimizer.getRMS() > 0.1);
+
+ }
+
+ public void testInconsistentSizes() throws ObjectiveException, OptimizationException {
+ LinearProblem problem =
+ new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } }, new double[] { -1, 1 });
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+
+ VectorialPointValuePair optimum =
+ optimizer.optimize(problem, problem.target, new double[] { 1, 1 }, new double[] { 0, 0 });
+ assertEquals(0, optimizer.getRMS(), 1.0e-10);
+ assertEquals(-1, optimum.getPoint()[0], 1.0e-10);
+ assertEquals(+1, optimum.getPoint()[1], 1.0e-10);
+
+ try {
+ optimizer.optimize(problem, problem.target,
+ new double[] { 1 },
+ new double[] { 0, 0 });
+ fail("an exception should have been thrown");
+ } catch (OptimizationException oe) {
+ // expected behavior
+ } catch (Exception e) {
+ fail("wrong exception caught");
+ }
+
+ try {
+ optimizer.optimize(problem, new double[] { 1 },
+ new double[] { 1 },
+ new double[] { 0, 0 });
+ fail("an exception should have been thrown");
+ } catch (ObjectiveException oe) {
+ // expected behavior
+ } catch (Exception e) {
+ fail("wrong exception caught");
+ }
+
+ }
+
+ public void testControlParameters() throws OptimizationException {
+ Circle circle = new Circle();
+ circle.addPoint( 30.0, 68.0);
+ circle.addPoint( 50.0, -6.0);
+ circle.addPoint(110.0, -20.0);
+ circle.addPoint( 35.0, 15.0);
+ circle.addPoint( 45.0, 97.0);
+ checkEstimate(circle, 0.1, 10, 1.0e-14, 1.0e-16, 1.0e-10, false);
+ checkEstimate(circle, 0.1, 10, 1.0e-15, 1.0e-17, 1.0e-10, true);
+ checkEstimate(circle, 0.1, 5, 1.0e-15, 1.0e-16, 1.0e-10, true);
+ circle.addPoint(300, -300);
+ checkEstimate(circle, 0.1, 20, 1.0e-18, 1.0e-16, 1.0e-10, true);
+ }
+
+ private void checkEstimate(VectorialDifferentiableObjectiveFunction problem,
+ double initialStepBoundFactor, int maxCostEval,
+ double costRelativeTolerance, double parRelativeTolerance,
+ double orthoTolerance, boolean shouldFail) {
+ try {
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ optimizer.setInitialStepBoundFactor(initialStepBoundFactor);
+ optimizer.setMaxEvaluations(maxCostEval);
+ optimizer.setCostRelativeTolerance(costRelativeTolerance);
+ optimizer.setParRelativeTolerance(parRelativeTolerance);
+ optimizer.setOrthoTolerance(orthoTolerance);
+ optimizer.optimize(problem, new double[] { 0, 0, 0, 0, 0 }, new double[] { 1, 1, 1, 1, 1 },
+ new double[] { 98.680, 47.345 });
+ assertTrue(! shouldFail);
+ } catch (OptimizationException ee) {
+ assertTrue(shouldFail);
+ } catch (ObjectiveException ee) {
+ assertTrue(shouldFail);
+ } catch (Exception e) {
+ fail("wrong exception type caught");
+ }
+ }
+
+ public void testCircleFitting() throws ObjectiveException, OptimizationException {
+ Circle circle = new Circle();
+ circle.addPoint( 30.0, 68.0);
+ circle.addPoint( 50.0, -6.0);
+ circle.addPoint(110.0, -20.0);
+ circle.addPoint( 35.0, 15.0);
+ circle.addPoint( 45.0, 97.0);
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ VectorialPointValuePair optimum =
+ optimizer.optimize(circle, new double[] { 0, 0, 0, 0, 0 }, new double[] { 1, 1, 1, 1, 1 },
+ new double[] { 98.680, 47.345 });
+ assertTrue(optimizer.getEvaluations() < 10);
+ assertTrue(optimizer.getJacobianEvaluations() < 10);
+ double rms = optimizer.getRMS();
+ assertEquals(1.768262623567235, Math.sqrt(circle.getN()) * rms, 1.0e-10);
+ Point2D.Double center = new Point2D.Double(optimum.getPointRef()[0], optimum.getPointRef()[1]);
+ assertEquals(69.96016176931406, circle.getRadius(center), 1.0e-10);
+ assertEquals(96.07590211815305, center.x, 1.0e-10);
+ assertEquals(48.13516790438953, center.y, 1.0e-10);
+ double[][] cov = optimizer.getCovariances();
+ assertEquals(1.839, cov[0][0], 0.001);
+ assertEquals(0.731, cov[0][1], 0.001);
+ assertEquals(cov[0][1], cov[1][0], 1.0e-14);
+ assertEquals(0.786, cov[1][1], 0.001);
+ double[] errors = optimizer.guessParametersErrors();
+ assertEquals(1.384, errors[0], 0.001);
+ assertEquals(0.905, errors[1], 0.001);
+
+ // add perfect measurements and check errors are reduced
+ double r = circle.getRadius(center);
+ for (double d= 0; d < 2 * Math.PI; d += 0.01) {
+ circle.addPoint(center.x + r * Math.cos(d), center.y + r * Math.sin(d));
+ }
+ double[] target = new double[circle.getN()];
+ Arrays.fill(target, 0.0);
+ double[] weights = new double[circle.getN()];
+ Arrays.fill(weights, 2.0);
+ optimum =
+ optimizer.optimize(circle, target, weights, new double[] { 98.680, 47.345 });
+ cov = optimizer.getCovariances();
+ assertEquals(0.0016, cov[0][0], 0.001);
+ assertEquals(3.2e-7, cov[0][1], 1.0e-9);
+ assertEquals(cov[0][1], cov[1][0], 1.0e-14);
+ assertEquals(0.0016, cov[1][1], 0.001);
+ errors = optimizer.guessParametersErrors();
+ assertEquals(0.002, errors[0], 0.001);
+ assertEquals(0.002, errors[1], 0.001);
+
+ }
+
+ public void testCircleFittingBadInit() throws ObjectiveException, OptimizationException {
+ Circle circle = new Circle();
+ double[][] points = new double[][] {
+ {-0.312967, 0.072366}, {-0.339248, 0.132965}, {-0.379780, 0.202724},
+ {-0.390426, 0.260487}, {-0.361212, 0.328325}, {-0.346039, 0.392619},
+ {-0.280579, 0.444306}, {-0.216035, 0.470009}, {-0.149127, 0.493832},
+ {-0.075133, 0.483271}, {-0.007759, 0.452680}, { 0.060071, 0.410235},
+ { 0.103037, 0.341076}, { 0.118438, 0.273884}, { 0.131293, 0.192201},
+ { 0.115869, 0.129797}, { 0.072223, 0.058396}, { 0.022884, 0.000718},
+ {-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
+ {-0.278592, -0.005008}, {-0.337655, 0.056658}, {-0.385899, 0.112526},
+ {-0.405517, 0.186957}, {-0.415374, 0.262071}, {-0.387482, 0.343398},
+ {-0.347322, 0.397943}, {-0.287623, 0.458425}, {-0.223502, 0.475513},
+ {-0.135352, 0.478186}, {-0.061221, 0.483371}, { 0.003711, 0.422737},
+ { 0.065054, 0.375830}, { 0.108108, 0.297099}, { 0.123882, 0.222850},
+ { 0.117729, 0.134382}, { 0.085195, 0.056820}, { 0.029800, -0.019138},
+ {-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
+ {-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561, 0.014926},
+ {-0.471036, 0.074716}, {-0.488638, 0.182508}, {-0.485990, 0.254068},
+ {-0.463943, 0.338438}, {-0.406453, 0.404704}, {-0.334287, 0.466119},
+ {-0.254244, 0.503188}, {-0.161548, 0.495769}, {-0.075733, 0.495560},
+ { 0.001375, 0.434937}, { 0.082787, 0.385806}, { 0.115490, 0.323807},
+ { 0.141089, 0.223450}, { 0.138693, 0.131703}, { 0.126415, 0.049174},
+ { 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
+ {-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
+ {-0.405195, -0.000895}, {-0.444937, 0.085456}, {-0.484357, 0.175597},
+ {-0.472453, 0.248681}, {-0.438580, 0.347463}, {-0.402304, 0.422428},
+ {-0.326777, 0.479438}, {-0.247797, 0.505581}, {-0.152676, 0.519380},
+ {-0.071754, 0.516264}, { 0.015942, 0.472802}, { 0.076608, 0.419077},
+ { 0.127673, 0.330264}, { 0.159951, 0.262150}, { 0.153530, 0.172681},
+ { 0.140653, 0.089229}, { 0.078666, 0.024981}, { 0.023807, -0.037022},
+ {-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
+ };
+ double[] target = new double[points.length];
+ Arrays.fill(target, 0.0);
+ double[] weights = new double[points.length];
+ Arrays.fill(weights, 2.0);
+ for (int i = 0; i < points.length; ++i) {
+ circle.addPoint(points[i][0], points[i][1]);
+ }
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-10, 1.0e-10));
+ VectorialPointValuePair optimum =
+ optimizer.optimize(circle, target, weights, new double[] { -12, -12 });
+ Point2D.Double center = new Point2D.Double(optimum.getPointRef()[0], optimum.getPointRef()[1]);
+ assertTrue(optimizer.getEvaluations() < 25);
+ assertTrue(optimizer.getJacobianEvaluations() < 20);
+ assertEquals( 0.043, optimizer.getRMS(), 1.0e-3);
+ assertEquals( 0.292235, circle.getRadius(center), 1.0e-6);
+ assertEquals(-0.151738, center.x, 1.0e-6);
+ assertEquals( 0.2075001, center.y, 1.0e-6);
+ }
+
+ public void testMath199() throws ObjectiveException, OptimizationException {
+ try {
+ QuadraticProblem problem = new QuadraticProblem();
+ problem.addPoint (0, -3.182591015485607);
+ problem.addPoint (1, -2.5581184967730577);
+ problem.addPoint (2, -2.1488478161387325);
+ problem.addPoint (3, -1.9122489313410047);
+ problem.addPoint (4, 1.7785661310051026);
+ new LevenbergMarquardtOptimizer().optimize(problem,
+ new double[] { 0, 0, 0, 0, 0 },
+ new double[] { 0.0, 4.4e-323, 1.0, 4.4e-323, 0.0 },
+ new double[] { 0, 0, 0 });
+ fail("an exception should have been thrown");
+ } catch (OptimizationException ee) {
+ // expected behavior
+ }
+
+ }
+
+ private static class LinearProblem implements VectorialDifferentiableObjectiveFunction {
+
+ private static final long serialVersionUID = 703247177355019415L;
+ final RealMatrix factors;
+ final double[] target;
+ public LinearProblem(double[][] factors, double[] target) {
+ this.factors = new DenseRealMatrix(factors);
+ this.target = target;
+ }
+
+ public double[][] jacobian(double[] variables, double[] value) {
+ return factors.getData();
+ }
+
+ public double[] objective(double[] variables) {
+ return factors.operate(variables);
+ }
+
+ }
+
+ private static class Circle implements VectorialDifferentiableObjectiveFunction {
+
+ private static final long serialVersionUID = -4711170319243817874L;
+
+ private ArrayList points;
+
+ public Circle() {
+ points = new ArrayList();
+ }
+
+ public void addPoint(double px, double py) {
+ points.add(new Point2D.Double(px, py));
+ }
+
+ public int getN() {
+ return points.size();
+ }
+
+ public double getRadius(Point2D.Double center) {
+ double r = 0;
+ for (Point2D.Double point : points) {
+ r += point.distance(center);
+ }
+ return r / points.size();
+ }
+
+ public double[][] jacobian(double[] variables, double[] value)
+ throws ObjectiveException, IllegalArgumentException {
+
+ int n = points.size();
+ Point2D.Double center = new Point2D.Double(variables[0], variables[1]);
+
+ // gradient of the optimal radius
+ double dRdX = 0;
+ double dRdY = 0;
+ for (Point2D.Double pk : points) {
+ double dk = pk.distance(center);
+ dRdX += (center.x - pk.x) / dk;
+ dRdY += (center.y - pk.y) / dk;
+ }
+ dRdX /= n;
+ dRdY /= n;
+
+ // jacobian of the radius residuals
+ double[][] jacobian = new double[n][2];
+ for (int i = 0; i < n; ++i) {
+ Point2D.Double pi = points.get(i);
+ double di = pi.distance(center);
+ jacobian[i][0] = (center.x - pi.x) / di - dRdX;
+ jacobian[i][1] = (center.y - pi.y) / di - dRdY;
+ }
+
+ return jacobian;
+
+ }
+
+ public double[] objective(double[] variables)
+ throws ObjectiveException, IllegalArgumentException {
+
+ Point2D.Double center = new Point2D.Double(variables[0], variables[1]);
+ double radius = getRadius(center);
+
+ double[] residuals = new double[points.size()];
+ for (int i = 0; i < residuals.length; ++i) {
+ residuals[i] = points.get(i).distance(center) - radius;
+ }
+
+ return residuals;
+
+ }
+
+ }
+
+ private static class QuadraticProblem implements VectorialDifferentiableObjectiveFunction {
+
+ private static final long serialVersionUID = -247096133023967957L;
+ private List x;
+ private List y;
+
+ public QuadraticProblem() {
+ x = new ArrayList();
+ y = new ArrayList();
+ }
+
+ public void addPoint(double x, double y) {
+ this.x.add(x);
+ this.y.add(y);
+ }
+
+ public double[][] jacobian(double[] variables, double[] value) {
+ double[][] jacobian = new double[x.size()][3];
+ for (int i = 0; i < jacobian.length; ++i) {
+ jacobian[i][0] = x.get(i) * x.get(i);
+ jacobian[i][1] = x.get(i);
+ jacobian[i][2] = 1.0;
+ }
+ return jacobian;
+ }
+
+ public double[] objective(double[] variables) {
+ double[] values = new double[x.size()];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = (variables[0] * x.get(i) + variables[1]) * x.get(i) + variables[2];
+ }
+ return values;
+ }
+
+ }
+
+ public static Test suite() {
+ return new TestSuite(LevenbergMarquardtOptimizerTest.class);
+ }
+
+}
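For reference, the way these tests tune the optimizer before each run can be condensed into a small helper. This is an illustrative sketch — the class name OptimizerSetupSketch is not part of the patch — using only setters that appear in the tests above and in the MinpackTest changes below:

import org.apache.commons.math.optimization.SimpleVectorialValueChecker;
import org.apache.commons.math.optimization.general.LevenbergMarquardtOptimizer;

class OptimizerSetupSketch {

    // n is the number of free parameters of the problem being fitted
    static LevenbergMarquardtOptimizer configure(int n) {
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        optimizer.setMaxEvaluations(100 * (n + 1));                        // cost evaluation budget
        optimizer.setCostRelativeTolerance(Math.sqrt(2.22044604926e-16));  // ~sqrt(machine epsilon)
        optimizer.setParRelativeTolerance(Math.sqrt(2.22044604926e-16));
        optimizer.setOrthoTolerance(2.22044604926e-16);
        // optional: a value-based convergence checker, as in testCircleFittingBadInit
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-10, 1.0e-10));
        return optimizer;
    }
}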
diff --git a/src/test/org/apache/commons/math/optimization/general/MinpackTest.java b/src/test/org/apache/commons/math/optimization/general/MinpackTest.java
index 48e99b575..c3f451e05 100644
--- a/src/test/org/apache/commons/math/optimization/general/MinpackTest.java
+++ b/src/test/org/apache/commons/math/optimization/general/MinpackTest.java
@@ -19,7 +19,10 @@ package org.apache.commons.math.optimization.general;
import java.util.Arrays;
+import org.apache.commons.math.optimization.ObjectiveException;
import org.apache.commons.math.optimization.OptimizationException;
+import org.apache.commons.math.optimization.VectorialDifferentiableObjectiveFunction;
+import org.apache.commons.math.optimization.VectorialPointValuePair;
import junit.framework.*;
@@ -86,8 +89,7 @@ import junit.framework.*;
* @author Jorge J. More (original fortran minpack tests)
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
*/
-public class MinpackTest
- extends TestCase {
+public class MinpackTest extends TestCase {
public MinpackTest(String name) {
super(name);
@@ -502,157 +504,117 @@ public class MinpackTest
}
private void minpackTest(MinpackFunction function, boolean exceptionExpected) {
- LevenbergMarquardtEstimator estimator = new LevenbergMarquardtEstimator();
- estimator.setMaxCostEval(100 * (function.getN() + 1));
- estimator.setCostRelativeTolerance(Math.sqrt(2.22044604926e-16));
- estimator.setParRelativeTolerance(Math.sqrt(2.22044604926e-16));
- estimator.setOrthoTolerance(2.22044604926e-16);
- assertTrue(function.checkTheoreticalStartCost(estimator.getRMS(function)));
- try {
- estimator.estimate(function);
- assertFalse(exceptionExpected);
- } catch (OptimizationException lsse) {
- assertTrue(exceptionExpected);
- }
- assertTrue(function.checkTheoreticalMinCost(estimator.getRMS(function)));
- assertTrue(function.checkTheoreticalMinParams());
+ LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ optimizer.setMaxEvaluations(100 * (function.getN() + 1));
+ optimizer.setCostRelativeTolerance(Math.sqrt(2.22044604926e-16));
+ optimizer.setParRelativeTolerance(Math.sqrt(2.22044604926e-16));
+ optimizer.setOrthoTolerance(2.22044604926e-16);
+// assertTrue(function.checkTheoreticalStartCost(optimizer.getRMS()));
+ try {
+ VectorialPointValuePair optimum =
+ optimizer.optimize(function,
+ function.getTarget(), function.getWeight(),
+ function.getStartPoint());
+ assertFalse(exceptionExpected);
+ assertTrue(function.checkTheoreticalMinCost(optimizer.getRMS()));
+ assertTrue(function.checkTheoreticalMinParams(optimum));
+ } catch (OptimizationException lsse) {
+ assertTrue(exceptionExpected);
+ } catch (ObjectiveException oe) {
+ assertTrue(exceptionExpected);
+ }
}
- private static abstract class MinpackFunction implements EstimationProblem {
+ private static abstract class MinpackFunction
+ implements VectorialDifferentiableObjectiveFunction {
- protected MinpackFunction(int m,
- double[] startParams,
- double theoreticalStartCost,
- double theoreticalMinCost,
- double[] theoreticalMinParams) {
- this.m = m;
- this.n = startParams.length;
- parameters = new EstimatedParameter[n];
- for (int i = 0; i < n; ++i) {
- parameters[i] = new EstimatedParameter("p" + i, startParams[i]);
+ private static final long serialVersionUID = -6209760235478794233L;
+ protected int n;
+ protected int m;
+ protected double[] startParams;
+ protected double theoreticalMinCost;
+ protected double[] theoreticalMinParams;
+ protected double costAccuracy;
+ protected double paramsAccuracy;
+
+ protected MinpackFunction(int m, double[] startParams,
+ double theoreticalMinCost, double[] theoreticalMinParams) {
+ this.m = m;
+ this.n = startParams.length;
+ this.startParams = startParams.clone();
+ this.theoreticalMinCost = theoreticalMinCost;
+ this.theoreticalMinParams = theoreticalMinParams;
+ this.costAccuracy = 1.0e-8;
+ this.paramsAccuracy = 1.0e-5;
}
- this.theoreticalStartCost = theoreticalStartCost;
- this.theoreticalMinCost = theoreticalMinCost;
- this.theoreticalMinParams = theoreticalMinParams;
- this.costAccuracy = 1.0e-8;
- this.paramsAccuracy = 1.0e-5;
- }
- protected static double[] buildArray(int n, double x) {
- double[] array = new double[n];
- Arrays.fill(array, x);
- return array;
- }
+ protected static double[] buildArray(int n, double x) {
+ double[] array = new double[n];
+ Arrays.fill(array, x);
+ return array;
+ }
- protected void setCostAccuracy(double costAccuracy) {
- this.costAccuracy = costAccuracy;
- }
+ public double[] getTarget() {
+ return buildArray(m, 0.0);
+ }
- protected void setParamsAccuracy(double paramsAccuracy) {
- this.paramsAccuracy = paramsAccuracy;
- }
+ public double[] getWeight() {
+ return buildArray(m, 1.0);
+ }
- public int getN() {
- return parameters.length;
- }
+ public double[] getStartPoint() {
+ return startParams.clone();
+ }
- public boolean checkTheoreticalStartCost(double rms) {
- double threshold = costAccuracy * (1.0 + theoreticalStartCost);
- return Math.abs(Math.sqrt(m) * rms - theoreticalStartCost) <= threshold;
- }
+ protected void setCostAccuracy(double costAccuracy) {
+ this.costAccuracy = costAccuracy;
+ }
- public boolean checkTheoreticalMinCost(double rms) {
- double threshold = costAccuracy * (1.0 + theoreticalMinCost);
- return Math.abs(Math.sqrt(m) * rms - theoreticalMinCost) <= threshold;
- }
+ protected void setParamsAccuracy(double paramsAccuracy) {
+ this.paramsAccuracy = paramsAccuracy;
+ }
- public boolean checkTheoreticalMinParams() {
- if (theoreticalMinParams != null) {
- for (int i = 0; i < theoreticalMinParams.length; ++i) {
- double mi = theoreticalMinParams[i];
- double vi = parameters[i].getEstimate();
- if (Math.abs(mi - vi) > (paramsAccuracy * (1.0 + Math.abs(mi)))) {
- return false;
+ public int getN() {
+ return startParams.length;
+ }
+
+ public boolean checkTheoreticalMinCost(double rms) {
+ double threshold = costAccuracy * (1.0 + theoreticalMinCost);
+ return Math.abs(Math.sqrt(m) * rms - theoreticalMinCost) <= threshold;
+ }
+
+ public boolean checkTheoreticalMinParams(VectorialPointValuePair optimum) {
+ double[] params = optimum.getPointRef();
+ if (theoreticalMinParams != null) {
+ for (int i = 0; i < theoreticalMinParams.length; ++i) {
+ double mi = theoreticalMinParams[i];
+ double vi = params[i];
+ if (Math.abs(mi - vi) > (paramsAccuracy * (1.0 + Math.abs(mi)))) {
+ return false;
+ }
+ }
}
- }
- }
- return true;
- }
-
- public WeightedMeasurement[] getMeasurements() {
- WeightedMeasurement[] measurements = new WeightedMeasurement[m];
- for (int i = 0; i < m; ++i) {
- measurements[i] = new MinpackMeasurement(i);
- }
- return measurements;
- }
-
- public EstimatedParameter[] getUnboundParameters() {
- return parameters;
- }
-
- public EstimatedParameter[] getAllParameters() {
- return parameters;
- }
-
- protected abstract double[][] getJacobian();
-
- protected abstract double[] getResiduals();
-
- private class MinpackMeasurement extends WeightedMeasurement {
-
- public MinpackMeasurement(int index) {
- super(1.0, 0.0);
- this.index = index;
+ return true;
}
- public double getTheoreticalValue() {
- // this is obviously NOT efficient as we recompute the whole vector
- // each time we need only one element, but it is only for test
- // purposes and is simpler to check.
- // This implementation should NOT be taken as an example, it is ugly!
- return getResiduals()[index];
- }
+ public abstract double[][] jacobian(double[] variables, double[] value);
- public double getPartial(EstimatedParameter parameter) {
- // this is obviously NOT efficient as we recompute the whole jacobian
- // each time we need only one element, but it is only for test
- // purposes and is simpler to check.
- // This implementation should NOT be taken as an example, it is ugly!
- for (int j = 0; j < n; ++j) {
- if (parameter == parameters[j]) {
- return getJacobian()[index][j];
- }
- }
- return 0;
- }
-
- private int index;
- private static final long serialVersionUID = 1L;
-
- }
-
- protected int n;
- protected int m;
- protected EstimatedParameter[] parameters;
- protected double theoreticalStartCost;
- protected double theoreticalMinCost;
- protected double[] theoreticalMinParams;
- protected double costAccuracy;
- protected double paramsAccuracy;
+ public abstract double[] objective(double[] variables);
}
private static class LinearFullRankFunction extends MinpackFunction {
+ private static final long serialVersionUID = -9030323226268039536L;
+
public LinearFullRankFunction(int m, int n, double x0,
double theoreticalStartCost,
double theoreticalMinCost) {
- super(m, buildArray(n, x0), theoreticalStartCost,
- theoreticalMinCost, buildArray(n, -1.0));
+ super(m, buildArray(n, x0), theoreticalMinCost,
+ buildArray(n, -1.0));
}
- protected double[][] getJacobian() {
+ public double[][] jacobian(double[] variables, double[] value) {
double t = 2.0 / m;
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
@@ -664,15 +626,15 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
+ public double[] objective(double[] variables) {
double sum = 0;
for (int i = 0; i < n; ++i) {
- sum += parameters[i].getEstimate();
+ sum += variables[i];
}
double t = 1 + 2 * sum / m;
double[] f = new double[m];
for (int i = 0; i < n; ++i) {
- f[i] = parameters[i].getEstimate() - t;
+ f[i] = variables[i] - t;
}
Arrays.fill(f, n, m, -t);
return f;
@@ -682,13 +644,15 @@ public class MinpackTest
private static class LinearRank1Function extends MinpackFunction {
+ private static final long serialVersionUID = 8494863245104608300L;
+
public LinearRank1Function(int m, int n, double x0,
double theoreticalStartCost,
double theoreticalMinCost) {
- super(m, buildArray(n, x0), theoreticalStartCost, theoreticalMinCost, null);
+ super(m, buildArray(n, x0), theoreticalMinCost, null);
}
- protected double[][] getJacobian() {
+ public double[][] jacobian(double[] variables, double[] value) {
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
jacobian[i] = new double[n];
@@ -699,11 +663,11 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
+ public double[] objective(double[] variables) {
double[] f = new double[m];
double sum = 0;
for (int i = 0; i < n; ++i) {
- sum += (i + 1) * parameters[i].getEstimate();
+ sum += (i + 1) * variables[i];
}
for (int i = 0; i < m; ++i) {
f[i] = (i + 1) * sum - 1;
@@ -715,14 +679,15 @@ public class MinpackTest
private static class LinearRank1ZeroColsAndRowsFunction extends MinpackFunction {
+ private static final long serialVersionUID = -3316653043091995018L;
+
public LinearRank1ZeroColsAndRowsFunction(int m, int n, double x0) {
super(m, buildArray(n, x0),
- Math.sqrt(m + (n+1)*(n-2)*(m-2)*(m-1) * ((n+1)*(n-2)*(2*m-3) - 12) / 24.0),
Math.sqrt((m * (m + 3) - 6) / (2.0 * (2 * m - 3))),
null);
}
- protected double[][] getJacobian() {
+ public double[][] jacobian(double[] variables, double[] value) {
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
jacobian[i] = new double[n];
@@ -741,11 +706,11 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
+ public double[] objective(double[] variables) {
double[] f = new double[m];
double sum = 0;
for (int i = 1; i < (n - 1); ++i) {
- sum += (i + 1) * parameters[i].getEstimate();
+ sum += (i + 1) * variables[i];
}
for (int i = 0; i < (m - 1); ++i) {
f[i] = i * sum - 1;
@@ -758,18 +723,20 @@ public class MinpackTest
private static class RosenbrockFunction extends MinpackFunction {
+ private static final long serialVersionUID = 2893438180956569134L;
+
public RosenbrockFunction(double[] startParams, double theoreticalStartCost) {
- super(2, startParams, theoreticalStartCost, 0.0, buildArray(2, 1.0));
+ super(2, startParams, 0.0, buildArray(2, 1.0));
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
return new double[][] { { -20 * x1, 10 }, { -1, 0 } };
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
return new double[] { 10 * (x2 - x1 * x1), 1 - x1 };
}
@@ -777,15 +744,16 @@ public class MinpackTest
private static class HelicalValleyFunction extends MinpackFunction {
+ private static final long serialVersionUID = 220613787843200102L;
+
public HelicalValleyFunction(double[] startParams,
double theoreticalStartCost) {
- super(3, startParams, theoreticalStartCost, 0.0,
- new double[] { 1.0, 0.0, 0.0 });
+ super(3, startParams, 0.0, new double[] { 1.0, 0.0, 0.0 });
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
double tmpSquare = x1 * x1 + x2 * x2;
double tmp1 = twoPi * tmpSquare;
double tmp2 = Math.sqrt(tmpSquare);
@@ -796,10 +764,10 @@ public class MinpackTest
};
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
double tmp1;
if (x1 == 0) {
tmp1 = (x2 >= 0) ? 0.25 : -0.25;
@@ -823,16 +791,18 @@ public class MinpackTest
private static class PowellSingularFunction extends MinpackFunction {
+ private static final long serialVersionUID = 7298364171208142405L;
+
public PowellSingularFunction(double[] startParams,
double theoreticalStartCost) {
- super(4, startParams, theoreticalStartCost, 0.0, buildArray(4, 0.0));
+ super(4, startParams, 0.0, buildArray(4, 0.0));
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
return new double[][] {
{ 1, 10, 0, 0 },
{ 0, 0, sqrt5, -sqrt5 },
@@ -841,11 +811,11 @@ public class MinpackTest
};
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
return new double[] {
x1 + 10 * x2,
sqrt5 * (x3 - x4),
@@ -861,25 +831,27 @@ public class MinpackTest
private static class FreudensteinRothFunction extends MinpackFunction {
+ private static final long serialVersionUID = 2892404999344244214L;
+
public FreudensteinRothFunction(double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(2, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(2, startParams, theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
- double x2 = parameters[1].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x2 = variables[1];
return new double[][] {
{ 1, x2 * (10 - 3 * x2) - 2 },
{ 1, x2 * ( 2 + 3 * x2) - 14, }
};
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
return new double[] {
-13.0 + x1 + ((5.0 - x2) * x2 - 2.0) * x2,
-29.0 + x1 + ((1.0 + x2) * x2 - 14.0) * x2
@@ -890,17 +862,19 @@ public class MinpackTest
private static class BardFunction extends MinpackFunction {
+ private static final long serialVersionUID = 5990442612572087668L;
+
public BardFunction(double x0,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(15, buildArray(3, x0), theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(15, buildArray(3, x0), theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x2 = variables[1];
+ double x3 = variables[2];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double tmp1 = i + 1;
@@ -913,10 +887,10 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
double tmp1 = i + 1;
@@ -937,23 +911,25 @@ public class MinpackTest
private static class KowalikOsborneFunction extends MinpackFunction {
+ private static final long serialVersionUID = -4867445739880495801L;
+
public KowalikOsborneFunction(double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(11, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(11, startParams, theoreticalMinCost,
+ theoreticalMinParams);
if (theoreticalStartCost > 20.0) {
setCostAccuracy(2.0e-4);
setParamsAccuracy(5.0e-3);
}
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double tmp = v[i] * (v[i] + x3) + x4;
@@ -966,11 +942,11 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
f[i] = y[i] - x1 * (v[i] * (v[i] + x2)) / (v[i] * (v[i] + x3) + x4);
@@ -991,22 +967,24 @@ public class MinpackTest
private static class MeyerFunction extends MinpackFunction {
+ private static final long serialVersionUID = -838060619150131027L;
+
public MeyerFunction(double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(16, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(16, startParams, theoreticalMinCost,
+ theoreticalMinParams);
if (theoreticalStartCost > 1.0e6) {
setCostAccuracy(7.0e-3);
setParamsAccuracy(2.0e-2);
}
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double temp = 5.0 * (i + 1) + 45.0 + x3;
@@ -1018,10 +996,10 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
f[i] = x1 * Math.exp(x2 / (5.0 * (i + 1) + 45.0 + x3)) - y[i];
@@ -1040,15 +1018,17 @@ public class MinpackTest
private static class WatsonFunction extends MinpackFunction {
+ private static final long serialVersionUID = -9034759294980218927L;
+
public WatsonFunction(int n, double x0,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(31, buildArray(n, x0), theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(31, buildArray(n, x0), theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
+ public double[][] jacobian(double[] variables, double[] value) {
double[][] jacobian = new double[m][];
@@ -1057,7 +1037,7 @@ public class MinpackTest
double s2 = 0.0;
double dx = 1.0;
for (int j = 0; j < n; ++j) {
- s2 += dx * parameters[j].getEstimate();
+ s2 += dx * variables[j];
dx *= div;
}
double temp= 2 * div * s2;
@@ -1073,34 +1053,34 @@ public class MinpackTest
jacobian[m - 2][0] = 1;
jacobian[m - 1] = new double[n];
- jacobian[m - 1][0]= -2 * parameters[0].getEstimate();
+ jacobian[m - 1][0]= -2 * variables[0];
jacobian[m - 1][1]= 1;
return jacobian;
}
- protected double[] getResiduals() {
+ public double[] objective(double[] variables) {
double[] f = new double[m];
for (int i = 0; i < (m - 2); ++i) {
double div = (i + 1) / 29.0;
double s1 = 0;
double dx = 1;
for (int j = 1; j < n; ++j) {
- s1 += j * dx * parameters[j].getEstimate();
+ s1 += j * dx * variables[j];
dx *= div;
}
double s2 =0;
dx =1;
for (int j = 0; j < n; ++j) {
- s2 += dx * parameters[j].getEstimate();
+ s2 += dx * variables[j];
dx *= div;
}
f[i] = s1 - s2 * s2 - 1;
}
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ double x1 = variables[0];
+ double x2 = variables[1];
f[m - 2] = x1;
f[m - 1] = x2 - x1 * x1 - 1;
@@ -1112,15 +1092,17 @@ public class MinpackTest
private static class Box3DimensionalFunction extends MinpackFunction {
+ private static final long serialVersionUID = 5511403858142574493L;
+
public Box3DimensionalFunction(int m, double[] startParams,
double theoreticalStartCost) {
- super(m, startParams, theoreticalStartCost,
- 0.0, new double[] { 1.0, 10.0, 1.0 });
+ super(m, startParams, 0.0,
+ new double[] { 1.0, 10.0, 1.0 });
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double tmp = (i + 1) / 10.0;
@@ -1133,10 +1115,10 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
double tmp = (i + 1) / 10.0;
@@ -1150,17 +1132,19 @@ public class MinpackTest
private static class JennrichSampsonFunction extends MinpackFunction {
+ private static final long serialVersionUID = -2489165190443352947L;
+
public JennrichSampsonFunction(int m, double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(m, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(m, startParams, theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double t = i + 1;
@@ -1169,9 +1153,9 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
double temp = i + 1;
@@ -1184,19 +1168,21 @@ public class MinpackTest
private static class BrownDennisFunction extends MinpackFunction {
+ private static final long serialVersionUID = 8340018645694243910L;
+
public BrownDennisFunction(int m, double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(m, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(m, startParams, theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double temp = (i + 1) / 5.0;
@@ -1210,11 +1196,11 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
double temp = (i + 1) / 5.0;
@@ -1229,6 +1215,8 @@ public class MinpackTest
private static class ChebyquadFunction extends MinpackFunction {
+ private static final long serialVersionUID = -2394877275028008594L;
+
private static double[] buildChebyquadArray(int n, double factor) {
double[] array = new double[n];
double inv = factor / (n + 1);
@@ -1242,11 +1230,11 @@ public class MinpackTest
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(m, buildChebyquadArray(n, factor), theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(m, buildChebyquadArray(n, factor), theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
+ public double[][] jacobian(double[] variables, double[] value) {
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
@@ -1256,7 +1244,7 @@ public class MinpackTest
double dx = 1.0 / n;
for (int j = 0; j < n; ++j) {
double tmp1 = 1;
- double tmp2 = 2 * parameters[j].getEstimate() - 1;
+ double tmp2 = 2 * variables[j] - 1;
double temp = 2 * tmp2;
double tmp3 = 0;
double tmp4 = 2;
@@ -1275,13 +1263,13 @@ public class MinpackTest
}
- protected double[] getResiduals() {
+ public double[] objective(double[] variables) {
double[] f = new double[m];
for (int j = 0; j < n; ++j) {
double tmp1 = 1;
- double tmp2 = 2 * parameters[j].getEstimate() - 1;
+ double tmp2 = 2 * variables[j] - 1;
double temp = 2 * tmp2;
for (int i = 0; i < m; ++i) {
f[i] += tmp2;
@@ -1309,15 +1297,17 @@ public class MinpackTest
private static class BrownAlmostLinearFunction extends MinpackFunction {
+ private static final long serialVersionUID = 8239594490466964725L;
+
public BrownAlmostLinearFunction(int m, double factor,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(m, buildArray(m, factor), theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(m, buildArray(m, factor), theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
+ public double[][] jacobian(double[] variables, double[] value) {
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
jacobian[i] = new double[n];
@@ -1325,7 +1315,7 @@ public class MinpackTest
double prod = 1;
for (int j = 0; j < n; ++j) {
- prod *= parameters[j].getEstimate();
+ prod *= variables[j];
for (int i = 0; i < n; ++i) {
jacobian[i][j] = 1;
}
@@ -1333,14 +1323,13 @@ public class MinpackTest
}
for (int j = 0; j < n; ++j) {
- EstimatedParameter vj = parameters[j];
- double temp = vj.getEstimate();
+ double temp = variables[j];
if (temp == 0) {
temp = 1;
prod = 1;
for (int k = 0; k < n; ++k) {
if (k != j) {
- prod *= parameters[k].getEstimate();
+ prod *= variables[k];
}
}
}
@@ -1351,16 +1340,16 @@ public class MinpackTest
}
- protected double[] getResiduals() {
+ public double[] objective(double[] variables) {
double[] f = new double[m];
double sum = -(n + 1);
double prod = 1;
for (int j = 0; j < n; ++j) {
- sum += parameters[j].getEstimate();
- prod *= parameters[j].getEstimate();
+ sum += variables[j];
+ prod *= variables[j];
}
for (int i = 0; i < n; ++i) {
- f[i] = parameters[i].getEstimate() + sum;
+ f[i] = variables[i] + sum;
}
f[n - 1] = prod - 1;
return f;
@@ -1370,19 +1359,21 @@ public class MinpackTest
private static class Osborne1Function extends MinpackFunction {
+ private static final long serialVersionUID = 4006743521149849494L;
+
public Osborne1Function(double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(33, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(33, startParams, theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
- double x5 = parameters[4].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double x5 = variables[4];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double temp = 10.0 * i;
@@ -1395,12 +1386,12 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x1 = parameters[0].getEstimate();
- double x2 = parameters[1].getEstimate();
- double x3 = parameters[2].getEstimate();
- double x4 = parameters[3].getEstimate();
- double x5 = parameters[4].getEstimate();
+ public double[] objective(double[] variables) {
+ double x1 = variables[0];
+ double x2 = variables[1];
+ double x3 = variables[2];
+ double x4 = variables[3];
+ double x5 = variables[4];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
double temp = 10.0 * i;
@@ -1421,26 +1412,28 @@ public class MinpackTest
private static class Osborne2Function extends MinpackFunction {
+ private static final long serialVersionUID = -8418268780389858746L;
+
public Osborne2Function(double[] startParams,
double theoreticalStartCost,
double theoreticalMinCost,
double[] theoreticalMinParams) {
- super(65, startParams, theoreticalStartCost,
- theoreticalMinCost, theoreticalMinParams);
+ super(65, startParams, theoreticalMinCost,
+ theoreticalMinParams);
}
- protected double[][] getJacobian() {
- double x01 = parameters[0].getEstimate();
- double x02 = parameters[1].getEstimate();
- double x03 = parameters[2].getEstimate();
- double x04 = parameters[3].getEstimate();
- double x05 = parameters[4].getEstimate();
- double x06 = parameters[5].getEstimate();
- double x07 = parameters[6].getEstimate();
- double x08 = parameters[7].getEstimate();
- double x09 = parameters[8].getEstimate();
- double x10 = parameters[9].getEstimate();
- double x11 = parameters[10].getEstimate();
+ public double[][] jacobian(double[] variables, double[] value) {
+ double x01 = variables[0];
+ double x02 = variables[1];
+ double x03 = variables[2];
+ double x04 = variables[3];
+ double x05 = variables[4];
+ double x06 = variables[5];
+ double x07 = variables[6];
+ double x08 = variables[7];
+ double x09 = variables[8];
+ double x10 = variables[9];
+ double x11 = variables[10];
double[][] jacobian = new double[m][];
for (int i = 0; i < m; ++i) {
double temp = i / 10.0;
@@ -1465,18 +1458,18 @@ public class MinpackTest
return jacobian;
}
- protected double[] getResiduals() {
- double x01 = parameters[0].getEstimate();
- double x02 = parameters[1].getEstimate();
- double x03 = parameters[2].getEstimate();
- double x04 = parameters[3].getEstimate();
- double x05 = parameters[4].getEstimate();
- double x06 = parameters[5].getEstimate();
- double x07 = parameters[6].getEstimate();
- double x08 = parameters[7].getEstimate();
- double x09 = parameters[8].getEstimate();
- double x10 = parameters[9].getEstimate();
- double x11 = parameters[10].getEstimate();
+ public double[] objective(double[] variables) {
+ double x01 = variables[0];
+ double x02 = variables[1];
+ double x03 = variables[2];
+ double x04 = variables[3];
+ double x05 = variables[4];
+ double x06 = variables[5];
+ double x07 = variables[6];
+ double x08 = variables[7];
+ double x09 = variables[8];
+ double x10 = variables[9];
+ double x11 = variables[10];
double[] f = new double[m];
for (int i = 0; i < m; ++i) {
double temp = i / 10.0;
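
For reference, a minimal sketch (not part of the patch) of a test function written directly against the new double[]-based signatures used throughout the reworked MinpackTest above. The residuals and partial derivatives mirror the RosenbrockFunction hunk; the standalone class name is illustrative only.

public class NewApiRosenbrock {

    // Residuals of the Rosenbrock problem: f1 = 10 * (x2 - x1^2), f2 = 1 - x1.
    public double[] objective(double[] variables) {
        double x1 = variables[0];
        double x2 = variables[1];
        return new double[] { 10 * (x2 - x1 * x1), 1 - x1 };
    }

    // Jacobian of the residuals with respect to (x1, x2).
    public double[][] jacobian(double[] variables, double[] value) {
        double x1 = variables[0];
        return new double[][] { { -20 * x1, 10 }, { -1, 0 } };
    }

}
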
diff --git a/src/test/org/apache/commons/math/optimization/general/WeightedMeasurementTest.java b/src/test/org/apache/commons/math/optimization/general/WeightedMeasurementTest.java
deleted file mode 100644
index 18018189d..000000000
--- a/src/test/org/apache/commons/math/optimization/general/WeightedMeasurementTest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.commons.math.optimization.general;
-
-
-import junit.framework.*;
-
-public class WeightedMeasurementTest
- extends TestCase {
-
- public WeightedMeasurementTest(String name) {
- super(name);
- p1 = null;
- p2 = null;
- }
-
- public void testConstruction() {
- WeightedMeasurement m = new MyMeasurement(3.0, theoretical() + 0.1, this);
- checkValue(m.getWeight(), 3.0);
- checkValue(m.getMeasuredValue(), theoretical() + 0.1);
- }
-
- public void testIgnored() {
- WeightedMeasurement m = new MyMeasurement(3.0, theoretical() + 0.1, this);
- assertTrue(!m.isIgnored());
- m.setIgnored(true);
- assertTrue(m.isIgnored());
- m.setIgnored(false);
- assertTrue(!m.isIgnored());
- }
-
- public void testTheory() {
- WeightedMeasurement m = new MyMeasurement(3.0, theoretical() + 0.1, this);
- checkValue(m.getTheoreticalValue(), theoretical());
- checkValue(m.getResidual(), 0.1);
-
- double oldP1 = p1.getEstimate();
- p1.setEstimate(oldP1 + m.getResidual() / m.getPartial(p1));
- checkValue(m.getResidual(), 0.0);
- p1.setEstimate(oldP1);
- checkValue(m.getResidual(), 0.1);
-
- double oldP2 = p2.getEstimate();
- p2.setEstimate(oldP2 + m.getResidual() / m.getPartial(p2));
- checkValue(m.getResidual(), 0.0);
- p2.setEstimate(oldP2);
- checkValue(m.getResidual(), 0.1);
-
- }
-
- public static Test suite() {
- return new TestSuite(WeightedMeasurementTest.class);
- }
-
- public void setUp() {
- p1 = new EstimatedParameter("p1", 1.0);
- p2 = new EstimatedParameter("p2", 2.0);
- }
-
- public void tearDown() {
- p1 = null;
- p2 = null;
- }
-
- private void checkValue(double value, double expected) {
- assertTrue(Math.abs(value - expected) < 1.0e-10);
- }
-
- private double theoretical() {
- return 3 * p1.getEstimate() - p2.getEstimate();
- }
-
- private double partial(EstimatedParameter p) {
- if (p == p1) {
- return 3.0;
- } else if (p == p2) {
- return -1.0;
- } else {
- return 0.0;
- }
- }
-
- private static class MyMeasurement
- extends WeightedMeasurement {
-
- public MyMeasurement(double weight, double measuredValue,
- WeightedMeasurementTest testInstance) {
- super(weight, measuredValue);
- this.testInstance = testInstance;
- }
-
- public double getTheoreticalValue() {
- return testInstance.theoretical();
- }
-
- public double getPartial(EstimatedParameter p) {
- return testInstance.partial(p);
- }
-
- private transient WeightedMeasurementTest testInstance;
-
- private static final long serialVersionUID = -246712922500792332L;
-
- }
-
- private EstimatedParameter p1;
- private EstimatedParameter p2;
-
-}
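
The deleted test exercised the old measurement model, where each WeightedMeasurement supplied its theoretical value and partial derivatives through EstimatedParameter objects. Under the new API that role is played by a vector-valued objective/jacobian pair. A hedged sketch of the same 3 * p1 - p2 model in the new style follows; only the residual and its partials come from the test above, the class name and constructor are illustrative.

public class SingleMeasurementModel {

    // Measured value, e.g. the theoretical value plus 0.1 as in the old test.
    private final double measuredValue;

    public SingleMeasurementModel(double measuredValue) {
        this.measuredValue = measuredValue;
    }

    // One residual: measured - theoretical, with theoretical = 3 * p1 - p2.
    public double[] objective(double[] variables) {
        double p1 = variables[0];
        double p2 = variables[1];
        return new double[] { measuredValue - (3 * p1 - p2) };
    }

    // The old test reported partials 3 and -1 for the theoretical value;
    // the residual carries them with opposite sign.
    public double[][] jacobian(double[] variables, double[] value) {
        return new double[][] { { -3, 1 } };
    }

}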