adapted the Gauss-Newton optimizer to the new top-level optimization interfaces
git-svn-id: https://svn.apache.org/repos/asf/commons/proper/math/trunk@754500 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
27c1eb726a
commit
3a0df1ba48
|
@ -0,0 +1,319 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math.optimization.general;
|
||||
|
||||
import org.apache.commons.math.linear.InvalidMatrixException;
|
||||
import org.apache.commons.math.linear.MatrixUtils;
|
||||
import org.apache.commons.math.linear.RealMatrix;
|
||||
import org.apache.commons.math.linear.decomposition.LUDecompositionImpl;
|
||||
import org.apache.commons.math.optimization.ObjectiveException;
|
||||
import org.apache.commons.math.optimization.OptimizationException;
|
||||
import org.apache.commons.math.optimization.SimpleVectorialValueChecker;
|
||||
import org.apache.commons.math.optimization.VectorialConvergenceChecker;
|
||||
import org.apache.commons.math.optimization.VectorialDifferentiableObjectiveFunction;
|
||||
import org.apache.commons.math.optimization.VectorialDifferentiableOptimizer;
|
||||
import org.apache.commons.math.optimization.VectorialPointValuePair;
|
||||
|
||||
/**
|
||||
* Base class for implementing estimators.
|
||||
* <p>This base class handles the boilerplates methods associated to thresholds
|
||||
* settings, jacobian and error estimation.</p>
|
||||
* @version $Revision$ $Date$
|
||||
* @since 1.2
|
||||
*
|
||||
*/
|
||||
public abstract class AbstractLeastSquaresOptimizer implements VectorialDifferentiableOptimizer {
|
||||
|
||||
/** Serializable version identifier */
|
||||
private static final long serialVersionUID = -3080152374642370722L;
|
||||
|
||||
/** Default maximal number of objective function evaluations allowed. */
|
||||
public static final int DEFAULT_MAX_EVALUATIONS = 100;
|
||||
|
||||
/** Number of evaluations already performed for the current start. */
|
||||
private int objectiveEvaluations;
|
||||
|
||||
/** Number of jacobian evaluations. */
|
||||
private int jacobianEvaluations;
|
||||
|
||||
/** Maximal number of evaluations allowed. */
|
||||
private int maxEvaluations;
|
||||
|
||||
/** Convergence checker. */
|
||||
protected VectorialConvergenceChecker checker;
|
||||
|
||||
/**
|
||||
* Jacobian matrix.
|
||||
* <p>This matrix is in canonical form just after the calls to
|
||||
* {@link #updateJacobian()}, but may be modified by the solver
|
||||
* in the derived class (the {@link LevenbergMarquardtEstimator
|
||||
* Levenberg-Marquardt estimator} does this).</p>
|
||||
*/
|
||||
protected double[][] jacobian;
|
||||
|
||||
/** Number of columns of the jacobian matrix. */
|
||||
protected int cols;
|
||||
|
||||
/** Number of rows of the jacobian matrix. */
|
||||
protected int rows;
|
||||
|
||||
/** Objective function. */
|
||||
private VectorialDifferentiableObjectiveFunction f;
|
||||
|
||||
/** Target value for the objective functions at optimum. */
|
||||
protected double[] target;
|
||||
|
||||
/** Weight for the least squares cost computation. */
|
||||
protected double[] weights;
|
||||
|
||||
/** Current variables set. */
|
||||
protected double[] variables;
|
||||
|
||||
/** Current objective function value. */
|
||||
protected double[] objective;
|
||||
|
||||
/** Cost value (square root of the sum of the residuals). */
|
||||
protected double cost;
|
||||
|
||||
/** Simple constructor with default settings.
|
||||
* <p>The convergence check is set to a {@link SimpleVectorialValueChecker}
|
||||
* and the maximal number of evaluation is set to its default value.</p>
|
||||
*/
|
||||
protected AbstractLeastSquaresOptimizer() {
|
||||
setConvergenceChecker(new SimpleVectorialValueChecker());
|
||||
setMaxEvaluations(DEFAULT_MAX_EVALUATIONS);
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public void setMaxEvaluations(int maxEvaluations) {
|
||||
this.maxEvaluations = maxEvaluations;
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public int getMaxEvaluations() {
|
||||
return maxEvaluations;
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public int getEvaluations() {
|
||||
return objectiveEvaluations;
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public void setConvergenceChecker(VectorialConvergenceChecker checker) {
|
||||
this.checker = checker;
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public VectorialConvergenceChecker getConvergenceChecker() {
|
||||
return checker;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the jacobian matrix.
|
||||
* @exception ObjectiveException if the function jacobian
|
||||
* cannot be evaluated or its dimension doesn't match problem dimension
|
||||
*/
|
||||
protected void updateJacobian() throws ObjectiveException {
|
||||
incrementJacobianEvaluationsCounter();
|
||||
jacobian = f.jacobian(variables, objective);
|
||||
if (jacobian.length != rows) {
|
||||
throw new ObjectiveException("dimension mismatch {0} != {1}",
|
||||
jacobian.length, rows);
|
||||
}
|
||||
for (int i = 0; i < rows; i++) {
|
||||
final double[] ji = jacobian[i];
|
||||
final double factor = -Math.sqrt(weights[i]);
|
||||
for (int j = 0; j < cols; ++j) {
|
||||
ji[j] *= factor;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the jacobian evaluations counter.
|
||||
*/
|
||||
protected final void incrementJacobianEvaluationsCounter() {
|
||||
++jacobianEvaluations;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the residuals array and cost function value.
|
||||
* @exception ObjectiveException if the function cannot be evaluated
|
||||
* or its dimension doesn't match problem dimension
|
||||
* @exception OptimizationException if the number of cost evaluations
|
||||
* exceeds the maximum allowed
|
||||
*/
|
||||
protected void updateResidualsAndCost()
|
||||
throws ObjectiveException, OptimizationException {
|
||||
|
||||
if (++objectiveEvaluations > maxEvaluations) {
|
||||
throw new OptimizationException(
|
||||
"maximal number of evaluations exceeded ({0})",
|
||||
objectiveEvaluations);
|
||||
}
|
||||
|
||||
objective = f.objective(variables);
|
||||
if (objective.length != rows) {
|
||||
throw new ObjectiveException("dimension mismatch {0} != {1}",
|
||||
objective.length, rows);
|
||||
}
|
||||
cost = 0;
|
||||
for (int i = 0, index = 0; i < rows; i++, index += cols) {
|
||||
final double residual = objective[i] - target[i];
|
||||
cost += weights[i] * residual * residual;
|
||||
}
|
||||
cost = Math.sqrt(cost);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the Root Mean Square value.
|
||||
* Get the Root Mean Square value, i.e. the root of the arithmetic
|
||||
* mean of the square of all weighted residuals. This is related to the
|
||||
* criterion that is minimized by the estimator as follows: if
|
||||
* <em>c</em> if the criterion, and <em>n</em> is the number of
|
||||
* measurements, then the RMS is <em>sqrt (c/n)</em>.
|
||||
*
|
||||
* @return RMS value
|
||||
*/
|
||||
public double getRMS() {
|
||||
double criterion = 0;
|
||||
for (int i = 0; i < rows; ++i) {
|
||||
final double residual = objective[i] - target[i];
|
||||
criterion += weights[i] * residual * residual;
|
||||
}
|
||||
return Math.sqrt(criterion / rows);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the Chi-Square value.
|
||||
* @return chi-square value
|
||||
*/
|
||||
public double getChiSquare() {
|
||||
double chiSquare = 0;
|
||||
for (int i = 0; i < rows; ++i) {
|
||||
final double residual = objective[i] - target[i];
|
||||
chiSquare += residual * residual / weights[i];
|
||||
}
|
||||
return chiSquare;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the covariance matrix of unbound estimated parameters.
|
||||
* @return covariance matrix
|
||||
* @exception ObjectiveException if the function jacobian cannot
|
||||
* be evaluated
|
||||
* @exception OptimizationException if the covariance matrix
|
||||
* cannot be computed (singular problem)
|
||||
*/
|
||||
public double[][] getCovariances()
|
||||
throws ObjectiveException, OptimizationException {
|
||||
|
||||
// set up the jacobian
|
||||
updateJacobian();
|
||||
|
||||
// compute transpose(J).J, avoiding building big intermediate matrices
|
||||
double[][] jTj = new double[cols][cols];
|
||||
for (int i = 0; i < cols; ++i) {
|
||||
final double[] ji = jacobian[i];
|
||||
for (int j = i; j < cols; ++j) {
|
||||
final double[] jj = jacobian[j];
|
||||
double sum = 0;
|
||||
for (int k = 0; k < rows; ++k) {
|
||||
sum += ji[k] * jj[k];
|
||||
}
|
||||
jTj[i][j] = sum;
|
||||
jTj[j][i] = sum;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
// compute the covariances matrix
|
||||
RealMatrix inverse =
|
||||
new LUDecompositionImpl(MatrixUtils.createRealMatrix(jTj)).getSolver().getInverse();
|
||||
return inverse.getData();
|
||||
} catch (InvalidMatrixException ime) {
|
||||
throw new OptimizationException("unable to compute covariances: singular problem");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Guess the errors in unbound estimated parameters.
|
||||
* <p>Guessing is covariance-based, it only gives rough order of magnitude.</p>
|
||||
* @return errors in estimated parameters
|
||||
* @exception ObjectiveException if the function jacobian cannot b evaluated
|
||||
* @exception OptimizationException if the covariances matrix cannot be computed
|
||||
* or the number of degrees of freedom is not positive (number of measurements
|
||||
* lesser or equal to number of parameters)
|
||||
*/
|
||||
public double[] guessParametersErrors()
|
||||
throws ObjectiveException, OptimizationException {
|
||||
if (rows <= cols) {
|
||||
throw new OptimizationException(
|
||||
"no degrees of freedom ({0} measurements, {1} parameters)",
|
||||
rows, cols);
|
||||
}
|
||||
double[] errors = new double[cols];
|
||||
final double c = Math.sqrt(getChiSquare() / (rows - cols));
|
||||
double[][] covar = getCovariances();
|
||||
for (int i = 0; i < errors.length; ++i) {
|
||||
errors[i] = Math.sqrt(covar[i][i]) * c;
|
||||
}
|
||||
return errors;
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public VectorialPointValuePair optimize(final VectorialDifferentiableObjectiveFunction f,
|
||||
final double[] target, final double[] weights,
|
||||
final double[] startPoint)
|
||||
throws ObjectiveException, OptimizationException, IllegalArgumentException {
|
||||
|
||||
if (target.length != weights.length) {
|
||||
throw new OptimizationException("dimension mismatch {0} != {1}",
|
||||
target.length, weights.length);
|
||||
}
|
||||
|
||||
// reset counters
|
||||
objectiveEvaluations = 0;
|
||||
jacobianEvaluations = 0;
|
||||
|
||||
// store least squares problem characteristics
|
||||
this.f = f;
|
||||
this.target = target;
|
||||
this.weights = weights;
|
||||
this.variables = startPoint.clone();
|
||||
|
||||
// arrays shared with the other private methods
|
||||
rows = target.length;
|
||||
cols = variables.length;
|
||||
jacobian = new double[rows][cols];
|
||||
|
||||
cost = Double.POSITIVE_INFINITY;
|
||||
|
||||
return doOptimize();
|
||||
|
||||
}
|
||||
|
||||
/** Perform the bulk of optimization algorithm.
|
||||
*/
|
||||
abstract protected VectorialPointValuePair doOptimize()
|
||||
throws ObjectiveException, OptimizationException, IllegalArgumentException;
|
||||
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math.optimization.general;
|
||||
|
||||
import org.apache.commons.math.linear.DenseRealMatrix;
|
||||
import org.apache.commons.math.linear.InvalidMatrixException;
|
||||
import org.apache.commons.math.linear.RealMatrix;
|
||||
import org.apache.commons.math.linear.decomposition.DecompositionSolver;
|
||||
import org.apache.commons.math.linear.decomposition.LUDecompositionImpl;
|
||||
import org.apache.commons.math.linear.decomposition.QRDecompositionImpl;
|
||||
import org.apache.commons.math.optimization.ObjectiveException;
|
||||
import org.apache.commons.math.optimization.OptimizationException;
|
||||
import org.apache.commons.math.optimization.SimpleVectorialValueChecker;
|
||||
import org.apache.commons.math.optimization.VectorialPointValuePair;
|
||||
|
||||
/**
|
||||
* Gauss-Newton least-squares solver.
|
||||
* <p>
|
||||
* This class solve a least-square problem by solving the normal equations
|
||||
* of the linearized problem at each iteration. Either LU decomposition or
|
||||
* QR decomposition can be used to solve the normal equations. LU decomposition
|
||||
* is faster but QR decomposition is more robust for difficult problems.
|
||||
* </p>
|
||||
*
|
||||
* @version $Revision$ $Date$
|
||||
* @since 2.0
|
||||
*
|
||||
*/
|
||||
|
||||
public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer {
|
||||
|
||||
/** Serializable version identifier */
|
||||
private static final long serialVersionUID = 7011643996279553223L;
|
||||
|
||||
/** Indicator for using LU decomposition. */
|
||||
private final boolean useLU;
|
||||
|
||||
/** Simple constructor with default settings.
|
||||
* <p>The convergence check is set to a {@link SimpleVectorialValueChecker}
|
||||
* and the maximal number of evaluation is set to
|
||||
* {@link AbstractLeastSquaresOptimizer#DEFAULT_MAX_EVALUATIONS}.
|
||||
* @param useLU if true, the normal equations will be solved using LU
|
||||
* decomposition, otherwise it will be solved using QR decomposition
|
||||
*/
|
||||
public GaussNewtonOptimizer(final boolean useLU) {
|
||||
this.useLU = useLU;
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public VectorialPointValuePair doOptimize()
|
||||
throws ObjectiveException, OptimizationException, IllegalArgumentException {
|
||||
|
||||
// iterate until convergence is reached
|
||||
VectorialPointValuePair current = null;
|
||||
boolean converged = false;
|
||||
for (int iteration = 1; ! converged; ++iteration) {
|
||||
|
||||
// evaluate the objective function and its jacobian
|
||||
VectorialPointValuePair previous = current;
|
||||
updateResidualsAndCost();
|
||||
updateJacobian();
|
||||
current = new VectorialPointValuePair(variables, objective);
|
||||
|
||||
// build the linear problem
|
||||
final double[] b = new double[cols];
|
||||
final double[][] a = new double[cols][cols];
|
||||
for (int i = 0; i < rows; ++i) {
|
||||
|
||||
final double[] grad = jacobian[i];
|
||||
final double weight = weights[i];
|
||||
final double residual = objective[i] - target[i];
|
||||
|
||||
// compute the normal equation
|
||||
final double wr = weight * residual;
|
||||
for (int j = 0; j < cols; ++j) {
|
||||
b[j] += wr * grad[j];
|
||||
}
|
||||
|
||||
// build the contribution matrix for measurement i
|
||||
for (int k = 0; k < cols; ++k) {
|
||||
double[] ak = a[k];
|
||||
double wgk = weight * grad[k];
|
||||
for (int l = 0; l < cols; ++l) {
|
||||
ak[l] += wgk * grad[l];
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
// solve the linearized least squares problem
|
||||
RealMatrix mA = new DenseRealMatrix(a);
|
||||
DecompositionSolver solver = useLU ?
|
||||
new LUDecompositionImpl(mA).getSolver() :
|
||||
new QRDecompositionImpl(mA).getSolver();
|
||||
final double[] dX = solver.solve(b);
|
||||
|
||||
// update the estimated parameters
|
||||
for (int i = 0; i < cols; ++i) {
|
||||
variables[i] += dX[i];
|
||||
}
|
||||
|
||||
} catch(InvalidMatrixException e) {
|
||||
throw new OptimizationException("unable to solve: singular problem");
|
||||
}
|
||||
|
||||
// check convergence
|
||||
if (previous != null) {
|
||||
converged = checker.converged(++iteration, previous, current);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// we have converged
|
||||
return current;
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -1,734 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math.optimization.general;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
|
||||
import org.apache.commons.math.optimization.OptimizationException;
|
||||
|
||||
|
||||
import junit.framework.Test;
|
||||
import junit.framework.TestCase;
|
||||
import junit.framework.TestSuite;
|
||||
|
||||
/**
|
||||
* <p>Some of the unit tests are re-implementations of the MINPACK <a
|
||||
* href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
|
||||
* href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
|
||||
* The redistribution policy for MINPACK is available <a
|
||||
* href="http://www.netlib.org/minpack/disclaimer">here</a>, for
|
||||
* convenience, it is reproduced below.</p>
|
||||
|
||||
* <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
|
||||
* <tr><td>
|
||||
* Minpack Copyright Notice (1999) University of Chicago.
|
||||
* All rights reserved
|
||||
* </td></tr>
|
||||
* <tr><td>
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* <ol>
|
||||
* <li>Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.</li>
|
||||
* <li>Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.</li>
|
||||
* <li>The end-user documentation included with the redistribution, if any,
|
||||
* must include the following acknowledgment:
|
||||
* <code>This product includes software developed by the University of
|
||||
* Chicago, as Operator of Argonne National Laboratory.</code>
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.</li>
|
||||
* <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
|
||||
* WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
|
||||
* UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
|
||||
* THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
|
||||
* OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
|
||||
* OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
|
||||
* USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
|
||||
* THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
|
||||
* DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
|
||||
* UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
|
||||
* BE CORRECTED.</strong></li>
|
||||
* <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
|
||||
* HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
|
||||
* ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
|
||||
* INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
|
||||
* ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
* PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
|
||||
* SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
|
||||
* (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
|
||||
* EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
|
||||
* POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
|
||||
* <ol></td></tr>
|
||||
* </table>
|
||||
|
||||
* @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
|
||||
* @author Burton S. Garbow (original fortran minpack tests)
|
||||
* @author Kenneth E. Hillstrom (original fortran minpack tests)
|
||||
* @author Jorge J. More (original fortran minpack tests)
|
||||
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
|
||||
*/
|
||||
public class GaussNewtonEstimatorTest
|
||||
extends TestCase {
|
||||
|
||||
public GaussNewtonEstimatorTest(String name) {
|
||||
super(name);
|
||||
}
|
||||
|
||||
public void testTrivial() throws OptimizationException {
|
||||
LinearProblem problem =
|
||||
new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] {2},
|
||||
new EstimatedParameter[] {
|
||||
new EstimatedParameter("p0", 0)
|
||||
}, 3.0)
|
||||
});
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertEquals(0, estimator.getRMS(problem), 1.0e-10);
|
||||
assertEquals(1.5,
|
||||
problem.getUnboundParameters()[0].getEstimate(),
|
||||
1.0e-10);
|
||||
}
|
||||
|
||||
public void testQRColumnsPermutation() throws OptimizationException {
|
||||
|
||||
EstimatedParameter[] x = {
|
||||
new EstimatedParameter("p0", 0), new EstimatedParameter("p1", 0)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0 },
|
||||
new EstimatedParameter[] { x[0], x[1] },
|
||||
4.0),
|
||||
new LinearMeasurement(new double[] { 2.0 },
|
||||
new EstimatedParameter[] { x[1] },
|
||||
6.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -2.0 },
|
||||
new EstimatedParameter[] { x[0], x[1] },
|
||||
1.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertEquals(0, estimator.getRMS(problem), 1.0e-10);
|
||||
assertEquals(7.0, x[0].getEstimate(), 1.0e-10);
|
||||
assertEquals(3.0, x[1].getEstimate(), 1.0e-10);
|
||||
|
||||
}
|
||||
|
||||
public void testNoDependency() throws OptimizationException {
|
||||
EstimatedParameter[] p = new EstimatedParameter[] {
|
||||
new EstimatedParameter("p0", 0),
|
||||
new EstimatedParameter("p1", 0),
|
||||
new EstimatedParameter("p2", 0),
|
||||
new EstimatedParameter("p3", 0),
|
||||
new EstimatedParameter("p4", 0),
|
||||
new EstimatedParameter("p5", 0)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[0] }, 0.0),
|
||||
new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[1] }, 1.1),
|
||||
new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[2] }, 2.2),
|
||||
new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[3] }, 3.3),
|
||||
new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[4] }, 4.4),
|
||||
new LinearMeasurement(new double[] {2}, new EstimatedParameter[] { p[5] }, 5.5)
|
||||
});
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertEquals(0, estimator.getRMS(problem), 1.0e-10);
|
||||
for (int i = 0; i < p.length; ++i) {
|
||||
assertEquals(0.55 * i, p[i].getEstimate(), 1.0e-10);
|
||||
}
|
||||
}
|
||||
|
||||
public void testOneSet() throws OptimizationException {
|
||||
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 0),
|
||||
new EstimatedParameter("p1", 0),
|
||||
new EstimatedParameter("p2", 0)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0 },
|
||||
new EstimatedParameter[] { p[0] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { -1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { -1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[1], p[2] },
|
||||
1.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertEquals(0, estimator.getRMS(problem), 1.0e-10);
|
||||
assertEquals(1.0, p[0].getEstimate(), 1.0e-10);
|
||||
assertEquals(2.0, p[1].getEstimate(), 1.0e-10);
|
||||
assertEquals(3.0, p[2].getEstimate(), 1.0e-10);
|
||||
|
||||
}
|
||||
|
||||
public void testTwoSets() throws OptimizationException {
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 0),
|
||||
new EstimatedParameter("p1", 1),
|
||||
new EstimatedParameter("p2", 2),
|
||||
new EstimatedParameter("p3", 3),
|
||||
new EstimatedParameter("p4", 4),
|
||||
new EstimatedParameter("p5", 5)
|
||||
};
|
||||
|
||||
double epsilon = 1.0e-7;
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
|
||||
// 4 elements sub-problem
|
||||
new LinearMeasurement(new double[] { 2.0, 1.0, 4.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[3] },
|
||||
2.0),
|
||||
new LinearMeasurement(new double[] { -4.0, -2.0, 3.0, -7.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
-9.0),
|
||||
new LinearMeasurement(new double[] { 4.0, 1.0, -2.0, 8.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
2.0),
|
||||
new LinearMeasurement(new double[] { -3.0, -12.0, -1.0 },
|
||||
new EstimatedParameter[] { p[1], p[2], p[3] },
|
||||
2.0),
|
||||
|
||||
// 2 elements sub-problem
|
||||
new LinearMeasurement(new double[] { epsilon, 1.0 },
|
||||
new EstimatedParameter[] { p[4], p[5] },
|
||||
1.0 + epsilon * epsilon),
|
||||
new LinearMeasurement(new double[] { 1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[4], p[5] },
|
||||
2.0)
|
||||
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertEquals(0, estimator.getRMS(problem), 1.0e-10);
|
||||
assertEquals( 3.0, p[0].getEstimate(), 1.0e-10);
|
||||
assertEquals( 4.0, p[1].getEstimate(), 1.0e-10);
|
||||
assertEquals(-1.0, p[2].getEstimate(), 1.0e-10);
|
||||
assertEquals(-2.0, p[3].getEstimate(), 1.0e-10);
|
||||
assertEquals( 1.0 + epsilon, p[4].getEstimate(), 1.0e-10);
|
||||
assertEquals( 1.0 - epsilon, p[5].getEstimate(), 1.0e-10);
|
||||
|
||||
}
|
||||
|
||||
public void testNonInversible() throws OptimizationException {
|
||||
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 0),
|
||||
new EstimatedParameter("p1", 0),
|
||||
new EstimatedParameter("p2", 0)
|
||||
};
|
||||
LinearMeasurement[] m = new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0, 2.0, -3.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { 2.0, 1.0, 3.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { -3.0, -9.0 },
|
||||
new EstimatedParameter[] { p[0], p[2] },
|
||||
1.0)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(m);
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
try {
|
||||
estimator.estimate(problem);
|
||||
fail("an exception should have been caught");
|
||||
} catch (OptimizationException ee) {
|
||||
// expected behavior
|
||||
} catch (Exception e) {
|
||||
fail("wrong exception type caught");
|
||||
}
|
||||
}
|
||||
|
||||
public void testIllConditioned() throws OptimizationException {
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 0),
|
||||
new EstimatedParameter("p1", 1),
|
||||
new EstimatedParameter("p2", 2),
|
||||
new EstimatedParameter("p3", 3)
|
||||
};
|
||||
|
||||
LinearProblem problem1 = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 10.0, 7.0, 8.0, 7.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
32.0),
|
||||
new LinearMeasurement(new double[] { 7.0, 5.0, 6.0, 5.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
23.0),
|
||||
new LinearMeasurement(new double[] { 8.0, 6.0, 10.0, 9.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
33.0),
|
||||
new LinearMeasurement(new double[] { 7.0, 5.0, 9.0, 10.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
31.0)
|
||||
});
|
||||
GaussNewtonEstimator estimator1 = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator1.estimate(problem1);
|
||||
assertEquals(0, estimator1.getRMS(problem1), 1.0e-10);
|
||||
assertEquals(1.0, p[0].getEstimate(), 1.0e-10);
|
||||
assertEquals(1.0, p[1].getEstimate(), 1.0e-10);
|
||||
assertEquals(1.0, p[2].getEstimate(), 1.0e-10);
|
||||
assertEquals(1.0, p[3].getEstimate(), 1.0e-10);
|
||||
|
||||
LinearProblem problem2 = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 10.0, 7.0, 8.1, 7.2 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
32.0),
|
||||
new LinearMeasurement(new double[] { 7.08, 5.04, 6.0, 5.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
23.0),
|
||||
new LinearMeasurement(new double[] { 8.0, 5.98, 9.89, 9.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
33.0),
|
||||
new LinearMeasurement(new double[] { 6.99, 4.99, 9.0, 9.98 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2], p[3] },
|
||||
31.0)
|
||||
});
|
||||
GaussNewtonEstimator estimator2 = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator2.estimate(problem2);
|
||||
assertEquals(0, estimator2.getRMS(problem2), 1.0e-10);
|
||||
assertEquals(-81.0, p[0].getEstimate(), 1.0e-8);
|
||||
assertEquals(137.0, p[1].getEstimate(), 1.0e-8);
|
||||
assertEquals(-34.0, p[2].getEstimate(), 1.0e-8);
|
||||
assertEquals( 22.0, p[3].getEstimate(), 1.0e-8);
|
||||
|
||||
}
|
||||
|
||||
public void testMoreEstimatedParametersSimple() throws OptimizationException {
|
||||
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 7),
|
||||
new EstimatedParameter("p1", 6),
|
||||
new EstimatedParameter("p2", 5),
|
||||
new EstimatedParameter("p3", 4)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 3.0, 2.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
7.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[1], p[2], p[3] },
|
||||
3.0),
|
||||
new LinearMeasurement(new double[] { 2.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[2] },
|
||||
5.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
try {
|
||||
estimator.estimate(problem);
|
||||
fail("an exception should have been caught");
|
||||
} catch (OptimizationException ee) {
|
||||
// expected behavior
|
||||
} catch (Exception e) {
|
||||
fail("wrong exception type caught");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testMoreEstimatedParametersUnsorted() throws OptimizationException {
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 2),
|
||||
new EstimatedParameter("p1", 2),
|
||||
new EstimatedParameter("p2", 2),
|
||||
new EstimatedParameter("p3", 2),
|
||||
new EstimatedParameter("p4", 2),
|
||||
new EstimatedParameter("p5", 2)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
3.0),
|
||||
new LinearMeasurement(new double[] { 1.0, 1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[2], p[3], p[4] },
|
||||
12.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0 },
|
||||
new EstimatedParameter[] { p[4], p[5] },
|
||||
-1.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[3], p[2], p[5] },
|
||||
7.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0 },
|
||||
new EstimatedParameter[] { p[4], p[3] },
|
||||
1.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
try {
|
||||
estimator.estimate(problem);
|
||||
fail("an exception should have been caught");
|
||||
} catch (OptimizationException ee) {
|
||||
// expected behavior
|
||||
} catch (Exception e) {
|
||||
fail("wrong exception type caught");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testRedundantEquations() throws OptimizationException {
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 1),
|
||||
new EstimatedParameter("p1", 1)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
3.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { 1.0, 3.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
5.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertEquals(0, estimator.getRMS(problem), 1.0e-10);
|
||||
EstimatedParameter[] all = problem.getAllParameters();
|
||||
for (int i = 0; i < all.length; ++i) {
|
||||
assertEquals(all[i].getName().equals("p0") ? 2.0 : 1.0,
|
||||
all[i].getEstimate(), 1.0e-10);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testInconsistentEquations() throws OptimizationException {
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("p0", 1),
|
||||
new EstimatedParameter("p1", 1)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
3.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { 1.0, 3.0 },
|
||||
new EstimatedParameter[] { p[0], p[1] },
|
||||
4.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertTrue(estimator.getRMS(problem) > 0.1);
|
||||
|
||||
}
|
||||
|
||||
public void testBoundParameters() throws OptimizationException {
|
||||
EstimatedParameter[] p = {
|
||||
new EstimatedParameter("unbound0", 2, false),
|
||||
new EstimatedParameter("unbound1", 2, false),
|
||||
new EstimatedParameter("bound", 2, true)
|
||||
};
|
||||
LinearProblem problem = new LinearProblem(new LinearMeasurement[] {
|
||||
new LinearMeasurement(new double[] { 1.0, 1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2] },
|
||||
3.0),
|
||||
new LinearMeasurement(new double[] { 1.0, -1.0, 1.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2] },
|
||||
1.0),
|
||||
new LinearMeasurement(new double[] { 1.0, 3.0, 2.0 },
|
||||
new EstimatedParameter[] { p[0], p[1], p[2] },
|
||||
7.0)
|
||||
});
|
||||
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
estimator.estimate(problem);
|
||||
assertTrue(estimator.getRMS(problem) < 1.0e-10);
|
||||
double[][] covariances = estimator.getCovariances(problem);
|
||||
int i0 = 0, i1 = 1;
|
||||
if (problem.getUnboundParameters()[0].getName().endsWith("1")) {
|
||||
i0 = 1;
|
||||
i1 = 0;
|
||||
}
|
||||
assertEquals(11.0 / 24, covariances[i0][i0], 1.0e-10);
|
||||
assertEquals(-3.0 / 24, covariances[i0][i1], 1.0e-10);
|
||||
assertEquals(-3.0 / 24, covariances[i1][i0], 1.0e-10);
|
||||
assertEquals( 3.0 / 24, covariances[i1][i1], 1.0e-10);
|
||||
|
||||
double[] errors = estimator.guessParametersErrors(problem);
|
||||
assertEquals(0, errors[i0], 1.0e-10);
|
||||
assertEquals(0, errors[i1], 1.0e-10);
|
||||
|
||||
}
|
||||
|
||||
public void testMaxIterations() {
|
||||
Circle circle = new Circle(98.680, 47.345);
|
||||
circle.addPoint( 30.0, 68.0);
|
||||
circle.addPoint( 50.0, -6.0);
|
||||
circle.addPoint(110.0, -20.0);
|
||||
circle.addPoint( 35.0, 15.0);
|
||||
circle.addPoint( 45.0, 97.0);
|
||||
try {
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(4, 1.0e-14, 1.0e-14);
|
||||
estimator.estimate(circle);
|
||||
fail("an exception should have been caught");
|
||||
} catch (OptimizationException ee) {
|
||||
// expected behavior
|
||||
} catch (Exception e) {
|
||||
fail("wrong exception type caught");
|
||||
}
|
||||
}
|
||||
|
||||
public void testCircleFitting() throws OptimizationException {
|
||||
Circle circle = new Circle(98.680, 47.345);
|
||||
circle.addPoint( 30.0, 68.0);
|
||||
circle.addPoint( 50.0, -6.0);
|
||||
circle.addPoint(110.0, -20.0);
|
||||
circle.addPoint( 35.0, 15.0);
|
||||
circle.addPoint( 45.0, 97.0);
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-10, 1.0e-10);
|
||||
estimator.estimate(circle);
|
||||
double rms = estimator.getRMS(circle);
|
||||
assertEquals(1.768262623567235, Math.sqrt(circle.getM()) * rms, 1.0e-10);
|
||||
assertEquals(69.96016176931406, circle.getRadius(), 1.0e-10);
|
||||
assertEquals(96.07590211815305, circle.getX(), 1.0e-10);
|
||||
assertEquals(48.13516790438953, circle.getY(), 1.0e-10);
|
||||
}
|
||||
|
||||
public void testCircleFittingBadInit() throws OptimizationException {
|
||||
Circle circle = new Circle(-12, -12);
|
||||
double[][] points = new double[][] {
|
||||
{-0.312967, 0.072366}, {-0.339248, 0.132965}, {-0.379780, 0.202724},
|
||||
{-0.390426, 0.260487}, {-0.361212, 0.328325}, {-0.346039, 0.392619},
|
||||
{-0.280579, 0.444306}, {-0.216035, 0.470009}, {-0.149127, 0.493832},
|
||||
{-0.075133, 0.483271}, {-0.007759, 0.452680}, { 0.060071, 0.410235},
|
||||
{ 0.103037, 0.341076}, { 0.118438, 0.273884}, { 0.131293, 0.192201},
|
||||
{ 0.115869, 0.129797}, { 0.072223, 0.058396}, { 0.022884, 0.000718},
|
||||
{-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
|
||||
{-0.278592, -0.005008}, {-0.337655, 0.056658}, {-0.385899, 0.112526},
|
||||
{-0.405517, 0.186957}, {-0.415374, 0.262071}, {-0.387482, 0.343398},
|
||||
{-0.347322, 0.397943}, {-0.287623, 0.458425}, {-0.223502, 0.475513},
|
||||
{-0.135352, 0.478186}, {-0.061221, 0.483371}, { 0.003711, 0.422737},
|
||||
{ 0.065054, 0.375830}, { 0.108108, 0.297099}, { 0.123882, 0.222850},
|
||||
{ 0.117729, 0.134382}, { 0.085195, 0.056820}, { 0.029800, -0.019138},
|
||||
{-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
|
||||
{-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561, 0.014926},
|
||||
{-0.471036, 0.074716}, {-0.488638, 0.182508}, {-0.485990, 0.254068},
|
||||
{-0.463943, 0.338438}, {-0.406453, 0.404704}, {-0.334287, 0.466119},
|
||||
{-0.254244, 0.503188}, {-0.161548, 0.495769}, {-0.075733, 0.495560},
|
||||
{ 0.001375, 0.434937}, { 0.082787, 0.385806}, { 0.115490, 0.323807},
|
||||
{ 0.141089, 0.223450}, { 0.138693, 0.131703}, { 0.126415, 0.049174},
|
||||
{ 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
|
||||
{-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
|
||||
{-0.405195, -0.000895}, {-0.444937, 0.085456}, {-0.484357, 0.175597},
|
||||
{-0.472453, 0.248681}, {-0.438580, 0.347463}, {-0.402304, 0.422428},
|
||||
{-0.326777, 0.479438}, {-0.247797, 0.505581}, {-0.152676, 0.519380},
|
||||
{-0.071754, 0.516264}, { 0.015942, 0.472802}, { 0.076608, 0.419077},
|
||||
{ 0.127673, 0.330264}, { 0.159951, 0.262150}, { 0.153530, 0.172681},
|
||||
{ 0.140653, 0.089229}, { 0.078666, 0.024981}, { 0.023807, -0.037022},
|
||||
{-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
|
||||
};
|
||||
for (int i = 0; i < points.length; ++i) {
|
||||
circle.addPoint(points[i][0], points[i][1]);
|
||||
}
|
||||
GaussNewtonEstimator estimator = new GaussNewtonEstimator(100, 1.0e-6, 1.0e-6);
|
||||
try {
|
||||
estimator.estimate(circle);
|
||||
fail("an exception should have been caught");
|
||||
} catch (OptimizationException ee) {
|
||||
// expected behavior
|
||||
} catch (Exception e) {
|
||||
fail("wrong exception type caught");
|
||||
}
|
||||
}
|
||||
|
||||
private static class LinearProblem extends SimpleEstimationProblem {
|
||||
|
||||
public LinearProblem(LinearMeasurement[] measurements) {
|
||||
HashSet<EstimatedParameter> set = new HashSet<EstimatedParameter>();
|
||||
for (int i = 0; i < measurements.length; ++i) {
|
||||
addMeasurement(measurements[i]);
|
||||
EstimatedParameter[] parameters = measurements[i].getParameters();
|
||||
for (int j = 0; j < parameters.length; ++j) {
|
||||
set.add(parameters[j]);
|
||||
}
|
||||
}
|
||||
for (EstimatedParameter p : set) {
|
||||
addParameter(p);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static class LinearMeasurement extends WeightedMeasurement {
|
||||
|
||||
public LinearMeasurement(double[] factors, EstimatedParameter[] parameters,
|
||||
double setPoint) {
|
||||
super(1.0, setPoint, true);
|
||||
this.factors = factors;
|
||||
this.parameters = parameters;
|
||||
setIgnored(false);
|
||||
}
|
||||
|
||||
public double getTheoreticalValue() {
|
||||
double v = 0;
|
||||
for (int i = 0; i < factors.length; ++i) {
|
||||
v += factors[i] * parameters[i].getEstimate();
|
||||
}
|
||||
return v;
|
||||
}
|
||||
|
||||
public double getPartial(EstimatedParameter parameter) {
|
||||
for (int i = 0; i < parameters.length; ++i) {
|
||||
if (parameters[i] == parameter) {
|
||||
return factors[i];
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
public EstimatedParameter[] getParameters() {
|
||||
return parameters;
|
||||
}
|
||||
|
||||
private double[] factors;
|
||||
private EstimatedParameter[] parameters;
|
||||
private static final long serialVersionUID = -3922448707008868580L;
|
||||
|
||||
}
|
||||
|
||||
private static class Circle implements EstimationProblem {
|
||||
|
||||
public Circle(double cx, double cy) {
|
||||
this.cx = new EstimatedParameter("cx", cx);
|
||||
this.cy = new EstimatedParameter(new EstimatedParameter("cy", cy));
|
||||
points = new ArrayList<PointModel>();
|
||||
}
|
||||
|
||||
public void addPoint(double px, double py) {
|
||||
points.add(new PointModel(px, py));
|
||||
}
|
||||
|
||||
public int getM() {
|
||||
return points.size();
|
||||
}
|
||||
|
||||
public WeightedMeasurement[] getMeasurements() {
|
||||
return (WeightedMeasurement[]) points.toArray(new PointModel[points.size()]);
|
||||
}
|
||||
|
||||
public EstimatedParameter[] getAllParameters() {
|
||||
return new EstimatedParameter[] { cx, cy };
|
||||
}
|
||||
|
||||
public EstimatedParameter[] getUnboundParameters() {
|
||||
return new EstimatedParameter[] { cx, cy };
|
||||
}
|
||||
|
||||
public double getPartialRadiusX() {
|
||||
double dRdX = 0;
|
||||
for (PointModel point : points) {
|
||||
dRdX += point.getPartialDiX();
|
||||
}
|
||||
return dRdX / points.size();
|
||||
}
|
||||
|
||||
public double getPartialRadiusY() {
|
||||
double dRdY = 0;
|
||||
for (PointModel point : points) {
|
||||
dRdY += point.getPartialDiY();
|
||||
}
|
||||
return dRdY / points.size();
|
||||
}
|
||||
|
||||
public double getRadius() {
|
||||
double r = 0;
|
||||
for (PointModel point : points) {
|
||||
r += point.getCenterDistance();
|
||||
}
|
||||
return r / points.size();
|
||||
}
|
||||
|
||||
public double getX() {
|
||||
return cx.getEstimate();
|
||||
}
|
||||
|
||||
public double getY() {
|
||||
return cy.getEstimate();
|
||||
}
|
||||
|
||||
private class PointModel extends WeightedMeasurement {
|
||||
|
||||
public PointModel(double px, double py) {
|
||||
super(1.0, 0.0);
|
||||
this.px = px;
|
||||
this.py = py;
|
||||
}
|
||||
|
||||
public double getPartial(EstimatedParameter parameter) {
|
||||
if (parameter == cx) {
|
||||
return getPartialDiX() - getPartialRadiusX();
|
||||
} else if (parameter == cy) {
|
||||
return getPartialDiY() - getPartialRadiusY();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
public double getCenterDistance() {
|
||||
double dx = px - cx.getEstimate();
|
||||
double dy = py - cy.getEstimate();
|
||||
return Math.sqrt(dx * dx + dy * dy);
|
||||
}
|
||||
|
||||
public double getPartialDiX() {
|
||||
return (cx.getEstimate() - px) / getCenterDistance();
|
||||
}
|
||||
|
||||
public double getPartialDiY() {
|
||||
return (cy.getEstimate() - py) / getCenterDistance();
|
||||
}
|
||||
|
||||
public double getTheoreticalValue() {
|
||||
return getCenterDistance() - getRadius();
|
||||
}
|
||||
|
||||
private double px;
|
||||
private double py;
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
}
|
||||
|
||||
private EstimatedParameter cx;
|
||||
private EstimatedParameter cy;
|
||||
private ArrayList<PointModel> points;
|
||||
|
||||
}
|
||||
|
||||
public static Test suite() {
|
||||
return new TestSuite(GaussNewtonEstimatorTest.class);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,573 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math.optimization.general;
|
||||
|
||||
import java.awt.geom.Point2D;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
|
||||
import junit.framework.Test;
|
||||
import junit.framework.TestCase;
|
||||
import junit.framework.TestSuite;
|
||||
|
||||
import org.apache.commons.math.linear.DenseRealMatrix;
|
||||
import org.apache.commons.math.linear.RealMatrix;
|
||||
import org.apache.commons.math.optimization.ObjectiveException;
|
||||
import org.apache.commons.math.optimization.OptimizationException;
|
||||
import org.apache.commons.math.optimization.SimpleVectorialValueChecker;
|
||||
import org.apache.commons.math.optimization.VectorialDifferentiableObjectiveFunction;
|
||||
import org.apache.commons.math.optimization.VectorialPointValuePair;
|
||||
|
||||
/**
|
||||
* <p>Some of the unit tests are re-implementations of the MINPACK <a
|
||||
* href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
|
||||
* href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
|
||||
* The redistribution policy for MINPACK is available <a
|
||||
* href="http://www.netlib.org/minpack/disclaimer">here</a>, for
|
||||
* convenience, it is reproduced below.</p>
|
||||
|
||||
* <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
|
||||
* <tr><td>
|
||||
* Minpack Copyright Notice (1999) University of Chicago.
|
||||
* All rights reserved
|
||||
* </td></tr>
|
||||
* <tr><td>
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* <ol>
|
||||
* <li>Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.</li>
|
||||
* <li>Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.</li>
|
||||
* <li>The end-user documentation included with the redistribution, if any,
|
||||
* must include the following acknowledgment:
|
||||
* <code>This product includes software developed by the University of
|
||||
* Chicago, as Operator of Argonne National Laboratory.</code>
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.</li>
|
||||
* <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
|
||||
* WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
|
||||
* UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
|
||||
* THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
|
||||
* OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
|
||||
* OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
|
||||
* USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
|
||||
* THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
|
||||
* DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
|
||||
* UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
|
||||
* BE CORRECTED.</strong></li>
|
||||
* <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
|
||||
* HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
|
||||
* ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
|
||||
* INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
|
||||
* ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
* PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
|
||||
* SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
|
||||
* (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
|
||||
* EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
|
||||
* POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
|
||||
* <ol></td></tr>
|
||||
* </table>
|
||||
|
||||
* @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
|
||||
* @author Burton S. Garbow (original fortran minpack tests)
|
||||
* @author Kenneth E. Hillstrom (original fortran minpack tests)
|
||||
* @author Jorge J. More (original fortran minpack tests)
|
||||
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
|
||||
*/
|
||||
public class GaussNewtonOptimizerTest
|
||||
extends TestCase {
|
||||
|
||||
public GaussNewtonOptimizerTest(String name) {
|
||||
super(name);
|
||||
}
|
||||
|
||||
public void testTrivial() throws ObjectiveException, OptimizationException {
|
||||
LinearProblem problem =
|
||||
new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
|
||||
GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
|
||||
optimizer.setMaxEvaluations(100);
|
||||
optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
|
||||
VectorialPointValuePair optimum =
|
||||
optimizer.optimize(problem, problem.target, new double[] { 1 }, new double[] { 0 });
|
||||
assertEquals(0, optimizer.getRMS(), 1.0e-10);
|
||||
assertEquals(1.5, optimum.getPoint()[0], 1.0e-10);
|
||||
}
|
||||
|
||||
public void testColumnsPermutation() throws ObjectiveException, OptimizationException {
|
||||
|
||||
LinearProblem problem =
|
||||
new LinearProblem(new double[][] { { 1.0, -1.0 }, { 0.0, 2.0 }, { 1.0, -2.0 } },
|
||||
new double[] { 4.0, 6.0, 1.0 });
|
||||
|
||||
GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
|
||||
optimizer.setMaxEvaluations(100);
|
||||
optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
|
||||
VectorialPointValuePair optimum =
|
||||
optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0 });
|
||||
assertEquals(0, optimizer.getRMS(), 1.0e-10);
|
||||
assertEquals(7.0, optimum.getPoint()[0], 1.0e-10);
|
||||
assertEquals(3.0, optimum.getPoint()[1], 1.0e-10);
|
||||
|
||||
}
|
||||
|
||||
public void testNoDependency() throws ObjectiveException, OptimizationException {
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 2, 0, 0, 0, 0, 0 },
|
||||
{ 0, 2, 0, 0, 0, 0 },
|
||||
{ 0, 0, 2, 0, 0, 0 },
|
||||
{ 0, 0, 0, 2, 0, 0 },
|
||||
{ 0, 0, 0, 0, 2, 0 },
|
||||
{ 0, 0, 0, 0, 0, 2 }
|
||||
}, new double[] { 0.0, 1.1, 2.2, 3.3, 4.4, 5.5 });
|
||||
GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
|
||||
optimizer.setMaxEvaluations(100);
|
||||
optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
|
||||
VectorialPointValuePair optimum =
|
||||
optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1, 1, 1, 1 },
|
||||
new double[] { 0, 0, 0, 0, 0, 0 });
|
||||
assertEquals(0, optimizer.getRMS(), 1.0e-10);
|
||||
for (int i = 0; i < problem.target.length; ++i) {
|
||||
assertEquals(0.55 * i, optimum.getPoint()[i], 1.0e-10);
|
||||
}
|
||||
}
|
||||
|
||||
public void testOneSet() throws ObjectiveException, OptimizationException {
|
||||
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 0, 0 },
|
||||
{ -1, 1, 0 },
|
||||
{ 0, -1, 1 }
|
||||
}, new double[] { 1, 1, 1});
|
||||
GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
|
||||
optimizer.setMaxEvaluations(100);
|
||||
optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
|
||||
VectorialPointValuePair optimum =
|
||||
optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
|
||||
assertEquals(0, optimizer.getRMS(), 1.0e-10);
|
||||
assertEquals(1.0, optimum.getPoint()[0], 1.0e-10);
|
||||
assertEquals(2.0, optimum.getPoint()[1], 1.0e-10);
|
||||
assertEquals(3.0, optimum.getPoint()[2], 1.0e-10);
|
||||
|
||||
}
|
||||
|
||||
    public void testTwoSets() throws ObjectiveException, OptimizationException {
        double epsilon = 1.0e-7;
        LinearProblem problem = new LinearProblem(new double[][] {
                { 2, 1, 0, 4, 0, 0 },
                { -4, -2, 3, -7, 0, 0 },
                { 4, 1, -2, 8, 0, 0 },
                { 0, -3, -12, -1, 0, 0 },
                { 0, 0, 0, 0, epsilon, 1 },
                { 0, 0, 0, 0, 1, 1 }
        }, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});

        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        VectorialPointValuePair optimum =
            optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1, 1, 1, 1 },
                               new double[] { 0, 0, 0, 0, 0, 0 });
        assertEquals(0, optimizer.getRMS(), 1.0e-10);
        assertEquals( 3.0, optimum.getPoint()[0], 1.0e-10);
        assertEquals( 4.0, optimum.getPoint()[1], 1.0e-10);
        assertEquals(-1.0, optimum.getPoint()[2], 1.0e-10);
        assertEquals(-2.0, optimum.getPoint()[3], 1.0e-10);
        assertEquals( 1.0 + epsilon, optimum.getPoint()[4], 1.0e-10);
        assertEquals( 1.0 - epsilon, optimum.getPoint()[5], 1.0e-10);

    }
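    // Singular 3x3 matrix (its determinant is zero), so no solution can be
    // computed and an OptimizationException is expected.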
    public void testNonInversible() throws OptimizationException {

        LinearProblem problem = new LinearProblem(new double[][] {
                { 1, 2, -3 },
                { 2, 1, 3 },
                { -3, 0, -9 }
        }, new double[] { 1, 1, 1 });
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        try {
            optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 0, 0, 0 });
            fail("an exception should have been caught");
        } catch (OptimizationException ee) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception type caught");
        }
    }
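    // Classic ill-conditioned system: the unperturbed problem has solution
    // (1, 1, 1, 1), while a slightly perturbed matrix with the same right-hand
    // side yields the very different solution (-81, 137, -34, 22).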
    public void testIllConditioned() throws ObjectiveException, OptimizationException {
        LinearProblem problem1 = new LinearProblem(new double[][] {
                { 10.0, 7.0, 8.0, 7.0 },
                { 7.0, 5.0, 6.0, 5.0 },
                { 8.0, 6.0, 10.0, 9.0 },
                { 7.0, 5.0, 9.0, 10.0 }
        }, new double[] { 32, 23, 33, 31 });
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        VectorialPointValuePair optimum1 =
            optimizer.optimize(problem1, problem1.target, new double[] { 1, 1, 1, 1 },
                               new double[] { 0, 1, 2, 3 });
        assertEquals(0, optimizer.getRMS(), 1.0e-10);
        assertEquals(1.0, optimum1.getPoint()[0], 1.0e-10);
        assertEquals(1.0, optimum1.getPoint()[1], 1.0e-10);
        assertEquals(1.0, optimum1.getPoint()[2], 1.0e-10);
        assertEquals(1.0, optimum1.getPoint()[3], 1.0e-10);

        LinearProblem problem2 = new LinearProblem(new double[][] {
                { 10.00, 7.00, 8.10, 7.20 },
                { 7.08, 5.04, 6.00, 5.00 },
                { 8.00, 5.98, 9.89, 9.00 },
                { 6.99, 4.99, 9.00, 9.98 }
        }, new double[] { 32, 23, 33, 31 });
        VectorialPointValuePair optimum2 =
            optimizer.optimize(problem2, problem2.target, new double[] { 1, 1, 1, 1 },
                               new double[] { 0, 1, 2, 3 });
        assertEquals(0, optimizer.getRMS(), 1.0e-10);
        assertEquals(-81.0, optimum2.getPoint()[0], 1.0e-8);
        assertEquals(137.0, optimum2.getPoint()[1], 1.0e-8);
        assertEquals(-34.0, optimum2.getPoint()[2], 1.0e-8);
        assertEquals( 22.0, optimum2.getPoint()[3], 1.0e-8);

    }
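    // Underdetermined problem (3 equations, 4 unknowns): plain Gauss-Newton is
    // expected to reject it with an OptimizationException.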
    public void testMoreEstimatedParametersSimple() throws OptimizationException {

        LinearProblem problem = new LinearProblem(new double[][] {
                { 3.0, 2.0, 0.0, 0.0 },
                { 0.0, 1.0, -1.0, 1.0 },
                { 2.0, 0.0, 1.0, 0.0 }
        }, new double[] { 7.0, 3.0, 5.0 });

        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        try {
            optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 },
                               new double[] { 7, 6, 5, 4 });
            fail("an exception should have been caught");
        } catch (OptimizationException ee) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception type caught");
        }

    }
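    // Another underdetermined problem (5 equations, 6 unknowns); an
    // OptimizationException is expected here as well.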
    public void testMoreEstimatedParametersUnsorted() throws OptimizationException {
        LinearProblem problem = new LinearProblem(new double[][] {
                { 1.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
                { 0.0, 0.0, 1.0, 1.0, 1.0, 0.0 },
                { 0.0, 0.0, 0.0, 0.0, 1.0, -1.0 },
                { 0.0, 0.0, -1.0, 1.0, 0.0, 1.0 },
                { 0.0, 0.0, 0.0, -1.0, 1.0, 0.0 }
        }, new double[] { 3.0, 12.0, -1.0, 7.0, 1.0 });
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        try {
            optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1, 1, 1 },
                               new double[] { 2, 2, 2, 2, 2, 2 });
            fail("an exception should have been caught");
        } catch (OptimizationException ee) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception type caught");
        }
    }
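    // Overdetermined but consistent system (3 equations, 2 unknowns): the exact
    // solution (2, 1) satisfies all three equations, so the residual should be zero.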
    public void testRedundantEquations() throws ObjectiveException, OptimizationException {
        LinearProblem problem = new LinearProblem(new double[][] {
                { 1.0, 1.0 },
                { 1.0, -1.0 },
                { 1.0, 3.0 }
        }, new double[] { 3.0, 1.0, 5.0 });

        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        VectorialPointValuePair optimum =
            optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 },
                               new double[] { 1, 1 });
        assertEquals(0, optimizer.getRMS(), 1.0e-10);
        assertEquals(2.0, optimum.getPoint()[0], 1.0e-8);
        assertEquals(1.0, optimum.getPoint()[1], 1.0e-8);

    }
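    // Same matrix with an inconsistent right-hand side: no exact solution exists,
    // so the best least-squares fit leaves a clearly non-zero RMS.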
    public void testInconsistentEquations() throws ObjectiveException, OptimizationException {
        LinearProblem problem = new LinearProblem(new double[][] {
                { 1.0, 1.0 },
                { 1.0, -1.0 },
                { 1.0, 3.0 }
        }, new double[] { 3.0, 1.0, 4.0 });

        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        optimizer.optimize(problem, problem.target, new double[] { 1, 1, 1 }, new double[] { 1, 1 });
        assertTrue(optimizer.getRMS() > 0.1);

    }
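    // Dimension mismatches: a weights vector that does not match the target length
    // is expected to raise an OptimizationException, while a target shorter than
    // the model output is expected to surface as an ObjectiveException.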
    public void testInconsistentSizes() throws ObjectiveException, OptimizationException {
        LinearProblem problem =
            new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } }, new double[] { -1, 1 });
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));

        VectorialPointValuePair optimum =
            optimizer.optimize(problem, problem.target, new double[] { 1, 1 }, new double[] { 0, 0 });
        assertEquals(0, optimizer.getRMS(), 1.0e-10);
        assertEquals(-1, optimum.getPoint()[0], 1.0e-10);
        assertEquals(+1, optimum.getPoint()[1], 1.0e-10);

        try {
            optimizer.optimize(problem, problem.target,
                               new double[] { 1 },
                               new double[] { 0, 0 });
            fail("an exception should have been thrown");
        } catch (OptimizationException oe) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception caught");
        }

        try {
            optimizer.optimize(problem, new double[] { 1 },
                               new double[] { 1 },
                               new double[] { 0, 0 });
            fail("an exception should have been thrown");
        } catch (ObjectiveException oe) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception caught");
        }

    }
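    // Same five-point circle as below, but with an extremely tight convergence
    // checker (1.0e-15): the optimizer is expected to give up with an
    // OptimizationException instead of converging within the 100-evaluation budget.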
    public void testMaxIterations() {
        Circle circle = new Circle();
        circle.addPoint( 30.0, 68.0);
        circle.addPoint( 50.0, -6.0);
        circle.addPoint(110.0, -20.0);
        circle.addPoint( 35.0, 15.0);
        circle.addPoint( 45.0, 97.0);
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-15, 1.0e-15));
        try {
            optimizer.optimize(circle, new double[] { 0, 0, 0, 0, 0 },
                               new double[] { 1, 1, 1, 1, 1 },
                               new double[] { 98.680, 47.345 });
            fail("an exception should have been caught");
        } catch (OptimizationException ee) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception type caught");
        }
    }
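    // Fits the center of a circle to five sample points; the expected center is
    // roughly (96.08, 48.14) with a mean radius of about 69.96.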
    public void testCircleFitting() throws ObjectiveException, OptimizationException {
        Circle circle = new Circle();
        circle.addPoint( 30.0, 68.0);
        circle.addPoint( 50.0, -6.0);
        circle.addPoint(110.0, -20.0);
        circle.addPoint( 35.0, 15.0);
        circle.addPoint( 45.0, 97.0);
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-13, 1.0e-13));
        VectorialPointValuePair optimum =
            optimizer.optimize(circle, new double[] { 0, 0, 0, 0, 0 },
                               new double[] { 1, 1, 1, 1, 1 },
                               new double[] { 98.680, 47.345 });
        assertEquals(1.768262623567235, Math.sqrt(circle.getN()) * optimizer.getRMS(), 1.0e-10);
        Point2D.Double center = new Point2D.Double(optimum.getPointRef()[0], optimum.getPointRef()[1]);
        assertEquals(69.96016175359975, circle.getRadius(center), 1.0e-10);
        assertEquals(96.07590209601095, center.x, 1.0e-10);
        assertEquals(48.135167894714, center.y, 1.0e-10);
    }
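    // Circle fit on a dense ring of noisy sample points: starting far from the data
    // at (-12, -12) is expected to fail, while starting at (0, 0) converges to a
    // center near (-0.152, 0.207).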
    public void testCircleFittingBadInit() throws ObjectiveException, OptimizationException {
        Circle circle = new Circle();
        double[][] points = new double[][] {
                {-0.312967, 0.072366}, {-0.339248, 0.132965}, {-0.379780, 0.202724},
                {-0.390426, 0.260487}, {-0.361212, 0.328325}, {-0.346039, 0.392619},
                {-0.280579, 0.444306}, {-0.216035, 0.470009}, {-0.149127, 0.493832},
                {-0.075133, 0.483271}, {-0.007759, 0.452680}, { 0.060071, 0.410235},
                { 0.103037, 0.341076}, { 0.118438, 0.273884}, { 0.131293, 0.192201},
                { 0.115869, 0.129797}, { 0.072223, 0.058396}, { 0.022884, 0.000718},
                {-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
                {-0.278592, -0.005008}, {-0.337655, 0.056658}, {-0.385899, 0.112526},
                {-0.405517, 0.186957}, {-0.415374, 0.262071}, {-0.387482, 0.343398},
                {-0.347322, 0.397943}, {-0.287623, 0.458425}, {-0.223502, 0.475513},
                {-0.135352, 0.478186}, {-0.061221, 0.483371}, { 0.003711, 0.422737},
                { 0.065054, 0.375830}, { 0.108108, 0.297099}, { 0.123882, 0.222850},
                { 0.117729, 0.134382}, { 0.085195, 0.056820}, { 0.029800, -0.019138},
                {-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
                {-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561, 0.014926},
                {-0.471036, 0.074716}, {-0.488638, 0.182508}, {-0.485990, 0.254068},
                {-0.463943, 0.338438}, {-0.406453, 0.404704}, {-0.334287, 0.466119},
                {-0.254244, 0.503188}, {-0.161548, 0.495769}, {-0.075733, 0.495560},
                { 0.001375, 0.434937}, { 0.082787, 0.385806}, { 0.115490, 0.323807},
                { 0.141089, 0.223450}, { 0.138693, 0.131703}, { 0.126415, 0.049174},
                { 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
                {-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
                {-0.405195, -0.000895}, {-0.444937, 0.085456}, {-0.484357, 0.175597},
                {-0.472453, 0.248681}, {-0.438580, 0.347463}, {-0.402304, 0.422428},
                {-0.326777, 0.479438}, {-0.247797, 0.505581}, {-0.152676, 0.519380},
                {-0.071754, 0.516264}, { 0.015942, 0.472802}, { 0.076608, 0.419077},
                { 0.127673, 0.330264}, { 0.159951, 0.262150}, { 0.153530, 0.172681},
                { 0.140653, 0.089229}, { 0.078666, 0.024981}, { 0.023807, -0.037022},
                {-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
        };
        double[] target = new double[points.length];
        Arrays.fill(target, 0.0);
        double[] weights = new double[points.length];
        Arrays.fill(weights, 2.0);
        for (int i = 0; i < points.length; ++i) {
            circle.addPoint(points[i][0], points[i][1]);
        }
        GaussNewtonOptimizer optimizer = new GaussNewtonOptimizer(true);
        optimizer.setMaxEvaluations(100);
        optimizer.setConvergenceChecker(new SimpleVectorialValueChecker(1.0e-6, 1.0e-6));
        try {
            optimizer.optimize(circle, target, weights, new double[] { -12, -12 });
            fail("an exception should have been caught");
        } catch (OptimizationException ee) {
            // expected behavior
        } catch (Exception e) {
            fail("wrong exception type caught");
        }

        VectorialPointValuePair optimum =
            optimizer.optimize(circle, target, weights, new double[] { 0, 0 });
        assertEquals(-0.1517383071957963, optimum.getPointRef()[0], 1.0e-8);
        assertEquals(0.2074999736353867, optimum.getPointRef()[1], 1.0e-8);
        assertEquals(0.04268731682389561, optimizer.getRMS(), 1.0e-8);

    }
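    // Linear model used by the tests above: the objective value is factors * x,
    // so the Jacobian is simply the constant factors matrix.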
    private static class LinearProblem implements VectorialDifferentiableObjectiveFunction {

        private static final long serialVersionUID = 703247177355019415L;
        final RealMatrix factors;
        final double[] target;
        public LinearProblem(double[][] factors, double[] target) {
            this.factors = new DenseRealMatrix(factors);
            this.target = target;
        }

        public double[][] jacobian(double[] variables, double[] value) {
            return factors.getData();
        }

        public double[] objective(double[] variables) {
            return factors.operate(variables);
        }

    }
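    // Circle model for the fitting tests: the two free parameters are the center
    // coordinates, the radius is taken as the mean distance from the center to the
    // sample points, and each residual is a point's distance to the center minus
    // that mean radius; the Jacobian takes the dependence of the mean radius on
    // the center into account.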
    private static class Circle implements VectorialDifferentiableObjectiveFunction {

        private static final long serialVersionUID = -4711170319243817874L;

        private ArrayList<Point2D.Double> points;

        public Circle() {
            points = new ArrayList<Point2D.Double>();
        }

        public void addPoint(double px, double py) {
            points.add(new Point2D.Double(px, py));
        }

        public int getN() {
            return points.size();
        }

        public double getRadius(Point2D.Double center) {
            double r = 0;
            for (Point2D.Double point : points) {
                r += point.distance(center);
            }
            return r / points.size();
        }

        public double[][] jacobian(double[] variables, double[] value)
            throws ObjectiveException, IllegalArgumentException {

            int n = points.size();
            Point2D.Double center = new Point2D.Double(variables[0], variables[1]);

            // gradient of the optimal radius
            double dRdX = 0;
            double dRdY = 0;
            for (Point2D.Double pk : points) {
                double dk = pk.distance(center);
                dRdX += (center.x - pk.x) / dk;
                dRdY += (center.y - pk.y) / dk;
            }
            dRdX /= n;
            dRdY /= n;

            // jacobian of the radius residuals
            double[][] jacobian = new double[n][2];
            for (int i = 0; i < n; ++i) {
                Point2D.Double pi = points.get(i);
                double di = pi.distance(center);
                jacobian[i][0] = (center.x - pi.x) / di - dRdX;
                jacobian[i][1] = (center.y - pi.y) / di - dRdY;
            }

            return jacobian;

        }

        public double[] objective(double[] variables)
            throws ObjectiveException, IllegalArgumentException {

            Point2D.Double center = new Point2D.Double(variables[0], variables[1]);
            double radius = getRadius(center);

            double[] residuals = new double[points.size()];
            for (int i = 0; i < residuals.length; ++i) {
                residuals[i] = points.get(i).distance(center) - radius;
            }

            return residuals;

        }

    }
    public static Test suite() {
        return new TestSuite(GaussNewtonOptimizerTest.class);
    }

}