Remove deprecated classes in optim package.
This commit is contained in:
parent
0737cf82db
commit
e31fde875c
|
@@ -1,253 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.linear.BlockRealMatrix;
|
||||
import org.apache.commons.math4.linear.RealMatrix;
|
||||
import org.apache.commons.math4.optim.InitialGuess;
|
||||
import org.apache.commons.math4.optim.MaxEval;
|
||||
import org.apache.commons.math4.optim.OptimizationData;
|
||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
||||
import org.apache.commons.math4.optim.SimpleBounds;
|
||||
import org.apache.commons.math4.optim.SimpleVectorValueChecker;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.JacobianMultivariateVectorOptimizer;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.MultiStartMultivariateVectorOptimizer;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Target;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.GaussNewtonOptimizer;
|
||||
import org.apache.commons.math4.random.GaussianRandomGenerator;
|
||||
import org.apache.commons.math4.random.JDKRandomGenerator;
|
||||
import org.apache.commons.math4.random.RandomVectorGenerator;
|
||||
import org.apache.commons.math4.random.UncorrelatedRandomVectorGenerator;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* <p>Some of the unit tests are re-implementations of the MINPACK <a
|
||||
* href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
|
||||
* href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
|
||||
* The redistribution policy for MINPACK is available <a
|
||||
* href="http://www.netlib.org/minpack/disclaimer">here</a>, for
|
||||
* convenience, it is reproduced below.</p>
|
||||
*
|
||||
* <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
|
||||
* <tr><td>
|
||||
* Minpack Copyright Notice (1999) University of Chicago.
|
||||
* All rights reserved
|
||||
* </td></tr>
|
||||
* <tr><td>
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* <ol>
|
||||
* <li>Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.</li>
|
||||
* <li>Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.</li>
|
||||
* <li>The end-user documentation included with the redistribution, if any,
|
||||
* must include the following acknowledgment:
|
||||
* <code>This product includes software developed by the University of
|
||||
* Chicago, as Operator of Argonne National Laboratory.</code>
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.</li>
|
||||
* <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
|
||||
* WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
|
||||
* UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
|
||||
* THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
|
||||
* OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
|
||||
* OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
|
||||
* USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
|
||||
* THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
|
||||
* DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
|
||||
* UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
|
||||
* BE CORRECTED.</strong></li>
|
||||
* <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
|
||||
* HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
|
||||
* ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
|
||||
* INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
|
||||
* ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
* PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
|
||||
* SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
|
||||
* (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
|
||||
* EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
|
||||
* POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
|
||||
* <ol></td></tr>
|
||||
* </table>
|
||||
*
|
||||
* @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
|
||||
* @author Burton S. Garbow (original fortran minpack tests)
|
||||
* @author Kenneth E. Hillstrom (original fortran minpack tests)
|
||||
* @author Jorge J. More (original fortran minpack tests)
|
||||
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
|
||||
*/
|
||||
@Deprecated
|
||||
public class MultiStartMultivariateVectorOptimizerTest {
|
||||
|
||||
@Test(expected=NullPointerException.class)
|
||||
public void testGetOptimaBeforeOptimize() {
|
||||
|
||||
JacobianMultivariateVectorOptimizer underlyingOptimizer
|
||||
= new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6));
|
||||
JDKRandomGenerator g = new JDKRandomGenerator();
|
||||
g.setSeed(16069223052l);
|
||||
RandomVectorGenerator generator
|
||||
= new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
|
||||
MultiStartMultivariateVectorOptimizer optimizer
|
||||
= new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
|
||||
|
||||
optimizer.getOptima();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTrivial() {
|
||||
LinearProblem problem
|
||||
= new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
|
||||
JacobianMultivariateVectorOptimizer underlyingOptimizer
|
||||
= new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6));
|
||||
JDKRandomGenerator g = new JDKRandomGenerator();
|
||||
g.setSeed(16069223052l);
|
||||
RandomVectorGenerator generator
|
||||
= new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
|
||||
MultiStartMultivariateVectorOptimizer optimizer
|
||||
= new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
|
||||
|
||||
PointVectorValuePair optimum
|
||||
= optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 0 }));
|
||||
Assert.assertEquals(1.5, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(3.0, optimum.getValue()[0], 1e-10);
|
||||
PointVectorValuePair[] optima = optimizer.getOptima();
|
||||
Assert.assertEquals(10, optima.length);
|
||||
for (int i = 0; i < optima.length; i++) {
|
||||
Assert.assertEquals(1.5, optima[i].getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(3.0, optima[i].getValue()[0], 1e-10);
|
||||
}
|
||||
Assert.assertTrue(optimizer.getEvaluations() > 20);
|
||||
Assert.assertTrue(optimizer.getEvaluations() < 50);
|
||||
Assert.assertEquals(100, optimizer.getMaxEvaluations());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIssue914() {
|
||||
LinearProblem problem = new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
|
||||
JacobianMultivariateVectorOptimizer underlyingOptimizer =
|
||||
new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6)) {
|
||||
@Override
|
||||
public PointVectorValuePair optimize(OptimizationData... optData) {
|
||||
// filter out simple bounds, as they are not supported
|
||||
// by the underlying optimizer, and we don't really care for this test
|
||||
OptimizationData[] filtered = optData.clone();
|
||||
for (int i = 0; i < filtered.length; ++i) {
|
||||
if (filtered[i] instanceof SimpleBounds) {
|
||||
filtered[i] = null;
|
||||
}
|
||||
}
|
||||
return super.optimize(filtered);
|
||||
}
|
||||
};
|
||||
JDKRandomGenerator g = new JDKRandomGenerator();
|
||||
g.setSeed(16069223052l);
|
||||
RandomVectorGenerator generator =
|
||||
new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
|
||||
MultiStartMultivariateVectorOptimizer optimizer =
|
||||
new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
|
||||
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 0 }),
|
||||
new SimpleBounds(new double[] { -1.0e-10 }, new double[] { 1.0e-10 }));
|
||||
PointVectorValuePair[] optima = optimizer.getOptima();
|
||||
// only the first start should have succeeded
|
||||
Assert.assertEquals(1, optima.length);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Test demonstrating that the user exception is finally thrown if none
|
||||
* of the runs succeed.
|
||||
*/
|
||||
@Test(expected=TestException.class)
|
||||
public void testNoOptimum() {
|
||||
JacobianMultivariateVectorOptimizer underlyingOptimizer
|
||||
= new GaussNewtonOptimizer(true, new SimpleVectorValueChecker(1e-6, 1e-6));
|
||||
JDKRandomGenerator g = new JDKRandomGenerator();
|
||||
g.setSeed(12373523445l);
|
||||
RandomVectorGenerator generator
|
||||
= new UncorrelatedRandomVectorGenerator(1, new GaussianRandomGenerator(g));
|
||||
MultiStartMultivariateVectorOptimizer optimizer
|
||||
= new MultiStartMultivariateVectorOptimizer(underlyingOptimizer, 10, generator);
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
new Target(new double[] { 0 }),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 0 }),
|
||||
new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] point) {
|
||||
throw new TestException();
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
private static class TestException extends RuntimeException {
|
||||
|
||||
private static final long serialVersionUID = 1L;}
|
||||
|
||||
private static class LinearProblem {
|
||||
private final RealMatrix factors;
|
||||
private final double[] target;
|
||||
|
||||
public LinearProblem(double[][] factors,
|
||||
double[] target) {
|
||||
this.factors = new BlockRealMatrix(factors);
|
||||
this.target = target;
|
||||
}
|
||||
|
||||
public Target getTarget() {
|
||||
return new Target(target);
|
||||
}
|
||||
|
||||
public ModelFunction getModelFunction() {
|
||||
return new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] variables) {
|
||||
return factors.operate(variables);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public ModelFunctionJacobian getModelFunctionJacobian() {
|
||||
return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(double[] point) {
|
||||
return factors.getData();
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,641 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.exception.ConvergenceException;
|
||||
import org.apache.commons.math4.exception.DimensionMismatchException;
|
||||
import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
|
||||
import org.apache.commons.math4.linear.BlockRealMatrix;
|
||||
import org.apache.commons.math4.linear.RealMatrix;
|
||||
import org.apache.commons.math4.optim.InitialGuess;
|
||||
import org.apache.commons.math4.optim.MaxEval;
|
||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Target;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer;
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* <p>Some of the unit tests are re-implementations of the MINPACK <a
|
||||
* href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
|
||||
* href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
|
||||
* The redistribution policy for MINPACK is available <a
|
||||
* href="http://www.netlib.org/minpack/disclaimer">here</a>, for
|
||||
* convenience, it is reproduced below.</p>
|
||||
|
||||
* <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
|
||||
* <tr><td>
|
||||
* Minpack Copyright Notice (1999) University of Chicago.
|
||||
* All rights reserved
|
||||
* </td></tr>
|
||||
* <tr><td>
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* <ol>
|
||||
* <li>Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.</li>
|
||||
* <li>Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.</li>
|
||||
* <li>The end-user documentation included with the redistribution, if any,
|
||||
* must include the following acknowledgment:
|
||||
* <code>This product includes software developed by the University of
|
||||
* Chicago, as Operator of Argonne National Laboratory.</code>
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.</li>
|
||||
* <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
|
||||
* WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
|
||||
* UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
|
||||
* THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
|
||||
* OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
|
||||
* OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
|
||||
* USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
|
||||
* THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
|
||||
* DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
|
||||
* UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
|
||||
* BE CORRECTED.</strong></li>
|
||||
* <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
|
||||
* HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
|
||||
* ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
|
||||
* INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
|
||||
* ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
* PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
|
||||
* SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
|
||||
* (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
|
||||
* EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
|
||||
* POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
|
||||
* <ol></td></tr>
|
||||
* </table>
|
||||
|
||||
* @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
|
||||
* @author Burton S. Garbow (original fortran minpack tests)
|
||||
* @author Kenneth E. Hillstrom (original fortran minpack tests)
|
||||
* @author Jorge J. More (original fortran minpack tests)
|
||||
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
|
||||
*/
|
||||
@Deprecated
|
||||
public abstract class AbstractLeastSquaresOptimizerAbstractTest {
|
||||
|
||||
public abstract AbstractLeastSquaresOptimizer createOptimizer();
|
||||
|
||||
@Test
|
||||
public void testGetIterations() {
|
||||
AbstractLeastSquaresOptimizer optim = createOptimizer();
|
||||
optim.optimize(new MaxEval(100), new Target(new double[] { 1 }),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 3 }),
|
||||
new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] point) {
|
||||
return new double[] {
|
||||
FastMath.pow(point[0], 4)
|
||||
};
|
||||
}
|
||||
}),
|
||||
new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(double[] point) {
|
||||
return new double[][] {
|
||||
{ 0.25 * FastMath.pow(point[0], 3) }
|
||||
};
|
||||
}
|
||||
}));
|
||||
|
||||
Assert.assertTrue(optim.getIterations() > 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTrivial() {
|
||||
LinearProblem problem
|
||||
= new LinearProblem(new double[][] { { 2 } }, new double[] { 3 });
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(1.5, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(3.0, optimum.getValue()[0], 1e-10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testQRColumnsPermutation() {
|
||||
|
||||
LinearProblem problem
|
||||
= new LinearProblem(new double[][] { { 1, -1 }, { 0, 2 }, { 1, -2 } },
|
||||
new double[] { 4, 6, 1 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(7, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(3, optimum.getPoint()[1], 1e-10);
|
||||
Assert.assertEquals(4, optimum.getValue()[0], 1e-10);
|
||||
Assert.assertEquals(6, optimum.getValue()[1], 1e-10);
|
||||
Assert.assertEquals(1, optimum.getValue()[2], 1e-10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNoDependency() {
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 2, 0, 0, 0, 0, 0 },
|
||||
{ 0, 2, 0, 0, 0, 0 },
|
||||
{ 0, 0, 2, 0, 0, 0 },
|
||||
{ 0, 0, 0, 2, 0, 0 },
|
||||
{ 0, 0, 0, 0, 2, 0 },
|
||||
{ 0, 0, 0, 0, 0, 2 }
|
||||
}, new double[] { 0, 1.1, 2.2, 3.3, 4.4, 5.5 });
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0, 0, 0, 0, 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
for (int i = 0; i < problem.target.length; ++i) {
|
||||
Assert.assertEquals(0.55 * i, optimum.getPoint()[i], 1e-10);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOneSet() {
|
||||
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 0, 0 },
|
||||
{ -1, 1, 0 },
|
||||
{ 0, -1, 1 }
|
||||
}, new double[] { 1, 1, 1});
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0, 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(1, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(2, optimum.getPoint()[1], 1e-10);
|
||||
Assert.assertEquals(3, optimum.getPoint()[2], 1e-10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTwoSets() {
|
||||
double epsilon = 1e-7;
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 2, 1, 0, 4, 0, 0 },
|
||||
{ -4, -2, 3, -7, 0, 0 },
|
||||
{ 4, 1, -2, 8, 0, 0 },
|
||||
{ 0, -3, -12, -1, 0, 0 },
|
||||
{ 0, 0, 0, 0, epsilon, 1 },
|
||||
{ 0, 0, 0, 0, 1, 1 }
|
||||
}, new double[] { 2, -9, 2, 2, 1 + epsilon * epsilon, 2});
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0, 0, 0, 0, 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(3, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(4, optimum.getPoint()[1], 1e-10);
|
||||
Assert.assertEquals(-1, optimum.getPoint()[2], 1e-10);
|
||||
Assert.assertEquals(-2, optimum.getPoint()[3], 1e-10);
|
||||
Assert.assertEquals(1 + epsilon, optimum.getPoint()[4], 1e-10);
|
||||
Assert.assertEquals(1 - epsilon, optimum.getPoint()[5], 1e-10);
|
||||
}
|
||||
|
||||
@Test(expected=ConvergenceException.class)
|
||||
public void testNonInvertible() throws Exception {
|
||||
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 2, -3 },
|
||||
{ 2, 1, 3 },
|
||||
{ -3, 0, -9 }
|
||||
}, new double[] { 1, 1, 1 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0, 0 }));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIllConditioned() {
|
||||
LinearProblem problem1 = new LinearProblem(new double[][] {
|
||||
{ 10, 7, 8, 7 },
|
||||
{ 7, 5, 6, 5 },
|
||||
{ 8, 6, 10, 9 },
|
||||
{ 7, 5, 9, 10 }
|
||||
}, new double[] { 32, 23, 33, 31 });
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum1 =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem1.getModelFunction(),
|
||||
problem1.getModelFunctionJacobian(),
|
||||
problem1.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 1, 2, 3 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(1, optimum1.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(1, optimum1.getPoint()[1], 1e-10);
|
||||
Assert.assertEquals(1, optimum1.getPoint()[2], 1e-10);
|
||||
Assert.assertEquals(1, optimum1.getPoint()[3], 1e-10);
|
||||
|
||||
LinearProblem problem2 = new LinearProblem(new double[][] {
|
||||
{ 10.00, 7.00, 8.10, 7.20 },
|
||||
{ 7.08, 5.04, 6.00, 5.00 },
|
||||
{ 8.00, 5.98, 9.89, 9.00 },
|
||||
{ 6.99, 4.99, 9.00, 9.98 }
|
||||
}, new double[] { 32, 23, 33, 31 });
|
||||
PointVectorValuePair optimum2 =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem2.getModelFunction(),
|
||||
problem2.getModelFunctionJacobian(),
|
||||
problem2.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 1, 2, 3 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(-81, optimum2.getPoint()[0], 1e-8);
|
||||
Assert.assertEquals(137, optimum2.getPoint()[1], 1e-8);
|
||||
Assert.assertEquals(-34, optimum2.getPoint()[2], 1e-8);
|
||||
Assert.assertEquals( 22, optimum2.getPoint()[3], 1e-8);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMoreEstimatedParametersSimple() {
|
||||
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 3, 2, 0, 0 },
|
||||
{ 0, 1, -1, 1 },
|
||||
{ 2, 0, 1, 0 }
|
||||
}, new double[] { 7, 3, 5 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 7, 6, 5, 4 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMoreEstimatedParametersUnsorted() {
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 1, 0, 0, 0, 0 },
|
||||
{ 0, 0, 1, 1, 1, 0 },
|
||||
{ 0, 0, 0, 0, 1, -1 },
|
||||
{ 0, 0, -1, 1, 0, 1 },
|
||||
{ 0, 0, 0, -1, 1, 0 }
|
||||
}, new double[] { 3, 12, -1, 7, 1 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 2, 2, 2, 2, 2, 2 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(3, optimum.getPointRef()[2], 1e-10);
|
||||
Assert.assertEquals(4, optimum.getPointRef()[3], 1e-10);
|
||||
Assert.assertEquals(5, optimum.getPointRef()[4], 1e-10);
|
||||
Assert.assertEquals(6, optimum.getPointRef()[5], 1e-10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRedundantEquations() {
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 1 },
|
||||
{ 1, -1 },
|
||||
{ 1, 3 }
|
||||
}, new double[] { 3, 1, 5 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 1, 1 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(2, optimum.getPointRef()[0], 1e-10);
|
||||
Assert.assertEquals(1, optimum.getPointRef()[1], 1e-10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInconsistentEquations() {
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 1 },
|
||||
{ 1, -1 },
|
||||
{ 1, 3 }
|
||||
}, new double[] { 3, 1, 4 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 1, 1 }));
|
||||
Assert.assertTrue(optimizer.getRMS() > 0.1);
|
||||
}
|
||||
|
||||
@Test(expected=DimensionMismatchException.class)
|
||||
public void testInconsistentSizes1() {
|
||||
LinearProblem problem
|
||||
= new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } },
|
||||
new double[] { -1, 1 });
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(-1, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(1, optimum.getPoint()[1], 1e-10);
|
||||
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 0, 0 }));
|
||||
}
|
||||
|
||||
@Test(expected=DimensionMismatchException.class)
|
||||
public void testInconsistentSizes2() {
|
||||
LinearProblem problem
|
||||
= new LinearProblem(new double[][] { { 1, 0 }, { 0, 1 } },
|
||||
new double[] { -1, 1 });
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum
|
||||
= optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0 }));
|
||||
Assert.assertEquals(0, optimizer.getRMS(), 1e-10);
|
||||
Assert.assertEquals(-1, optimum.getPoint()[0], 1e-10);
|
||||
Assert.assertEquals(1, optimum.getPoint()[1], 1e-10);
|
||||
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
new Target(new double[] { 1 }),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 0, 0 }));
|
||||
}
|
||||
|
||||
    /**
     * Fits a circle to five sample points; checks the fitted center, radius
     * and parameter covariances against reference values, then adds many
     * noise-free measurements and verifies the covariances shrink.
     */
    @Test
    public void testCircleFitting() {
        CircleVectorial circle = new CircleVectorial();
        circle.addPoint( 30,  68);
        circle.addPoint( 50,  -6);
        circle.addPoint(110, -20);
        circle.addPoint( 35,  15);
        circle.addPoint( 45,  97);
        AbstractLeastSquaresOptimizer optimizer = createOptimizer();
        PointVectorValuePair optimum
            = optimizer.optimize(new MaxEval(100),
                                 circle.getModelFunction(),
                                 circle.getModelFunctionJacobian(),
                                 new Target(new double[] { 0, 0, 0, 0, 0 }),
                                 new Weight(new double[] { 1, 1, 1, 1, 1 }),
                                 new InitialGuess(new double[] { 98.680, 47.345 }));
        // From this good starting point, convergence should take < 10 evaluations.
        Assert.assertTrue(optimizer.getEvaluations() < 10);
        double rms = optimizer.getRMS();
        Assert.assertEquals(1.768262623567235,  FastMath.sqrt(circle.getN()) * rms,  1e-10);
        Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
        Assert.assertEquals(69.96016176931406, circle.getRadius(center), 1e-6);
        Assert.assertEquals(96.07590211815305, center.getX(), 1e-6);
        Assert.assertEquals(48.13516790438953, center.getY(), 1e-6);
        double[][] cov = optimizer.computeCovariances(optimum.getPoint(), 1e-14);
        Assert.assertEquals(1.839, cov[0][0], 0.001);
        Assert.assertEquals(0.731, cov[0][1], 0.001);
        // The covariance matrix must be symmetric.
        Assert.assertEquals(cov[0][1], cov[1][0], 1e-14);
        Assert.assertEquals(0.786, cov[1][1], 0.001);

        // add perfect measurements and check errors are reduced
        double  r = circle.getRadius(center);
        // Sample the fitted circle itself (exact points, zero residual).
        for (double d= 0; d < 2 * FastMath.PI; d += 0.01) {
            circle.addPoint(center.getX() + r * FastMath.cos(d), center.getY() + r * FastMath.sin(d));
        }
        double[] target = new double[circle.getN()];
        Arrays.fill(target, 0);
        double[] weights = new double[circle.getN()];
        Arrays.fill(weights, 2);
        optimum = optimizer.optimize(new MaxEval(100),
                                     circle.getModelFunction(),
                                     circle.getModelFunctionJacobian(),
                                     new Target(target),
                                     new Weight(weights),
                                     new InitialGuess(new double[] { 98.680, 47.345 }));
        cov = optimizer.computeCovariances(optimum.getPoint(), 1e-14);
        // With the extra exact points, variances drop by ~3 orders of magnitude.
        Assert.assertEquals(0.0016, cov[0][0], 0.001);
        Assert.assertEquals(3.2e-7, cov[0][1], 1e-9);
        Assert.assertEquals(cov[0][1], cov[1][0], 1e-14);
        Assert.assertEquals(0.0016, cov[1][1], 0.001);
    }
|
||||
|
||||
@Test
|
||||
public void testCircleFittingBadInit() {
|
||||
CircleVectorial circle = new CircleVectorial();
|
||||
double[][] points = circlePoints;
|
||||
double[] target = new double[points.length];
|
||||
Arrays.fill(target, 0);
|
||||
double[] weights = new double[points.length];
|
||||
Arrays.fill(weights, 2);
|
||||
for (int i = 0; i < points.length; ++i) {
|
||||
circle.addPoint(points[i][0], points[i][1]);
|
||||
}
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum
|
||||
= optimizer.optimize(new MaxEval(100),
|
||||
circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
new Target(target),
|
||||
new Weight(weights),
|
||||
new InitialGuess(new double[] { -12, -12 }));
|
||||
Vector2D center = new Vector2D(optimum.getPointRef()[0], optimum.getPointRef()[1]);
|
||||
Assert.assertTrue(optimizer.getEvaluations() < 25);
|
||||
Assert.assertEquals( 0.043, optimizer.getRMS(), 1e-3);
|
||||
Assert.assertEquals( 0.292235, circle.getRadius(center), 1e-6);
|
||||
Assert.assertEquals(-0.151738, center.getX(), 1e-6);
|
||||
Assert.assertEquals( 0.2075001, center.getY(), 1e-6);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCircleFittingGoodInit() {
|
||||
CircleVectorial circle = new CircleVectorial();
|
||||
double[][] points = circlePoints;
|
||||
double[] target = new double[points.length];
|
||||
Arrays.fill(target, 0);
|
||||
double[] weights = new double[points.length];
|
||||
Arrays.fill(weights, 2);
|
||||
for (int i = 0; i < points.length; ++i) {
|
||||
circle.addPoint(points[i][0], points[i][1]);
|
||||
}
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum =
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
new Target(target),
|
||||
new Weight(weights),
|
||||
new InitialGuess(new double[] { 0, 0 }));
|
||||
Assert.assertEquals(-0.1517383071957963, optimum.getPointRef()[0], 1e-6);
|
||||
Assert.assertEquals(0.2074999736353867, optimum.getPointRef()[1], 1e-6);
|
||||
Assert.assertEquals(0.04268731682389561, optimizer.getRMS(), 1e-8);
|
||||
}
|
||||
|
||||
    /**
     * Sample (x, y) observations lying roughly on a circle; shared fixture
     * for the circle-fitting tests above.
     */
    private final double[][] circlePoints = new double[][] {
        {-0.312967,  0.072366}, {-0.339248,  0.132965}, {-0.379780,  0.202724},
        {-0.390426,  0.260487}, {-0.361212,  0.328325}, {-0.346039,  0.392619},
        {-0.280579,  0.444306}, {-0.216035,  0.470009}, {-0.149127,  0.493832},
        {-0.075133,  0.483271}, {-0.007759,  0.452680}, { 0.060071,  0.410235},
        { 0.103037,  0.341076}, { 0.118438,  0.273884}, { 0.131293,  0.192201},
        { 0.115869,  0.129797}, { 0.072223,  0.058396}, { 0.022884,  0.000718},
        {-0.053355, -0.020405}, {-0.123584, -0.032451}, {-0.216248, -0.032862},
        {-0.278592, -0.005008}, {-0.337655,  0.056658}, {-0.385899,  0.112526},
        {-0.405517,  0.186957}, {-0.415374,  0.262071}, {-0.387482,  0.343398},
        {-0.347322,  0.397943}, {-0.287623,  0.458425}, {-0.223502,  0.475513},
        {-0.135352,  0.478186}, {-0.061221,  0.483371}, { 0.003711,  0.422737},
        { 0.065054,  0.375830}, { 0.108108,  0.297099}, { 0.123882,  0.222850},
        { 0.117729,  0.134382}, { 0.085195,  0.056820}, { 0.029800, -0.019138},
        {-0.027520, -0.072374}, {-0.102268, -0.091555}, {-0.200299, -0.106578},
        {-0.292731, -0.091473}, {-0.356288, -0.051108}, {-0.420561,  0.014926},
        {-0.471036,  0.074716}, {-0.488638,  0.182508}, {-0.485990,  0.254068},
        {-0.463943,  0.338438}, {-0.406453,  0.404704}, {-0.334287,  0.466119},
        {-0.254244,  0.503188}, {-0.161548,  0.495769}, {-0.075733,  0.495560},
        { 0.001375,  0.434937}, { 0.082787,  0.385806}, { 0.115490,  0.323807},
        { 0.141089,  0.223450}, { 0.138693,  0.131703}, { 0.126415,  0.049174},
        { 0.066518, -0.010217}, {-0.005184, -0.070647}, {-0.080985, -0.103635},
        {-0.177377, -0.116887}, {-0.260628, -0.100258}, {-0.335756, -0.056251},
        {-0.405195, -0.000895}, {-0.444937,  0.085456}, {-0.484357,  0.175597},
        {-0.472453,  0.248681}, {-0.438580,  0.347463}, {-0.402304,  0.422428},
        {-0.326777,  0.479438}, {-0.247797,  0.505581}, {-0.152676,  0.519380},
        {-0.071754,  0.516264}, { 0.015942,  0.472802}, { 0.076608,  0.419077},
        { 0.127673,  0.330264}, { 0.159951,  0.262150}, { 0.153530,  0.172681},
        { 0.140653,  0.089229}, { 0.078666,  0.024981}, { 0.023807, -0.037022},
        {-0.048837, -0.077056}, {-0.127729, -0.075338}, {-0.221271, -0.067526}
    };
|
||||
|
||||
    /**
     * Runs a NIST StRD least-squares reference problem and checks the fitted
     * parameters against the dataset's certified values.
     *
     * @param dataset Reference dataset (model, observations, certified values).
     * @param errParams Maximum allowed relative error on each fitted parameter.
     * @param errParamsSd Maximum allowed relative error on the parameter
     * standard deviations. NOTE(review): this argument is currently unused —
     * no standard-deviation check is performed in this method.
     */
    public void doTestStRD(final StatisticalReferenceDataset dataset,
                           final double errParams,
                           final double errParamsSd) {
        final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
        // Unit weights for every observation.
        final double[] w = new double[dataset.getNumObservations()];
        Arrays.fill(w, 1);

        final double[][] data = dataset.getData();
        final double[] initial = dataset.getStartingPoint(0);
        final StatisticalReferenceDataset.LeastSquaresProblem problem = dataset.getLeastSquaresProblem();
        // data[1] holds the observed y values used as the optimization target.
        final PointVectorValuePair optimum
            = optimizer.optimize(new MaxEval(100),
                                 problem.getModelFunction(),
                                 problem.getModelFunctionJacobian(),
                                 new Target(data[1]),
                                 new Weight(w),
                                 new InitialGuess(initial));

        // Compare each fitted parameter to its certified value with a
        // tolerance relative to the certified magnitude.
        final double[] actual = optimum.getPoint();
        for (int i = 0; i < actual.length; i++) {
            double expected = dataset.getParameter(i);
            double delta = FastMath.abs(errParams * expected);
            Assert.assertEquals(dataset.getName() + ", param #" + i,
                                expected, actual[i], delta);
        }
    }
|
||||
|
||||
@Test
|
||||
public void testKirby2() throws IOException {
|
||||
doTestStRD(StatisticalReferenceDatasetFactory.createKirby2(), 1E-7, 1E-7);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testHahn1() throws IOException {
|
||||
doTestStRD(StatisticalReferenceDatasetFactory.createHahn1(), 1E-7, 1E-4);
|
||||
}
|
||||
|
||||
static class LinearProblem {
|
||||
private final RealMatrix factors;
|
||||
private final double[] target;
|
||||
|
||||
public LinearProblem(double[][] factors, double[] target) {
|
||||
this.factors = new BlockRealMatrix(factors);
|
||||
this.target = target;
|
||||
}
|
||||
|
||||
public Target getTarget() {
|
||||
return new Target(target);
|
||||
}
|
||||
|
||||
public ModelFunction getModelFunction() {
|
||||
return new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] params) {
|
||||
return factors.operate(params);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public ModelFunctionJacobian getModelFunctionJacobian() {
|
||||
return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(double[] params) {
|
||||
return factors.getData();
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,129 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to You under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
* http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
|
||||
* or agreed to in writing, software distributed under the License is
|
||||
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the specific language
|
||||
* governing permissions and limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.commons.math4.optim.InitialGuess;
|
||||
import org.apache.commons.math4.optim.MaxEval;
|
||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Target;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer;
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
import org.junit.Test;
|
||||
import org.junit.Assert;
|
||||
|
||||
@Deprecated
public class AbstractLeastSquaresOptimizerTest {

    /**
     * Creates a minimal concrete optimizer: {@code doOptimize()} evaluates the
     * model once at the start point, records the corresponding cost, and
     * returns — isolating the chi-square/RMS/sigma accessors from any
     * iteration logic.
     */
    public static AbstractLeastSquaresOptimizer createOptimizer() {
        return new AbstractLeastSquaresOptimizer(null) {

            @Override
            protected PointVectorValuePair doOptimize() {
                final double[] params = getStartPoint();
                final double[] res = computeResiduals(computeObjectiveValue(params));
                setCost(computeCost(res));
                return new PointVectorValuePair(params, null);
            }
        };
    }

    /** Chi-square at the certified parameters must equal the dataset's residual sum of squares. */
    @Test
    public void testGetChiSquare() throws IOException {
        final StatisticalReferenceDataset dataset
            = StatisticalReferenceDatasetFactory.createKirby2();
        final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
        // Start point = certified parameters; unit weights.
        final double[] a = dataset.getParameters();
        final double[] y = dataset.getData()[1];
        final double[] w = new double[y.length];
        Arrays.fill(w, 1.0);

        StatisticalReferenceDataset.LeastSquaresProblem problem
            = dataset.getLeastSquaresProblem();

        // MaxEval(1): the dummy optimizer performs exactly one evaluation.
        optimizer.optimize(new MaxEval(1),
                           problem.getModelFunction(),
                           problem.getModelFunctionJacobian(),
                           new Target(y),
                           new Weight(w),
                           new InitialGuess(a));
        final double expected = dataset.getResidualSumOfSquares();
        final double actual = optimizer.getChiSquare();
        Assert.assertEquals(dataset.getName(), expected, actual,
                            1E-11 * expected);
    }

    /** RMS must equal sqrt(residual sum of squares / number of observations). */
    @Test
    public void testGetRMS() throws IOException {
        final StatisticalReferenceDataset dataset
            = StatisticalReferenceDatasetFactory.createKirby2();
        final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
        final double[] a = dataset.getParameters();
        final double[] y = dataset.getData()[1];
        final double[] w = new double[y.length];
        Arrays.fill(w, 1);

        StatisticalReferenceDataset.LeastSquaresProblem problem
            = dataset.getLeastSquaresProblem();

        optimizer.optimize(new MaxEval(1),
                           problem.getModelFunction(),
                           problem.getModelFunctionJacobian(),
                           new Target(y),
                           new Weight(w),
                           new InitialGuess(a));

        final double expected = FastMath
            .sqrt(dataset.getResidualSumOfSquares() /
                  dataset.getNumObservations());
        final double actual = optimizer.getRMS();
        Assert.assertEquals(dataset.getName(), expected, actual,
                            1E-11 * expected);
    }

    /**
     * Sigma estimates, rescaled by sqrt(chi2 / dof), must match the dataset's
     * certified parameter standard deviations.
     */
    @Test
    public void testComputeSigma() throws IOException {
        final StatisticalReferenceDataset dataset
            = StatisticalReferenceDatasetFactory.createKirby2();
        final AbstractLeastSquaresOptimizer optimizer = createOptimizer();
        final double[] a = dataset.getParameters();
        final double[] y = dataset.getData()[1];
        final double[] w = new double[y.length];
        Arrays.fill(w, 1);

        StatisticalReferenceDataset.LeastSquaresProblem problem
            = dataset.getLeastSquaresProblem();

        final PointVectorValuePair optimum
            = optimizer.optimize(new MaxEval(1),
                                 problem.getModelFunction(),
                                 problem.getModelFunctionJacobian(),
                                 new Target(y),
                                 new Weight(w),
                                 new InitialGuess(a));

        final double[] sig = optimizer.computeSigma(optimum.getPoint(), 1e-14);

        // Degrees of freedom = observations - parameters.
        final int dof = y.length - a.length;
        final double[] expected = dataset.getParametersStandardDeviations();
        for (int i = 0; i < sig.length; i++) {
            final double actual = FastMath.sqrt(optimizer.getChiSquare() / dof) * sig[i];
            Assert.assertEquals(dataset.getName() + ", parameter #" + i,
                                expected[i], actual, 1e-6 * expected[i]);
        }
    }
}
|
|
@ -1,335 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to You under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
* http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
|
||||
* or agreed to in writing, software distributed under the License is
|
||||
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the specific language
|
||||
* governing permissions and limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import java.awt.geom.Point2D;
|
||||
|
||||
import org.apache.commons.math4.optim.InitialGuess;
|
||||
import org.apache.commons.math4.optim.MaxEval;
|
||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Target;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer;
|
||||
import org.apache.commons.math4.stat.descriptive.StatisticalSummary;
|
||||
import org.apache.commons.math4.stat.descriptive.SummaryStatistics;
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
import org.junit.Test;
|
||||
import org.junit.Assert;
|
||||
|
||||
/**
 * This class demonstrates the main functionality of the
 * {@link AbstractLeastSquaresOptimizer}, common to the
 * optimizer implementations in package
 * {@link org.apache.commons.math4.optim.nonlinear.vector.jacobian}.
 * <br/>
 * Not enabled by default, as the class name does not end with "Test".
 * <br/>
 * Invoke by running
 * <pre><code>
 *  mvn test -Dtest=AbstractLeastSquaresOptimizerTestValidation
 * </code></pre>
 * or by running
 * <pre><code>
 *  mvn test -Dtest=AbstractLeastSquaresOptimizerTestValidation -DargLine="-DmcRuns=1234 -server"
 * </code></pre>
 */
@Deprecated
public class AbstractLeastSquaresOptimizerTestValidation {
    /** Number of Monte-Carlo runs; overridable via the "mcRuns" system property. */
    private static final int MONTE_CARLO_RUNS = Integer.parseInt(System.getProperty("mcRuns",
                                                                                    "100"));

    /**
     * Using a Monte-Carlo procedure, this test checks the error estimations
     * as provided by the square-root of the diagonal elements of the
     * covariance matrix.
     * <br/>
     * The test generates sets of observations, each sampled from
     * a Gaussian distribution.
     * <br/>
     * The optimization problem solved is defined in class
     * {@link StraightLineProblem}.
     * <br/>
     * The output (on stdout) will be a table summarizing the distribution
     * of parameters generated by the Monte-Carlo process and by the direct
     * estimation provided by the diagonal elements of the covariance matrix.
     */
    @Test
    public void testParametersErrorMonteCarloObservations() {
        // Error on the observations.
        final double yError = 15;

        // True values of the parameters.
        final double slope = 123.456;
        final double offset = -98.765;

        // Samples generator.
        final RandomStraightLinePointGenerator lineGenerator
            = new RandomStraightLinePointGenerator(slope, offset,
                                                   yError,
                                                   -1e3, 1e4,
                                                   138577L);

        // Number of observations.
        final int numObs = 100; // XXX Should be a command-line option.
        // number of parameters.
        final int numParams = 2;

        // Parameters found for each of Monte-Carlo run.
        final SummaryStatistics[] paramsFoundByDirectSolution = new SummaryStatistics[numParams];
        // Sigma estimations (square-root of the diagonal elements of the
        // covariance matrix), for each Monte-Carlo run.
        final SummaryStatistics[] sigmaEstimate = new SummaryStatistics[numParams];

        // Initialize statistics accumulators.
        for (int i = 0; i < numParams; i++) {
            paramsFoundByDirectSolution[i] = new SummaryStatistics();
            sigmaEstimate[i] = new SummaryStatistics();
        }

        // Dummy optimizer (to compute the covariance matrix).
        final AbstractLeastSquaresOptimizer optim = new DummyOptimizer();
        final double[] init = { slope, offset };

        // Monte-Carlo (generates many sets of observations).
        final int mcRepeat = MONTE_CARLO_RUNS;
        int mcCount = 0;
        while (mcCount < mcRepeat) {
            // Observations.
            final Point2D.Double[] obs = lineGenerator.generate(numObs);

            final StraightLineProblem problem = new StraightLineProblem(yError);
            for (int i = 0; i < numObs; i++) {
                final Point2D.Double p = obs[i];
                problem.addPoint(p.x, p.y);
            }

            // Direct solution (using simple regression).
            final double[] regress = problem.solve();

            // Estimation of the standard deviation (diagonal elements of the
            // covariance matrix).
            final PointVectorValuePair optimum
                = optim.optimize(new MaxEval(Integer.MAX_VALUE),
                                 problem.getModelFunction(),
                                 problem.getModelFunctionJacobian(),
                                 new Target(problem.target()),
                                 new Weight(problem.weight()),
                                 new InitialGuess(init));
            final double[] sigma = optim.computeSigma(optimum.getPoint(), 1e-14);

            // Accumulate statistics.
            for (int i = 0; i < numParams; i++) {
                paramsFoundByDirectSolution[i].addValue(regress[i]);
                sigmaEstimate[i].addValue(sigma[i]);
            }

            // Next Monte-Carlo.
            ++mcCount;
        }

        // Print statistics.
        final String line = "--------------------------------------------------------------";
        System.out.println(" True value Mean Std deviation");
        for (int i = 0; i < numParams; i++) {
            System.out.println(line);
            System.out.println("Parameter #" + i);

            StatisticalSummary s = paramsFoundByDirectSolution[i].getSummary();
            System.out.printf(" %+.6e %+.6e %+.6e\n",
                              init[i],
                              s.getMean(),
                              s.getStandardDeviation());

            s = sigmaEstimate[i].getSummary();
            System.out.printf("sigma: %+.6e (%+.6e)\n",
                              s.getMean(),
                              s.getStandardDeviation());
        }
        System.out.println(line);

        // Check the error estimation.
        for (int i = 0; i < numParams; i++) {
            Assert.assertEquals(paramsFoundByDirectSolution[i].getSummary().getStandardDeviation(),
                                sigmaEstimate[i].getSummary().getMean(),
                                8e-2);
        }
    }

    /**
     * In this test, the set of observations is fixed.
     * Using a Monte-Carlo procedure, it generates sets of parameters,
     * and determines the parameter change that will result in the
     * normalized chi-square becoming larger by one than the value from
     * the best fit solution.
     * <br/>
     * The optimization problem solved is defined in class
     * {@link StraightLineProblem}.
     * <br/>
     * The output (on stdout) will be a list of lines containing:
     * <ul>
     *  <li>slope of the straight line,</li>
     *  <li>intercept of the straight line,</li>
     *  <li>chi-square of the solution defined by the above two values.</li>
     * </ul>
     * The output is separated into two blocks (with a blank line between
     * them); the first block will contain all parameter sets for which
     * {@code chi2 < chi2_b + 1}
     * and the second block, all sets for which
     * {@code chi2 >= chi2_b + 1}
     * where {@code chi2_b} is the lowest chi-square (corresponding to the
     * best solution).
     */
    @Test
    public void testParametersErrorMonteCarloParameters() {
        // Error on the observations.
        final double yError = 15;

        // True values of the parameters.
        final double slope = 123.456;
        final double offset = -98.765;

        // Samples generator.
        final RandomStraightLinePointGenerator lineGenerator
            = new RandomStraightLinePointGenerator(slope, offset,
                                                   yError,
                                                   -1e3, 1e4,
                                                   13839013L);

        // Number of observations.
        final int numObs = 10;

        // Create a single set of observations.
        final Point2D.Double[] obs = lineGenerator.generate(numObs);

        final StraightLineProblem problem = new StraightLineProblem(yError);
        for (int i = 0; i < numObs; i++) {
            final Point2D.Double p = obs[i];
            problem.addPoint(p.x, p.y);
        }

        // Direct solution (using simple regression).
        final double[] regress = problem.solve();

        // Dummy optimizer (to compute the chi-square).
        final AbstractLeastSquaresOptimizer optim = new DummyOptimizer();
        // Get chi-square of the best parameters set for the given set of
        // observations.
        final double bestChi2N = getChi2N(optim, problem, regress);
        final double[] sigma = optim.computeSigma(regress, 1e-14);

        // Monte-Carlo (generates a grid of parameters).
        final int mcRepeat = MONTE_CARLO_RUNS;
        final int gridSize = (int) FastMath.sqrt(mcRepeat);

        // Parameters found for each of Monte-Carlo run.
        // Index 0 = slope
        // Index 1 = offset
        // Index 2 = normalized chi2
        final List<double[]> paramsAndChi2 = new ArrayList<double[]>(gridSize * gridSize);

        // Explore a grid of +/- 5 sigma around the true parameters.
        final double slopeRange = 10 * sigma[0];
        final double offsetRange = 10 * sigma[1];
        final double minSlope = slope - 0.5 * slopeRange;
        final double minOffset = offset - 0.5 * offsetRange;
        final double deltaSlope = slopeRange/ gridSize;
        final double deltaOffset = offsetRange / gridSize;
        for (int i = 0; i < gridSize; i++) {
            final double s = minSlope + i * deltaSlope;
            for (int j = 0; j < gridSize; j++) {
                final double o = minOffset + j * deltaOffset;
                final double chi2N = getChi2N(optim, problem, new double[] {s, o});

                paramsAndChi2.add(new double[] {s, o, chi2N});
            }
        }

        // Output (for use with "gnuplot").

        // Some info.

        // For plotting separately sets of parameters that have a large chi2.
        final double chi2NPlusOne = bestChi2N + 1;
        int numLarger = 0;

        final String lineFmt = "%+.10e %+.10e %.8e\n";

        // Point with smallest chi-square.
        System.out.printf(lineFmt, regress[0], regress[1], bestChi2N);
        System.out.println(); // Empty line.

        // Points within the confidence interval.
        for (double[] d : paramsAndChi2) {
            if (d[2] <= chi2NPlusOne) {
                System.out.printf(lineFmt, d[0], d[1], d[2]);
            }
        }
        System.out.println(); // Empty line.

        // Points outside the confidence interval.
        for (double[] d : paramsAndChi2) {
            if (d[2] > chi2NPlusOne) {
                ++numLarger;
                System.out.printf(lineFmt, d[0], d[1], d[2]);
            }
        }
        System.out.println(); // Empty line.

        System.out.println("# sigma=" + Arrays.toString(sigma));
        System.out.println("# " + numLarger + " sets filtered out");
    }

    /**
     * Optimizes (single evaluation via {@link DummyOptimizer}) and computes
     * the chi-square normalized by the degrees of freedom.
     *
     * @param optim Optimizer used to evaluate the chi-square.
     * @param problem Problem providing model, target and weights.
     * @param params Parameters at which the chi-square is evaluated.
     * @return the normalized chi-square.
     */
    private double getChi2N(AbstractLeastSquaresOptimizer optim,
                            StraightLineProblem problem,
                            double[] params) {
        final double[] t = problem.target();
        final double[] w = problem.weight();

        optim.optimize(new MaxEval(Integer.MAX_VALUE),
                       problem.getModelFunction(),
                       problem.getModelFunctionJacobian(),
                       new Target(t),
                       new Weight(w),
                       new InitialGuess(params));

        return optim.getChiSquare() / (t.length - params.length);
    }
}
|
||||
|
||||
/**
|
||||
* A dummy optimizer.
|
||||
* Used for computing the covariance matrix.
|
||||
*/
|
||||
@Deprecated
|
||||
class DummyOptimizer extends AbstractLeastSquaresOptimizer {
|
||||
public DummyOptimizer() {
|
||||
super(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* This method does nothing and returns a dummy value.
|
||||
*/
|
||||
@Override
|
||||
public PointVectorValuePair doOptimize() {
|
||||
final double[] params = getStartPoint();
|
||||
final double[] res = computeResiduals(computeObjectiveValue(params));
|
||||
setCost(computeCost(res));
|
||||
return new PointVectorValuePair(params, null);
|
||||
}
|
||||
}
|
|
@ -1,179 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
import org.apache.commons.math4.util.MathUtils;
|
||||
|
||||
/**
 * Class that models a circle.
 * The parameters of problem are:
 * <ul>
 *  <li>the x-coordinate of the circle center,</li>
 *  <li>the y-coordinate of the circle center,</li>
 *  <li>the radius of the circle.</li>
 * </ul>
 * The model functions are:
 * <ul>
 *  <li>for each triplet (cx, cy, r), the (x, y) coordinates of a point on the
 *   corresponding circle.</li>
 * </ul>
 */
@Deprecated
class CircleProblem {
    /** Cloud of points assumed to be fitted by a circle. */
    private final ArrayList<double[]> points;
    /** Error on the x-coordinate of the points. */
    private final double xSigma;
    /** Error on the y-coordinate of the points. */
    private final double ySigma;
    /** Number of points on the circumference (when searching which
        model point is closest to a given "observation"). */
    private final int resolution;

    /**
     * @param xError Assumed error for the x-coordinate of the circle points.
     * @param yError Assumed error for the y-coordinate of the circle points.
     * @param searchResolution Number of points to try when searching the one
     * that is closest to a given "observed" point.
     */
    public CircleProblem(double xError,
                         double yError,
                         int searchResolution) {
        points = new ArrayList<double[]>();
        xSigma = xError;
        ySigma = yError;
        resolution = searchResolution;
    }

    /**
     * Uses a default search resolution of 500 circumference samples.
     *
     * @param xError Assumed error for the x-coordinate of the circle points.
     * @param yError Assumed error for the y-coordinate of the circle points.
     */
    public CircleProblem(double xError,
                         double yError) {
        this(xError, yError, 500);
    }

    /**
     * Adds an observed point to the cloud.
     *
     * @param px x-coordinate of the observed point.
     * @param py y-coordinate of the observed point.
     */
    public void addPoint(double px, double py) {
        points.add(new double[] { px, py });
    }

    /**
     * @return the observed coordinates, flattened as
     * {@code [x0, y0, x1, y1, ...]} (the optimization target).
     */
    public double[] target() {
        final double[] t = new double[points.size() * 2];
        for (int i = 0; i < points.size(); i++) {
            final double[] p = points.get(i);
            final int index = i * 2;
            t[index] = p[0];
            t[index + 1] = p[1];
        }

        return t;
    }

    /**
     * @return per-coordinate weights, {@code 1 / sigma^2} for the respective
     * axis, in the same flattened layout as {@link #target()}.
     */
    public double[] weight() {
        final double wX = 1 / (xSigma * xSigma);
        final double wY = 1 / (ySigma * ySigma);
        final double[] w = new double[points.size() * 2];
        for (int i = 0; i < points.size(); i++) {
            final int index = i * 2;
            w[index] = wX;
            w[index + 1] = wY;
        }

        return w;
    }

    /**
     * @return the model function: for parameters (cx, cy, r), it maps each
     * observed point to the closest of {@code resolution} sample points on
     * the candidate circle.
     */
    public ModelFunction getModelFunction() {
        return new ModelFunction(new MultivariateVectorFunction() {
            public double[] value(double[] params) {
                final double cx = params[0];
                final double cy = params[1];
                final double r = params[2];

                final double[] model = new double[points.size() * 2];

                final double deltaTheta = MathUtils.TWO_PI / resolution;
                for (int i = 0; i < points.size(); i++) {
                    final double[] p = points.get(i);
                    final double px = p[0];
                    final double py = p[1];

                    double bestX = 0;
                    double bestY = 0;
                    double dMin = Double.POSITIVE_INFINITY;

                    // Find the angle for which the circle passes closest to the
                    // current point, using "resolution" sample points along the
                    // circumference.
                    for (double theta = 0; theta <= MathUtils.TWO_PI; theta += deltaTheta) {
                        final double currentX = cx + r * FastMath.cos(theta);
                        final double currentY = cy + r * FastMath.sin(theta);
                        final double dX = currentX - px;
                        final double dY = currentY - py;
                        // Squared distance is sufficient for the argmin.
                        final double d = dX * dX + dY * dY;
                        if (d < dMin) {
                            dMin = d;
                            bestX = currentX;
                            bestY = currentY;
                        }
                    }

                    final int index = i * 2;
                    model[index] = bestX;
                    model[index + 1] = bestY;
                }

                return model;
            }
        });
    }

    /** @return the Jacobian of the model, computed by {@link #jacobian(double[])}. */
    public ModelFunctionJacobian getModelFunctionJacobian() {
        return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
            public double[][] value(double[] point) {
                return jacobian(point);
            }
        });
    }

    /**
     * Analytical Jacobian of the model with respect to (cx, cy, r): a
     * (2 * number of points) x 3 matrix.
     *
     * @param params Current parameters {cx, cy, r}.
     * @return the Jacobian matrix.
     */
    private double[][] jacobian(double[] params) {
        final double[][] jacobian = new double[points.size() * 2][3];

        for (int i = 0; i < points.size(); i++) {
            final int index = i * 2;
            // Partial derivative wrt x-coordinate of center.
            jacobian[index][0] = 1;
            jacobian[index + 1][0] = 0;
            // Partial derivative wrt y-coordinate of center.
            jacobian[index][1] = 0;
            jacobian[index + 1][1] = 1;
            // Partial derivative wrt radius.
            final double[] p = points.get(i);
            jacobian[index][2] = (p[0] - params[0]) / params[2];
            jacobian[index + 1][2] = (p[1] - params[1]) / params[2];
        }

        return jacobian;
    }
}
|
|
@ -1,99 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
|
||||
/**
|
||||
* Class used in the tests.
|
||||
*/
|
||||
@Deprecated
|
||||
class CircleVectorial {
|
||||
private ArrayList<Vector2D> points;
|
||||
|
||||
public CircleVectorial() {
|
||||
points = new ArrayList<Vector2D>();
|
||||
}
|
||||
|
||||
public void addPoint(double px, double py) {
|
||||
points.add(new Vector2D(px, py));
|
||||
}
|
||||
|
||||
public int getN() {
|
||||
return points.size();
|
||||
}
|
||||
|
||||
public double getRadius(Vector2D center) {
|
||||
double r = 0;
|
||||
for (Vector2D point : points) {
|
||||
r += point.distance(center);
|
||||
}
|
||||
return r / points.size();
|
||||
}
|
||||
|
||||
public ModelFunction getModelFunction() {
|
||||
return new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] params) {
|
||||
Vector2D center = new Vector2D(params[0], params[1]);
|
||||
double radius = getRadius(center);
|
||||
double[] residuals = new double[points.size()];
|
||||
for (int i = 0; i < residuals.length; i++) {
|
||||
residuals[i] = points.get(i).distance(center) - radius;
|
||||
}
|
||||
|
||||
return residuals;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public ModelFunctionJacobian getModelFunctionJacobian() {
|
||||
return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(double[] params) {
|
||||
final int n = points.size();
|
||||
final Vector2D center = new Vector2D(params[0], params[1]);
|
||||
|
||||
double dRdX = 0;
|
||||
double dRdY = 0;
|
||||
for (Vector2D pk : points) {
|
||||
double dk = pk.distance(center);
|
||||
dRdX += (center.getX() - pk.getX()) / dk;
|
||||
dRdY += (center.getY() - pk.getY()) / dk;
|
||||
}
|
||||
dRdX /= n;
|
||||
dRdY /= n;
|
||||
|
||||
// Jacobian of the radius residuals.
|
||||
double[][] jacobian = new double[n][2];
|
||||
for (int i = 0; i < n; i++) {
|
||||
final Vector2D pi = points.get(i);
|
||||
final double di = pi.distance(center);
|
||||
jacobian[i][0] = (center.getX() - pi.getX()) / di - dRdX;
|
||||
jacobian[i][1] = (center.getY() - pi.getY()) / di - dRdY;
|
||||
}
|
||||
|
||||
return jacobian;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -1,173 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.math4.exception.ConvergenceException;
|
||||
import org.apache.commons.math4.exception.MathUnsupportedOperationException;
|
||||
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
||||
import org.apache.commons.math4.optim.InitialGuess;
|
||||
import org.apache.commons.math4.optim.MaxEval;
|
||||
import org.apache.commons.math4.optim.SimpleBounds;
|
||||
import org.apache.commons.math4.optim.SimpleVectorValueChecker;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Target;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.GaussNewtonOptimizer;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* <p>Some of the unit tests are re-implementations of the MINPACK <a
|
||||
* href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
|
||||
* href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
|
||||
* The redistribution policy for MINPACK is available <a
|
||||
* href="http://www.netlib.org/minpack/disclaimer">here</a>, for
|
||||
* convenience, it is reproduced below.</p>
|
||||
|
||||
* <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
|
||||
* <tr><td>
|
||||
* Minpack Copyright Notice (1999) University of Chicago.
|
||||
* All rights reserved
|
||||
* </td></tr>
|
||||
* <tr><td>
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* <ol>
|
||||
* <li>Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.</li>
|
||||
* <li>Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.</li>
|
||||
* <li>The end-user documentation included with the redistribution, if any,
|
||||
* must include the following acknowledgment:
|
||||
* <code>This product includes software developed by the University of
|
||||
* Chicago, as Operator of Argonne National Laboratory.</code>
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.</li>
|
||||
* <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
|
||||
* WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
|
||||
* UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
|
||||
* THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
|
||||
* OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
|
||||
* OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
|
||||
* USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
|
||||
* THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
|
||||
* DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
|
||||
* UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
|
||||
* BE CORRECTED.</strong></li>
|
||||
* <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
|
||||
* HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
|
||||
* ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
|
||||
* INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
|
||||
* ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
* PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
|
||||
* SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
|
||||
* (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
|
||||
* EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
|
||||
* POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
|
||||
* <ol></td></tr>
|
||||
* </table>
|
||||
|
||||
* @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
|
||||
* @author Burton S. Garbow (original fortran minpack tests)
|
||||
* @author Kenneth E. Hillstrom (original fortran minpack tests)
|
||||
* @author Jorge J. More (original fortran minpack tests)
|
||||
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
|
||||
*/
|
||||
@Deprecated
|
||||
public class GaussNewtonOptimizerTest
|
||||
extends AbstractLeastSquaresOptimizerAbstractTest {
|
||||
|
||||
@Override
|
||||
public AbstractLeastSquaresOptimizer createOptimizer() {
|
||||
return new GaussNewtonOptimizer(new SimpleVectorValueChecker(1.0e-6, 1.0e-6));
|
||||
}
|
||||
|
||||
@Test(expected=MathUnsupportedOperationException.class)
|
||||
public void testConstraintsUnsupported() {
|
||||
createOptimizer().optimize(new MaxEval(100),
|
||||
new Target(new double[] { 2 }),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 1, 2 }),
|
||||
new SimpleBounds(new double[] { -10, 0 },
|
||||
new double[] { 20, 30 }));
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test(expected = ConvergenceException.class)
|
||||
public void testMoreEstimatedParametersSimple() {
|
||||
/*
|
||||
* Exception is expected with this optimizer
|
||||
*/
|
||||
super.testMoreEstimatedParametersSimple();
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test(expected=ConvergenceException.class)
|
||||
public void testMoreEstimatedParametersUnsorted() {
|
||||
/*
|
||||
* Exception is expected with this optimizer
|
||||
*/
|
||||
super.testMoreEstimatedParametersUnsorted();
|
||||
}
|
||||
|
||||
@Test(expected=TooManyEvaluationsException.class)
|
||||
public void testMaxEvaluations() throws Exception {
|
||||
CircleVectorial circle = new CircleVectorial();
|
||||
circle.addPoint( 30.0, 68.0);
|
||||
circle.addPoint( 50.0, -6.0);
|
||||
circle.addPoint(110.0, -20.0);
|
||||
circle.addPoint( 35.0, 15.0);
|
||||
circle.addPoint( 45.0, 97.0);
|
||||
|
||||
GaussNewtonOptimizer optimizer
|
||||
= new GaussNewtonOptimizer(new SimpleVectorValueChecker(1e-30, 1e-30));
|
||||
|
||||
optimizer.optimize(new MaxEval(100),
|
||||
circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
new Target(new double[] { 0, 0, 0, 0, 0 }),
|
||||
new Weight(new double[] { 1, 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 98.680, 47.345 }));
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test(expected=ConvergenceException.class)
|
||||
public void testCircleFittingBadInit() {
|
||||
/*
|
||||
* This test does not converge with this optimizer.
|
||||
*/
|
||||
super.testCircleFittingBadInit();
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test(expected = ConvergenceException.class)
|
||||
public void testHahn1()
|
||||
throws IOException {
|
||||
/*
|
||||
* TODO This test leads to a singular problem with the Gauss-Newton
|
||||
* optimizer. This should be inquired.
|
||||
*/
|
||||
super.testHahn1();
|
||||
}
|
||||
}
|
|
@ -1,375 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.exception.DimensionMismatchException;
|
||||
import org.apache.commons.math4.exception.MathUnsupportedOperationException;
|
||||
import org.apache.commons.math4.exception.TooManyEvaluationsException;
|
||||
import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
|
||||
import org.apache.commons.math4.linear.SingularMatrixException;
|
||||
import org.apache.commons.math4.optim.InitialGuess;
|
||||
import org.apache.commons.math4.optim.MaxEval;
|
||||
import org.apache.commons.math4.optim.PointVectorValuePair;
|
||||
import org.apache.commons.math4.optim.SimpleBounds;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Target;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.Weight;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
import org.apache.commons.math4.util.Precision;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* <p>Some of the unit tests are re-implementations of the MINPACK <a
|
||||
* href="http://www.netlib.org/minpack/ex/file17">file17</a> and <a
|
||||
* href="http://www.netlib.org/minpack/ex/file22">file22</a> test files.
|
||||
* The redistribution policy for MINPACK is available <a
|
||||
* href="http://www.netlib.org/minpack/disclaimer">here</a>, for
|
||||
* convenience, it is reproduced below.</p>
|
||||
|
||||
* <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
|
||||
* <tr><td>
|
||||
* Minpack Copyright Notice (1999) University of Chicago.
|
||||
* All rights reserved
|
||||
* </td></tr>
|
||||
* <tr><td>
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* <ol>
|
||||
* <li>Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.</li>
|
||||
* <li>Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials provided
|
||||
* with the distribution.</li>
|
||||
* <li>The end-user documentation included with the redistribution, if any,
|
||||
* must include the following acknowledgment:
|
||||
* <code>This product includes software developed by the University of
|
||||
* Chicago, as Operator of Argonne National Laboratory.</code>
|
||||
* Alternately, this acknowledgment may appear in the software itself,
|
||||
* if and wherever such third-party acknowledgments normally appear.</li>
|
||||
* <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
|
||||
* WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
|
||||
* UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
|
||||
* THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
|
||||
* OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
|
||||
* OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
|
||||
* USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
|
||||
* THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
|
||||
* DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
|
||||
* UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
|
||||
* BE CORRECTED.</strong></li>
|
||||
* <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
|
||||
* HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
|
||||
* ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
|
||||
* INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
|
||||
* ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
* PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
|
||||
* SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
|
||||
* (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
|
||||
* EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
|
||||
* POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
|
||||
* <ol></td></tr>
|
||||
* </table>
|
||||
|
||||
* @author Argonne National Laboratory. MINPACK project. March 1980 (original fortran minpack tests)
|
||||
* @author Burton S. Garbow (original fortran minpack tests)
|
||||
* @author Kenneth E. Hillstrom (original fortran minpack tests)
|
||||
* @author Jorge J. More (original fortran minpack tests)
|
||||
* @author Luc Maisonobe (non-minpack tests and minpack tests Java translation)
|
||||
*/
|
||||
@Deprecated
|
||||
public class LevenbergMarquardtOptimizerTest
|
||||
extends AbstractLeastSquaresOptimizerAbstractTest {
|
||||
@Override
|
||||
public AbstractLeastSquaresOptimizer createOptimizer() {
|
||||
return new LevenbergMarquardtOptimizer();
|
||||
}
|
||||
|
||||
@Test(expected=MathUnsupportedOperationException.class)
|
||||
public void testConstraintsUnsupported() {
|
||||
createOptimizer().optimize(new MaxEval(100),
|
||||
new Target(new double[] { 2 }),
|
||||
new Weight(new double[] { 1 }),
|
||||
new InitialGuess(new double[] { 1, 2 }),
|
||||
new SimpleBounds(new double[] { -10, 0 },
|
||||
new double[] { 20, 30 }));
|
||||
}
|
||||
|
||||
@Override
|
||||
@Test(expected=SingularMatrixException.class)
|
||||
public void testNonInvertible() {
|
||||
/*
|
||||
* Overrides the method from parent class, since the default singularity
|
||||
* threshold (1e-14) does not trigger the expected exception.
|
||||
*/
|
||||
LinearProblem problem = new LinearProblem(new double[][] {
|
||||
{ 1, 2, -3 },
|
||||
{ 2, 1, 3 },
|
||||
{ -3, 0, -9 }
|
||||
}, new double[] { 1, 1, 1 });
|
||||
|
||||
AbstractLeastSquaresOptimizer optimizer = createOptimizer();
|
||||
PointVectorValuePair optimum
|
||||
= optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
problem.getTarget(),
|
||||
new Weight(new double[] { 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 0, 0, 0 }));
|
||||
Assert.assertTrue(FastMath.sqrt(optimizer.getTargetSize()) * optimizer.getRMS() > 0.6);
|
||||
|
||||
optimizer.computeCovariances(optimum.getPoint(), 1.5e-14);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testControlParameters() {
|
||||
CircleVectorial circle = new CircleVectorial();
|
||||
circle.addPoint( 30.0, 68.0);
|
||||
circle.addPoint( 50.0, -6.0);
|
||||
circle.addPoint(110.0, -20.0);
|
||||
circle.addPoint( 35.0, 15.0);
|
||||
circle.addPoint( 45.0, 97.0);
|
||||
checkEstimate(circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
0.1, 10, 1.0e-14, 1.0e-16, 1.0e-10, false);
|
||||
checkEstimate(circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
0.1, 10, 1.0e-15, 1.0e-17, 1.0e-10, true);
|
||||
checkEstimate(circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
0.1, 5, 1.0e-15, 1.0e-16, 1.0e-10, true);
|
||||
circle.addPoint(300, -300);
|
||||
checkEstimate(circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
0.1, 20, 1.0e-18, 1.0e-16, 1.0e-10, true);
|
||||
}
|
||||
|
||||
private void checkEstimate(ModelFunction problem,
|
||||
ModelFunctionJacobian problemJacobian,
|
||||
double initialStepBoundFactor, int maxCostEval,
|
||||
double costRelativeTolerance, double parRelativeTolerance,
|
||||
double orthoTolerance, boolean shouldFail) {
|
||||
try {
|
||||
LevenbergMarquardtOptimizer optimizer
|
||||
= new LevenbergMarquardtOptimizer(initialStepBoundFactor,
|
||||
costRelativeTolerance,
|
||||
parRelativeTolerance,
|
||||
orthoTolerance,
|
||||
Precision.SAFE_MIN);
|
||||
optimizer.optimize(new MaxEval(maxCostEval),
|
||||
problem,
|
||||
problemJacobian,
|
||||
new Target(new double[] { 0, 0, 0, 0, 0 }),
|
||||
new Weight(new double[] { 1, 1, 1, 1, 1 }),
|
||||
new InitialGuess(new double[] { 98.680, 47.345 }));
|
||||
Assert.assertTrue(!shouldFail);
|
||||
} catch (DimensionMismatchException ee) {
|
||||
Assert.assertTrue(shouldFail);
|
||||
} catch (TooManyEvaluationsException ee) {
|
||||
Assert.assertTrue(shouldFail);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Non-linear test case: fitting of decay curve (from Chapter 8 of
|
||||
* Bevington's textbook, "Data reduction and analysis for the physical sciences").
|
||||
* XXX The expected ("reference") values may not be accurate and the tolerance too
|
||||
* relaxed for this test to be currently really useful (the issue is under
|
||||
* investigation).
|
||||
*/
|
||||
@Test
|
||||
public void testBevington() {
|
||||
final double[][] dataPoints = {
|
||||
// column 1 = times
|
||||
{ 15, 30, 45, 60, 75, 90, 105, 120, 135, 150,
|
||||
165, 180, 195, 210, 225, 240, 255, 270, 285, 300,
|
||||
315, 330, 345, 360, 375, 390, 405, 420, 435, 450,
|
||||
465, 480, 495, 510, 525, 540, 555, 570, 585, 600,
|
||||
615, 630, 645, 660, 675, 690, 705, 720, 735, 750,
|
||||
765, 780, 795, 810, 825, 840, 855, 870, 885, },
|
||||
// column 2 = measured counts
|
||||
{ 775, 479, 380, 302, 185, 157, 137, 119, 110, 89,
|
||||
74, 61, 66, 68, 48, 54, 51, 46, 55, 29,
|
||||
28, 37, 49, 26, 35, 29, 31, 24, 25, 35,
|
||||
24, 30, 26, 28, 21, 18, 20, 27, 17, 17,
|
||||
14, 17, 24, 11, 22, 17, 12, 10, 13, 16,
|
||||
9, 9, 14, 21, 17, 13, 12, 18, 10, },
|
||||
};
|
||||
|
||||
final BevingtonProblem problem = new BevingtonProblem();
|
||||
|
||||
final int len = dataPoints[0].length;
|
||||
final double[] weights = new double[len];
|
||||
for (int i = 0; i < len; i++) {
|
||||
problem.addPoint(dataPoints[0][i],
|
||||
dataPoints[1][i]);
|
||||
|
||||
weights[i] = 1 / dataPoints[1][i];
|
||||
}
|
||||
|
||||
final LevenbergMarquardtOptimizer optimizer
|
||||
= new LevenbergMarquardtOptimizer();
|
||||
|
||||
final PointVectorValuePair optimum
|
||||
= optimizer.optimize(new MaxEval(100),
|
||||
problem.getModelFunction(),
|
||||
problem.getModelFunctionJacobian(),
|
||||
new Target(dataPoints[1]),
|
||||
new Weight(weights),
|
||||
new InitialGuess(new double[] { 10, 900, 80, 27, 225 }));
|
||||
|
||||
final double[] solution = optimum.getPoint();
|
||||
final double[] expectedSolution = { 10.4, 958.3, 131.4, 33.9, 205.0 };
|
||||
|
||||
final double[][] covarMatrix = optimizer.computeCovariances(solution, 1e-14);
|
||||
final double[][] expectedCovarMatrix = {
|
||||
{ 3.38, -3.69, 27.98, -2.34, -49.24 },
|
||||
{ -3.69, 2492.26, 81.89, -69.21, -8.9 },
|
||||
{ 27.98, 81.89, 468.99, -44.22, -615.44 },
|
||||
{ -2.34, -69.21, -44.22, 6.39, 53.80 },
|
||||
{ -49.24, -8.9, -615.44, 53.8, 929.45 }
|
||||
};
|
||||
|
||||
final int numParams = expectedSolution.length;
|
||||
|
||||
// Check that the computed solution is within the reference error range.
|
||||
for (int i = 0; i < numParams; i++) {
|
||||
final double error = FastMath.sqrt(expectedCovarMatrix[i][i]);
|
||||
Assert.assertEquals("Parameter " + i, expectedSolution[i], solution[i], error);
|
||||
}
|
||||
|
||||
// Check that each entry of the computed covariance matrix is within 10%
|
||||
// of the reference matrix entry.
|
||||
for (int i = 0; i < numParams; i++) {
|
||||
for (int j = 0; j < numParams; j++) {
|
||||
Assert.assertEquals("Covariance matrix [" + i + "][" + j + "]",
|
||||
expectedCovarMatrix[i][j],
|
||||
covarMatrix[i][j],
|
||||
FastMath.abs(0.1 * expectedCovarMatrix[i][j]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCircleFitting2() {
|
||||
final double xCenter = 123.456;
|
||||
final double yCenter = 654.321;
|
||||
final double xSigma = 10;
|
||||
final double ySigma = 15;
|
||||
final double radius = 111.111;
|
||||
// The test is extremely sensitive to the seed.
|
||||
final long seed = 59421061L;
|
||||
final RandomCirclePointGenerator factory
|
||||
= new RandomCirclePointGenerator(xCenter, yCenter, radius,
|
||||
xSigma, ySigma,
|
||||
seed);
|
||||
final CircleProblem circle = new CircleProblem(xSigma, ySigma);
|
||||
|
||||
final int numPoints = 10;
|
||||
for (Vector2D p : factory.generate(numPoints)) {
|
||||
circle.addPoint(p.getX(), p.getY());
|
||||
}
|
||||
|
||||
// First guess for the center's coordinates and radius.
|
||||
final double[] init = { 90, 659, 115 };
|
||||
|
||||
final LevenbergMarquardtOptimizer optimizer
|
||||
= new LevenbergMarquardtOptimizer();
|
||||
final PointVectorValuePair optimum = optimizer.optimize(new MaxEval(100),
|
||||
circle.getModelFunction(),
|
||||
circle.getModelFunctionJacobian(),
|
||||
new Target(circle.target()),
|
||||
new Weight(circle.weight()),
|
||||
new InitialGuess(init));
|
||||
|
||||
final double[] paramFound = optimum.getPoint();
|
||||
|
||||
// Retrieve errors estimation.
|
||||
final double[] asymptoticStandardErrorFound = optimizer.computeSigma(paramFound, 1e-14);
|
||||
|
||||
// Check that the parameters are found within the assumed error bars.
|
||||
Assert.assertEquals(xCenter, paramFound[0], asymptoticStandardErrorFound[0]);
|
||||
Assert.assertEquals(yCenter, paramFound[1], asymptoticStandardErrorFound[1]);
|
||||
Assert.assertEquals(radius, paramFound[2], asymptoticStandardErrorFound[2]);
|
||||
}
|
||||
|
||||
private static class BevingtonProblem {
|
||||
private List<Double> time;
|
||||
private List<Double> count;
|
||||
|
||||
public BevingtonProblem() {
|
||||
time = new ArrayList<Double>();
|
||||
count = new ArrayList<Double>();
|
||||
}
|
||||
|
||||
public void addPoint(double t, double c) {
|
||||
time.add(t);
|
||||
count.add(c);
|
||||
}
|
||||
|
||||
public ModelFunction getModelFunction() {
|
||||
return new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] params) {
|
||||
double[] values = new double[time.size()];
|
||||
for (int i = 0; i < values.length; ++i) {
|
||||
final double t = time.get(i);
|
||||
values[i] = params[0] +
|
||||
params[1] * FastMath.exp(-t / params[3]) +
|
||||
params[2] * FastMath.exp(-t / params[4]);
|
||||
}
|
||||
return values;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public ModelFunctionJacobian getModelFunctionJacobian() {
|
||||
return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(double[] params) {
|
||||
double[][] jacobian = new double[time.size()][5];
|
||||
|
||||
for (int i = 0; i < jacobian.length; ++i) {
|
||||
final double t = time.get(i);
|
||||
jacobian[i][0] = 1;
|
||||
|
||||
final double p3 = params[3];
|
||||
final double p4 = params[4];
|
||||
final double tOp3 = t / p3;
|
||||
final double tOp4 = t / p4;
|
||||
jacobian[i][1] = FastMath.exp(-tOp3);
|
||||
jacobian[i][2] = FastMath.exp(-tOp4);
|
||||
jacobian[i][3] = params[1] * FastMath.exp(-tOp3) * tOp3 / p3;
|
||||
jacobian[i][4] = params[2] * FastMath.exp(-tOp4) * tOp4 / p4;
|
||||
}
|
||||
return jacobian;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,91 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import org.apache.commons.math4.distribution.NormalDistribution;
|
||||
import org.apache.commons.math4.distribution.RealDistribution;
|
||||
import org.apache.commons.math4.distribution.UniformRealDistribution;
|
||||
import org.apache.commons.math4.geometry.euclidean.twod.Vector2D;
|
||||
import org.apache.commons.math4.random.RandomGenerator;
|
||||
import org.apache.commons.math4.random.Well44497b;
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
import org.apache.commons.math4.util.MathUtils;
|
||||
|
||||
/**
|
||||
* Factory for generating a cloud of points that approximate a circle.
|
||||
*/
|
||||
@Deprecated
|
||||
public class RandomCirclePointGenerator {
|
||||
/** RNG for the x-coordinate of the center. */
|
||||
private final RealDistribution cX;
|
||||
/** RNG for the y-coordinate of the center. */
|
||||
private final RealDistribution cY;
|
||||
/** RNG for the parametric position of the point. */
|
||||
private final RealDistribution tP;
|
||||
/** Radius of the circle. */
|
||||
private final double radius;
|
||||
|
||||
/**
|
||||
* @param x Abscissa of the circle center.
|
||||
* @param y Ordinate of the circle center.
|
||||
* @param radius Radius of the circle.
|
||||
* @param xSigma Error on the x-coordinate of the circumference points.
|
||||
* @param ySigma Error on the y-coordinate of the circumference points.
|
||||
* @param seed RNG seed.
|
||||
*/
|
||||
public RandomCirclePointGenerator(double x,
|
||||
double y,
|
||||
double radius,
|
||||
double xSigma,
|
||||
double ySigma,
|
||||
long seed) {
|
||||
final RandomGenerator rng = new Well44497b(seed);
|
||||
this.radius = radius;
|
||||
cX = new NormalDistribution(rng, x, xSigma,
|
||||
NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
|
||||
cY = new NormalDistribution(rng, y, ySigma,
|
||||
NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
|
||||
tP = new UniformRealDistribution(rng, 0, MathUtils.TWO_PI);
|
||||
}
|
||||
|
||||
/**
|
||||
* Point generator.
|
||||
*
|
||||
* @param n Number of points to create.
|
||||
* @return the cloud of {@code n} points.
|
||||
*/
|
||||
public Vector2D[] generate(int n) {
|
||||
final Vector2D[] cloud = new Vector2D[n];
|
||||
for (int i = 0; i < n; i++) {
|
||||
cloud[i] = create();
|
||||
}
|
||||
return cloud;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create one point.
|
||||
*
|
||||
* @return a point.
|
||||
*/
|
||||
private Vector2D create() {
|
||||
final double t = tP.sample();
|
||||
final double pX = cX.sample() + radius * FastMath.cos(t);
|
||||
final double pY = cY.sample() + radius * FastMath.sin(t);
|
||||
|
||||
return new Vector2D(pX, pY);
|
||||
}
|
||||
}
|
|
@ -1,99 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.awt.geom.Point2D;
|
||||
|
||||
import org.apache.commons.math4.distribution.NormalDistribution;
|
||||
import org.apache.commons.math4.distribution.RealDistribution;
|
||||
import org.apache.commons.math4.distribution.UniformRealDistribution;
|
||||
import org.apache.commons.math4.random.RandomGenerator;
|
||||
import org.apache.commons.math4.random.Well44497b;
|
||||
|
||||
/**
|
||||
* Factory for generating a cloud of points that approximate a straight line.
|
||||
*/
|
||||
@Deprecated
|
||||
public class RandomStraightLinePointGenerator {
|
||||
/** Slope. */
|
||||
private final double slope;
|
||||
/** Intercept. */
|
||||
private final double intercept;
|
||||
/** RNG for the x-coordinate. */
|
||||
private final RealDistribution x;
|
||||
/** RNG for the error on the y-coordinate. */
|
||||
private final RealDistribution error;
|
||||
|
||||
/**
|
||||
* The generator will create a cloud of points whose x-coordinates
|
||||
* will be randomly sampled between {@code xLo} and {@code xHi}, and
|
||||
* the corresponding y-coordinates will be computed as
|
||||
* <pre><code>
|
||||
* y = a x + b + N(0, error)
|
||||
* </code></pre>
|
||||
* where {@code N(mean, sigma)} is a Gaussian distribution with the
|
||||
* given mean and standard deviation.
|
||||
*
|
||||
* @param a Slope.
|
||||
* @param b Intercept.
|
||||
* @param sigma Standard deviation on the y-coordinate of the point.
|
||||
* @param lo Lowest value of the x-coordinate.
|
||||
* @param hi Highest value of the x-coordinate.
|
||||
* @param seed RNG seed.
|
||||
*/
|
||||
public RandomStraightLinePointGenerator(double a,
|
||||
double b,
|
||||
double sigma,
|
||||
double lo,
|
||||
double hi,
|
||||
long seed) {
|
||||
final RandomGenerator rng = new Well44497b(seed);
|
||||
slope = a;
|
||||
intercept = b;
|
||||
error = new NormalDistribution(rng, 0, sigma,
|
||||
NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
|
||||
x = new UniformRealDistribution(rng, lo, hi);
|
||||
}
|
||||
|
||||
/**
|
||||
* Point generator.
|
||||
*
|
||||
* @param n Number of points to create.
|
||||
* @return the cloud of {@code n} points.
|
||||
*/
|
||||
public Point2D.Double[] generate(int n) {
|
||||
final Point2D.Double[] cloud = new Point2D.Double[n];
|
||||
for (int i = 0; i < n; i++) {
|
||||
cloud[i] = create();
|
||||
}
|
||||
return cloud;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create one point.
|
||||
*
|
||||
* @return a point.
|
||||
*/
|
||||
private Point2D.Double create() {
|
||||
final double abscissa = x.sample();
|
||||
final double yModel = slope * abscissa + intercept;
|
||||
final double ordinate = yModel + error.sample();
|
||||
|
||||
return new Point2D.Double(abscissa, ordinate);
|
||||
}
|
||||
}
|
|
@ -1,385 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
import org.apache.commons.math4.util.MathArrays;
|
||||
|
||||
/**
|
||||
* This class gives access to the statistical reference datasets provided by the
|
||||
* NIST (available
|
||||
* <a href="http://www.itl.nist.gov/div898/strd/general/dataarchive.html">here</a>).
|
||||
* Instances of this class can be created by invocation of the
|
||||
* {@link StatisticalReferenceDatasetFactory}.
|
||||
*/
|
||||
@Deprecated
|
||||
public abstract class StatisticalReferenceDataset {
|
||||
|
||||
/** The name of this dataset. */
|
||||
private final String name;
|
||||
|
||||
/** The total number of observations (data points). */
|
||||
private final int numObservations;
|
||||
|
||||
/** The total number of parameters. */
|
||||
private final int numParameters;
|
||||
|
||||
/** The total number of starting points for the optimizations. */
|
||||
private final int numStartingPoints;
|
||||
|
||||
/** The values of the predictor. */
|
||||
private final double[] x;
|
||||
|
||||
/** The values of the response. */
|
||||
private final double[] y;
|
||||
|
||||
/**
|
||||
* The starting values. {@code startingValues[j][i]} is the value of the
|
||||
* {@code i}-th parameter in the {@code j}-th set of starting values.
|
||||
*/
|
||||
private final double[][] startingValues;
|
||||
|
||||
/** The certified values of the parameters. */
|
||||
private final double[] a;
|
||||
|
||||
/** The certified values of the standard deviation of the parameters. */
|
||||
private final double[] sigA;
|
||||
|
||||
/** The certified value of the residual sum of squares. */
|
||||
private double residualSumOfSquares;
|
||||
|
||||
/** The least-squares problem. */
|
||||
private final LeastSquaresProblem problem;
|
||||
|
||||
/**
|
||||
* Creates a new instance of this class from the specified data file. The
|
||||
* file must follow the StRD format.
|
||||
*
|
||||
* @param in the data file
|
||||
* @throws IOException if an I/O error occurs
|
||||
*/
|
||||
public StatisticalReferenceDataset(final BufferedReader in)
|
||||
throws IOException {
|
||||
|
||||
final ArrayList<String> lines = new ArrayList<String>();
|
||||
for (String line = in.readLine(); line != null; line = in.readLine()) {
|
||||
lines.add(line);
|
||||
}
|
||||
int[] index = findLineNumbers("Data", lines);
|
||||
if (index == null) {
|
||||
throw new AssertionError("could not find line indices for data");
|
||||
}
|
||||
this.numObservations = index[1] - index[0] + 1;
|
||||
this.x = new double[this.numObservations];
|
||||
this.y = new double[this.numObservations];
|
||||
for (int i = 0; i < this.numObservations; i++) {
|
||||
final String line = lines.get(index[0] + i - 1);
|
||||
final String[] tokens = line.trim().split(" ++");
|
||||
// Data columns are in reverse order!!!
|
||||
this.y[i] = Double.parseDouble(tokens[0]);
|
||||
this.x[i] = Double.parseDouble(tokens[1]);
|
||||
}
|
||||
|
||||
index = findLineNumbers("Starting Values", lines);
|
||||
if (index == null) {
|
||||
throw new AssertionError(
|
||||
"could not find line indices for starting values");
|
||||
}
|
||||
this.numParameters = index[1] - index[0] + 1;
|
||||
|
||||
double[][] start = null;
|
||||
this.a = new double[numParameters];
|
||||
this.sigA = new double[numParameters];
|
||||
for (int i = 0; i < numParameters; i++) {
|
||||
final String line = lines.get(index[0] + i - 1);
|
||||
final String[] tokens = line.trim().split(" ++");
|
||||
if (start == null) {
|
||||
start = new double[tokens.length - 4][numParameters];
|
||||
}
|
||||
for (int j = 2; j < tokens.length - 2; j++) {
|
||||
start[j - 2][i] = Double.parseDouble(tokens[j]);
|
||||
}
|
||||
this.a[i] = Double.parseDouble(tokens[tokens.length - 2]);
|
||||
this.sigA[i] = Double.parseDouble(tokens[tokens.length - 1]);
|
||||
}
|
||||
if (start == null) {
|
||||
throw new IOException("could not find starting values");
|
||||
}
|
||||
this.numStartingPoints = start.length;
|
||||
this.startingValues = start;
|
||||
|
||||
double dummyDouble = Double.NaN;
|
||||
String dummyString = null;
|
||||
for (String line : lines) {
|
||||
if (line.contains("Dataset Name:")) {
|
||||
dummyString = line
|
||||
.substring(line.indexOf("Dataset Name:") + 13,
|
||||
line.indexOf("(")).trim();
|
||||
}
|
||||
if (line.contains("Residual Sum of Squares")) {
|
||||
final String[] tokens = line.split(" ++");
|
||||
dummyDouble = Double.parseDouble(tokens[4].trim());
|
||||
}
|
||||
}
|
||||
if (Double.isNaN(dummyDouble)) {
|
||||
throw new IOException(
|
||||
"could not find certified value of residual sum of squares");
|
||||
}
|
||||
this.residualSumOfSquares = dummyDouble;
|
||||
|
||||
if (dummyString == null) {
|
||||
throw new IOException("could not find dataset name");
|
||||
}
|
||||
this.name = dummyString;
|
||||
|
||||
this.problem = new LeastSquaresProblem();
|
||||
}
|
||||
|
||||
class LeastSquaresProblem {
|
||||
public ModelFunction getModelFunction() {
|
||||
return new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(final double[] a) {
|
||||
final int n = getNumObservations();
|
||||
final double[] yhat = new double[n];
|
||||
for (int i = 0; i < n; i++) {
|
||||
yhat[i] = getModelValue(getX(i), a);
|
||||
}
|
||||
return yhat;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public ModelFunctionJacobian getModelFunctionJacobian() {
|
||||
return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(final double[] a)
|
||||
throws IllegalArgumentException {
|
||||
final int n = getNumObservations();
|
||||
final double[][] j = new double[n][];
|
||||
for (int i = 0; i < n; i++) {
|
||||
j[i] = getModelDerivatives(getX(i), a);
|
||||
}
|
||||
return j;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the name of this dataset.
|
||||
*
|
||||
* @return the name of the dataset
|
||||
*/
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the total number of observations (data points).
|
||||
*
|
||||
* @return the number of observations
|
||||
*/
|
||||
public int getNumObservations() {
|
||||
return numObservations;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a copy of the data arrays. The data is laid out as follows <li>
|
||||
* {@code data[0][i] = x[i]},</li> <li>{@code data[1][i] = y[i]},</li>
|
||||
*
|
||||
* @return the array of data points.
|
||||
*/
|
||||
public double[][] getData() {
|
||||
return new double[][] {
|
||||
MathArrays.copyOf(x), MathArrays.copyOf(y)
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the x-value of the {@code i}-th data point.
|
||||
*
|
||||
* @param i the index of the data point
|
||||
* @return the x-value
|
||||
*/
|
||||
public double getX(final int i) {
|
||||
return x[i];
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the y-value of the {@code i}-th data point.
|
||||
*
|
||||
* @param i the index of the data point
|
||||
* @return the y-value
|
||||
*/
|
||||
public double getY(final int i) {
|
||||
return y[i];
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the total number of parameters.
|
||||
*
|
||||
* @return the number of parameters
|
||||
*/
|
||||
public int getNumParameters() {
|
||||
return numParameters;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the certified values of the paramters.
|
||||
*
|
||||
* @return the values of the parameters
|
||||
*/
|
||||
public double[] getParameters() {
|
||||
return MathArrays.copyOf(a);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the certified value of the {@code i}-th parameter.
|
||||
*
|
||||
* @param i the index of the parameter
|
||||
* @return the value of the parameter
|
||||
*/
|
||||
public double getParameter(final int i) {
|
||||
return a[i];
|
||||
}
|
||||
|
||||
/**
|
||||
* Reurns the certified values of the standard deviations of the parameters.
|
||||
*
|
||||
* @return the standard deviations of the parameters
|
||||
*/
|
||||
public double[] getParametersStandardDeviations() {
|
||||
return MathArrays.copyOf(sigA);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the certified value of the standard deviation of the {@code i}-th
|
||||
* parameter.
|
||||
*
|
||||
* @param i the index of the parameter
|
||||
* @return the standard deviation of the parameter
|
||||
*/
|
||||
public double getParameterStandardDeviation(final int i) {
|
||||
return sigA[i];
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the certified value of the residual sum of squares.
|
||||
*
|
||||
* @return the residual sum of squares
|
||||
*/
|
||||
public double getResidualSumOfSquares() {
|
||||
return residualSumOfSquares;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the total number of starting points (initial guesses for the
|
||||
* optimization process).
|
||||
*
|
||||
* @return the number of starting points
|
||||
*/
|
||||
public int getNumStartingPoints() {
|
||||
return numStartingPoints;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the {@code i}-th set of initial values of the parameters.
|
||||
*
|
||||
* @param i the index of the starting point
|
||||
* @return the starting point
|
||||
*/
|
||||
public double[] getStartingPoint(final int i) {
|
||||
return MathArrays.copyOf(startingValues[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the least-squares problem corresponding to fitting the model to
|
||||
* the specified data.
|
||||
*
|
||||
* @return the least-squares problem
|
||||
*/
|
||||
public LeastSquaresProblem getLeastSquaresProblem() {
|
||||
return problem;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value of the model for the specified values of the predictor
|
||||
* variable and the parameters.
|
||||
*
|
||||
* @param x the predictor variable
|
||||
* @param a the parameters
|
||||
* @return the value of the model
|
||||
*/
|
||||
public abstract double getModelValue(final double x, final double[] a);
|
||||
|
||||
/**
|
||||
* Returns the values of the partial derivatives of the model with respect
|
||||
* to the parameters.
|
||||
*
|
||||
* @param x the predictor variable
|
||||
* @param a the parameters
|
||||
* @return the partial derivatives
|
||||
*/
|
||||
public abstract double[] getModelDerivatives(final double x,
|
||||
final double[] a);
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Parses the specified text lines, and extracts the indices of the first
|
||||
* and last lines of the data defined by the specified {@code key}. This key
|
||||
* must be one of
|
||||
* </p>
|
||||
* <ul>
|
||||
* <li>{@code "Starting Values"},</li>
|
||||
* <li>{@code "Certified Values"},</li>
|
||||
* <li>{@code "Data"}.</li>
|
||||
* </ul>
|
||||
* <p>
|
||||
* In the NIST data files, the line indices are separated by the keywords
|
||||
* {@code "lines"} and {@code "to"}.
|
||||
* </p>
|
||||
*
|
||||
* @param lines the line of text to be parsed
|
||||
* @return an array of two {@code int}s. First value is the index of the
|
||||
* first line, second value is the index of the last line.
|
||||
* {@code null} if the line could not be parsed.
|
||||
*/
|
||||
private static int[] findLineNumbers(final String key,
|
||||
final Iterable<String> lines) {
|
||||
for (String text : lines) {
|
||||
boolean flag = text.contains(key) && text.contains("lines") &&
|
||||
text.contains("to") && text.contains(")");
|
||||
if (flag) {
|
||||
final int[] numbers = new int[2];
|
||||
final String from = text.substring(text.indexOf("lines") + 5,
|
||||
text.indexOf("to"));
|
||||
numbers[0] = Integer.parseInt(from.trim());
|
||||
final String to = text.substring(text.indexOf("to") + 2,
|
||||
text.indexOf(")"));
|
||||
numbers[1] = Integer.parseInt(to.trim());
|
||||
return numbers;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
|
@ -1,203 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
|
||||
import org.apache.commons.math4.util.FastMath;
|
||||
|
||||
/**
|
||||
* A factory to create instances of {@link StatisticalReferenceDataset} from
|
||||
* available resources.
|
||||
*/
|
||||
@Deprecated
|
||||
public class StatisticalReferenceDatasetFactory {
|
||||
|
||||
private StatisticalReferenceDatasetFactory() {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new buffered reader from the specified resource name.
|
||||
*
|
||||
* @param name the name of the resource
|
||||
* @return a buffered reader
|
||||
* @throws IOException if an I/O error occurred
|
||||
*/
|
||||
public static BufferedReader createBufferedReaderFromResource(final String name)
|
||||
throws IOException {
|
||||
final InputStream resourceAsStream;
|
||||
resourceAsStream = StatisticalReferenceDatasetFactory.class
|
||||
.getResourceAsStream(name);
|
||||
if (resourceAsStream == null) {
|
||||
throw new IOException("could not find resource " + name);
|
||||
}
|
||||
return new BufferedReader(new InputStreamReader(resourceAsStream));
|
||||
}
|
||||
|
||||
public static StatisticalReferenceDataset createKirby2()
|
||||
throws IOException {
|
||||
final BufferedReader in = createBufferedReaderFromResource("Kirby2.dat");
|
||||
StatisticalReferenceDataset dataset = null;
|
||||
try {
|
||||
dataset = new StatisticalReferenceDataset(in) {
|
||||
|
||||
@Override
|
||||
public double getModelValue(final double x, final double[] a) {
|
||||
final double p = a[0] + x * (a[1] + x * a[2]);
|
||||
final double q = 1.0 + x * (a[3] + x * a[4]);
|
||||
return p / q;
|
||||
}
|
||||
|
||||
@Override
|
||||
public double[] getModelDerivatives(final double x,
|
||||
final double[] a) {
|
||||
final double[] dy = new double[5];
|
||||
final double p = a[0] + x * (a[1] + x * a[2]);
|
||||
final double q = 1.0 + x * (a[3] + x * a[4]);
|
||||
dy[0] = 1.0 / q;
|
||||
dy[1] = x / q;
|
||||
dy[2] = x * dy[1];
|
||||
dy[3] = -x * p / (q * q);
|
||||
dy[4] = x * dy[3];
|
||||
return dy;
|
||||
}
|
||||
};
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
return dataset;
|
||||
}
|
||||
|
||||
public static StatisticalReferenceDataset createHahn1()
|
||||
throws IOException {
|
||||
final BufferedReader in = createBufferedReaderFromResource("Hahn1.dat");
|
||||
StatisticalReferenceDataset dataset = null;
|
||||
try {
|
||||
dataset = new StatisticalReferenceDataset(in) {
|
||||
|
||||
@Override
|
||||
public double getModelValue(final double x, final double[] a) {
|
||||
final double p = a[0] + x * (a[1] + x * (a[2] + x * a[3]));
|
||||
final double q = 1.0 + x * (a[4] + x * (a[5] + x * a[6]));
|
||||
return p / q;
|
||||
}
|
||||
|
||||
@Override
|
||||
public double[] getModelDerivatives(final double x,
|
||||
final double[] a) {
|
||||
final double[] dy = new double[7];
|
||||
final double p = a[0] + x * (a[1] + x * (a[2] + x * a[3]));
|
||||
final double q = 1.0 + x * (a[4] + x * (a[5] + x * a[6]));
|
||||
dy[0] = 1.0 / q;
|
||||
dy[1] = x * dy[0];
|
||||
dy[2] = x * dy[1];
|
||||
dy[3] = x * dy[2];
|
||||
dy[4] = -x * p / (q * q);
|
||||
dy[5] = x * dy[4];
|
||||
dy[6] = x * dy[5];
|
||||
return dy;
|
||||
}
|
||||
};
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
return dataset;
|
||||
}
|
||||
|
||||
public static StatisticalReferenceDataset createMGH17()
|
||||
throws IOException {
|
||||
final BufferedReader in = createBufferedReaderFromResource("MGH17.dat");
|
||||
StatisticalReferenceDataset dataset = null;
|
||||
try {
|
||||
dataset = new StatisticalReferenceDataset(in) {
|
||||
|
||||
@Override
|
||||
public double getModelValue(final double x, final double[] a) {
|
||||
return a[0] + a[1] * FastMath.exp(-a[3] * x) + a[2] *
|
||||
FastMath.exp(-a[4] * x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public double[] getModelDerivatives(final double x,
|
||||
final double[] a) {
|
||||
final double[] dy = new double[5];
|
||||
dy[0] = 1.0;
|
||||
dy[1] = FastMath.exp(-x * a[3]);
|
||||
dy[2] = FastMath.exp(-x * a[4]);
|
||||
dy[3] = -x * a[1] * dy[1];
|
||||
dy[4] = -x * a[2] * dy[2];
|
||||
return dy;
|
||||
}
|
||||
};
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
return dataset;
|
||||
}
|
||||
|
||||
public static StatisticalReferenceDataset createLanczos1()
|
||||
throws IOException {
|
||||
final BufferedReader in =
|
||||
createBufferedReaderFromResource("Lanczos1.dat");
|
||||
StatisticalReferenceDataset dataset = null;
|
||||
try {
|
||||
dataset = new StatisticalReferenceDataset(in) {
|
||||
|
||||
@Override
|
||||
public double getModelValue(final double x, final double[] a) {
|
||||
System.out.println(a[0]+", "+a[1]+", "+a[2]+", "+a[3]+", "+a[4]+", "+a[5]);
|
||||
return a[0] * FastMath.exp(-a[3] * x) +
|
||||
a[1] * FastMath.exp(-a[4] * x) +
|
||||
a[2] * FastMath.exp(-a[5] * x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public double[] getModelDerivatives(final double x,
|
||||
final double[] a) {
|
||||
final double[] dy = new double[6];
|
||||
dy[0] = FastMath.exp(-x * a[3]);
|
||||
dy[1] = FastMath.exp(-x * a[4]);
|
||||
dy[2] = FastMath.exp(-x * a[5]);
|
||||
dy[3] = -x * a[0] * dy[0];
|
||||
dy[4] = -x * a[1] * dy[1];
|
||||
dy[5] = -x * a[2] * dy[2];
|
||||
return dy;
|
||||
}
|
||||
};
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
return dataset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an array with all available reference datasets.
|
||||
*
|
||||
* @return the array of datasets
|
||||
* @throws IOException if an I/O error occurs
|
||||
*/
|
||||
public StatisticalReferenceDataset[] createAll()
|
||||
throws IOException {
|
||||
return new StatisticalReferenceDataset[] {
|
||||
createKirby2(), createMGH17()
|
||||
};
|
||||
}
|
||||
}
|
|
@ -1,169 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.math4.optim.nonlinear.vector.jacobian;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.commons.math4.analysis.MultivariateMatrixFunction;
|
||||
import org.apache.commons.math4.analysis.MultivariateVectorFunction;
|
||||
import org.apache.commons.math4.analysis.UnivariateFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunction;
|
||||
import org.apache.commons.math4.optim.nonlinear.vector.ModelFunctionJacobian;
|
||||
import org.apache.commons.math4.stat.regression.SimpleRegression;
|
||||
|
||||
/**
|
||||
* Class that models a straight line defined as {@code y = a x + b}.
|
||||
* The parameters of problem are:
|
||||
* <ul>
|
||||
* <li>{@code a}</li>
|
||||
* <li>{@code b}</li>
|
||||
* </ul>
|
||||
* The model functions are:
|
||||
* <ul>
|
||||
* <li>for each pair (a, b), the y-coordinate of the line.</li>
|
||||
* </ul>
|
||||
*/
|
||||
@Deprecated
|
||||
class StraightLineProblem {
|
||||
/** Cloud of points assumed to be fitted by a straight line. */
|
||||
private final ArrayList<double[]> points;
|
||||
/** Error (on the y-coordinate of the points). */
|
||||
private final double sigma;
|
||||
|
||||
/**
|
||||
* @param error Assumed error for the y-coordinate.
|
||||
*/
|
||||
public StraightLineProblem(double error) {
|
||||
points = new ArrayList<double[]>();
|
||||
sigma = error;
|
||||
}
|
||||
|
||||
public void addPoint(double px, double py) {
|
||||
points.add(new double[] { px, py });
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the list of x-coordinates.
|
||||
*/
|
||||
public double[] x() {
|
||||
final double[] v = new double[points.size()];
|
||||
for (int i = 0; i < points.size(); i++) {
|
||||
final double[] p = points.get(i);
|
||||
v[i] = p[0]; // x-coordinate.
|
||||
}
|
||||
|
||||
return v;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the list of y-coordinates.
|
||||
*/
|
||||
public double[] y() {
|
||||
final double[] v = new double[points.size()];
|
||||
for (int i = 0; i < points.size(); i++) {
|
||||
final double[] p = points.get(i);
|
||||
v[i] = p[1]; // y-coordinate.
|
||||
}
|
||||
|
||||
return v;
|
||||
}
|
||||
|
||||
public double[] target() {
|
||||
return y();
|
||||
}
|
||||
|
||||
public double[] weight() {
|
||||
final double weight = 1 / (sigma * sigma);
|
||||
final double[] w = new double[points.size()];
|
||||
for (int i = 0; i < points.size(); i++) {
|
||||
w[i] = weight;
|
||||
}
|
||||
|
||||
return w;
|
||||
}
|
||||
|
||||
public ModelFunction getModelFunction() {
|
||||
return new ModelFunction(new MultivariateVectorFunction() {
|
||||
public double[] value(double[] params) {
|
||||
final Model line = new Model(params[0], params[1]);
|
||||
|
||||
final double[] model = new double[points.size()];
|
||||
for (int i = 0; i < points.size(); i++) {
|
||||
final double[] p = points.get(i);
|
||||
model[i] = line.value(p[0]);
|
||||
}
|
||||
|
||||
return model;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public ModelFunctionJacobian getModelFunctionJacobian() {
|
||||
return new ModelFunctionJacobian(new MultivariateMatrixFunction() {
|
||||
public double[][] value(double[] point) {
|
||||
return jacobian(point);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Directly solve the linear problem, using the {@link SimpleRegression}
|
||||
* class.
|
||||
*/
|
||||
public double[] solve() {
|
||||
final SimpleRegression regress = new SimpleRegression(true);
|
||||
for (double[] d : points) {
|
||||
regress.addData(d[0], d[1]);
|
||||
}
|
||||
|
||||
final double[] result = { regress.getSlope(), regress.getIntercept() };
|
||||
return result;
|
||||
}
|
||||
|
||||
private double[][] jacobian(double[] params) {
|
||||
final double[][] jacobian = new double[points.size()][2];
|
||||
|
||||
for (int i = 0; i < points.size(); i++) {
|
||||
final double[] p = points.get(i);
|
||||
// Partial derivative wrt "a".
|
||||
jacobian[i][0] = p[0];
|
||||
// Partial derivative wrt "b".
|
||||
jacobian[i][1] = 1;
|
||||
}
|
||||
|
||||
return jacobian;
|
||||
}
|
||||
|
||||
/**
|
||||
* Linear function.
|
||||
*/
|
||||
public static class Model implements UnivariateFunction {
|
||||
final double a;
|
||||
final double b;
|
||||
|
||||
public Model(double a,
|
||||
double b) {
|
||||
this.a = a;
|
||||
this.b = b;
|
||||
}
|
||||
|
||||
public double value(double x) {
|
||||
return a * x + b;
|
||||
}
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue